github.com/chenzhuoyu/iasm@v0.9.1/x86_64/instructions.go (about)

     1  // Code generated by "mkasm_amd64.py", DO NOT EDIT.
     2  
     3  package x86_64
     4  
     5  // ADCB performs "Add with Carry".
     6  //
     7  // Mnemonic        : ADC
     8  // Supported forms : (6 forms)
     9  //
    10  //    * ADCB imm8, al
    11  //    * ADCB imm8, r8
    12  //    * ADCB r8, r8
    13  //    * ADCB m8, r8
    14  //    * ADCB imm8, m8
    15  //    * ADCB r8, m8
    16  //
    17  func (self *Program) ADCB(v0 interface{}, v1 interface{}) *Instruction {
    18      p := self.alloc("ADCB", 2, Operands { v0, v1 })
    19      // ADCB imm8, al
    20      if isImm8(v0) && v1 == AL {
    21          p.domain = DomainGeneric
    22          p.add(0, func(m *_Encoding, v []interface{}) {
    23              m.emit(0x14)                // opcode 14 ib: ADC AL, imm8 (accumulator short form, no ModRM)
    24              m.imm1(toImmAny(v[0]))
    25          })
    26      }
    27      // ADCB imm8, r8
    28      if isImm8(v0) && isReg8(v1) {
    29          p.domain = DomainGeneric
    30          p.add(0, func(m *_Encoding, v []interface{}) {
    31              m.rexo(0, v[1], isReg8REX(v[1]))    // REX prefix is mandatory for SPL/BPL/SIL/DIL and R8B-R15B
    32              m.emit(0x80)                        // opcode 80 /2 ib: group-1 r/m8, imm8
    33              m.emit(0xd0 | lcode(v[1]))          // ModRM: mod=11, reg=/2 (ADC), rm=dst register
    34              m.imm1(toImmAny(v[0]))
    35          })
    36      }
    37      // ADCB r8, r8
    38      if isReg8(v0) && isReg8(v1) {
    39          p.domain = DomainGeneric
    40          p.add(0, func(m *_Encoding, v []interface{}) {    // MR form: 10 /r (ADC r/m8, r8)
    41              m.rexo(hcode(v[0]), v[1], isReg8REX(v[0]) || isReg8REX(v[1]))
    42              m.emit(0x10)
    43              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
    44          })
    45          p.add(0, func(m *_Encoding, v []interface{}) {    // equivalent RM form: 12 /r (ADC r8, r/m8)
    46              m.rexo(hcode(v[1]), v[0], isReg8REX(v[0]) || isReg8REX(v[1]))
    47              m.emit(0x12)
    48              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
    49          })
    50      }
    51      // ADCB m8, r8
    52      if isM8(v0) && isReg8(v1) {
    53          p.domain = DomainGeneric
    54          p.add(0, func(m *_Encoding, v []interface{}) {
    55              m.rexo(hcode(v[1]), addr(v[0]), isReg8REX(v[1]))
    56              m.emit(0x12)                             // opcode 12 /r: ADC r8, r/m8
    57              m.mrsd(lcode(v[1]), addr(v[0]), 1)
    58          })
    59      }
    60      // ADCB imm8, m8
    61      if isImm8(v0) && isM8(v1) {
    62          p.domain = DomainGeneric
    63          p.add(0, func(m *_Encoding, v []interface{}) {
    64              m.rexo(0, addr(v[1]), false)
    65              m.emit(0x80)
    66              m.mrsd(2, addr(v[1]), 1)                 // reg field /2 selects ADC within the group-1 opcode
    67              m.imm1(toImmAny(v[0]))
    68          })
    69      }
    70      // ADCB r8, m8
    71      if isReg8(v0) && isM8(v1) {
    72          p.domain = DomainGeneric
    73          p.add(0, func(m *_Encoding, v []interface{}) {
    74              m.rexo(hcode(v[0]), addr(v[1]), isReg8REX(v[0]))
    75              m.emit(0x10)                             // opcode 10 /r: ADC r/m8, r8
    76              m.mrsd(lcode(v[0]), addr(v[1]), 1)
    77          })
    78      }
    79      if p.len == 0 {
    80          panic("invalid operands for ADCB")           // no supported form matched the operand kinds
    81      }
    82      return p
    83  }
    84  
    85  // ADCL performs "Add with Carry".
    86  //
    87  // Mnemonic        : ADC
    88  // Supported forms : (8 forms)
    89  //
    90  //    * ADCL imm32, eax
    91  //    * ADCL imm8, r32
    92  //    * ADCL imm32, r32
    93  //    * ADCL r32, r32
    94  //    * ADCL m32, r32
    95  //    * ADCL imm8, m32
    96  //    * ADCL imm32, m32
    97  //    * ADCL r32, m32
    98  //
    99  func (self *Program) ADCL(v0 interface{}, v1 interface{}) *Instruction {
   100      p := self.alloc("ADCL", 2, Operands { v0, v1 })
   101      // ADCL imm32, eax
   102      if isImm32(v0) && v1 == EAX {
   103          p.domain = DomainGeneric
   104          p.add(0, func(m *_Encoding, v []interface{}) {
   105              m.emit(0x15)                // opcode 15 id: ADC EAX, imm32 (accumulator short form)
   106              m.imm4(toImmAny(v[0]))
   107          })
   108      }
   109      // ADCL imm8, r32
   110      if isImm8Ext(v0, 4) && isReg32(v1) {
   111          p.domain = DomainGeneric
   112          p.add(0, func(m *_Encoding, v []interface{}) {
   113              m.rexo(0, v[1], false)
   114              m.emit(0x83)                // opcode 83 /2 ib: imm8 sign-extended to 32 bits
   115              m.emit(0xd0 | lcode(v[1]))  // ModRM: mod=11, reg=/2 (ADC), rm=dst register
   116              m.imm1(toImmAny(v[0]))
   117          })
   118      }
   119      // ADCL imm32, r32
   120      if isImm32(v0) && isReg32(v1) {
   121          p.domain = DomainGeneric
   122          p.add(0, func(m *_Encoding, v []interface{}) {
   123              m.rexo(0, v[1], false)
   124              m.emit(0x81)                // opcode 81 /2 id: full 32-bit immediate
   125              m.emit(0xd0 | lcode(v[1]))
   126              m.imm4(toImmAny(v[0]))
   127          })
   128      }
   129      // ADCL r32, r32
   130      if isReg32(v0) && isReg32(v1) {
   131          p.domain = DomainGeneric
   132          p.add(0, func(m *_Encoding, v []interface{}) {    // MR form: 11 /r (ADC r/m32, r32)
   133              m.rexo(hcode(v[0]), v[1], false)
   134              m.emit(0x11)
   135              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
   136          })
   137          p.add(0, func(m *_Encoding, v []interface{}) {    // equivalent RM form: 13 /r (ADC r32, r/m32)
   138              m.rexo(hcode(v[1]), v[0], false)
   139              m.emit(0x13)
   140              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
   141          })
   142      }
   143      // ADCL m32, r32
   144      if isM32(v0) && isReg32(v1) {
   145          p.domain = DomainGeneric
   146          p.add(0, func(m *_Encoding, v []interface{}) {
   147              m.rexo(hcode(v[1]), addr(v[0]), false)
   148              m.emit(0x13)                             // opcode 13 /r: ADC r32, r/m32
   149              m.mrsd(lcode(v[1]), addr(v[0]), 1)
   150          })
   151      }
   152      // ADCL imm8, m32
   153      if isImm8Ext(v0, 4) && isM32(v1) {
   154          p.domain = DomainGeneric
   155          p.add(0, func(m *_Encoding, v []interface{}) {
   156              m.rexo(0, addr(v[1]), false)
   157              m.emit(0x83)
   158              m.mrsd(2, addr(v[1]), 1)                 // reg field /2 selects ADC within the group-1 opcode
   159              m.imm1(toImmAny(v[0]))
   160          })
   161      }
   162      // ADCL imm32, m32
   163      if isImm32(v0) && isM32(v1) {
   164          p.domain = DomainGeneric
   165          p.add(0, func(m *_Encoding, v []interface{}) {
   166              m.rexo(0, addr(v[1]), false)
   167              m.emit(0x81)
   168              m.mrsd(2, addr(v[1]), 1)
   169              m.imm4(toImmAny(v[0]))
   170          })
   171      }
   172      // ADCL r32, m32
   173      if isReg32(v0) && isM32(v1) {
   174          p.domain = DomainGeneric
   175          p.add(0, func(m *_Encoding, v []interface{}) {
   176              m.rexo(hcode(v[0]), addr(v[1]), false)
   177              m.emit(0x11)                             // opcode 11 /r: ADC r/m32, r32
   178              m.mrsd(lcode(v[0]), addr(v[1]), 1)
   179          })
   180      }
   181      if p.len == 0 {
   182          panic("invalid operands for ADCL")           // no supported form matched the operand kinds
   183      }
   184      return p
   185  }
   186  
   187  // ADCQ performs "Add with Carry".
   188  //
   189  // Mnemonic        : ADC
   190  // Supported forms : (8 forms)
   191  //
   192  //    * ADCQ imm32, rax
   193  //    * ADCQ imm8, r64
   194  //    * ADCQ imm32, r64
   195  //    * ADCQ r64, r64
   196  //    * ADCQ m64, r64
   197  //    * ADCQ imm8, m64
   198  //    * ADCQ imm32, m64
   199  //    * ADCQ r64, m64
   200  //
   201  func (self *Program) ADCQ(v0 interface{}, v1 interface{}) *Instruction {
   202      p := self.alloc("ADCQ", 2, Operands { v0, v1 })
   203      // ADCQ imm32, rax
   204      if isImm32(v0) && v1 == RAX {
   205          p.domain = DomainGeneric
   206          p.add(0, func(m *_Encoding, v []interface{}) {
   207              m.emit(0x48)                // REX.W prefix: promote to 64-bit operand size
   208              m.emit(0x15)                // opcode 15 id: ADC RAX, imm32 (sign-extended to 64 bits)
   209              m.imm4(toImmAny(v[0]))
   210          })
   211      }
   212      // ADCQ imm8, r64
   213      if isImm8Ext(v0, 8) && isReg64(v1) {
   214          p.domain = DomainGeneric
   215          p.add(0, func(m *_Encoding, v []interface{}) {
   216              m.emit(0x48 | hcode(v[1]))  // REX.W, plus REX.B when dst is R8-R15
   217              m.emit(0x83)                // opcode 83 /2 ib: imm8 sign-extended to 64 bits
   218              m.emit(0xd0 | lcode(v[1]))  // ModRM: mod=11, reg=/2 (ADC), rm=dst register
   219              m.imm1(toImmAny(v[0]))
   220          })
   221      }
   222      // ADCQ imm32, r64
   223      if isImm32Ext(v0, 8) && isReg64(v1) {
   224          p.domain = DomainGeneric
   225          p.add(0, func(m *_Encoding, v []interface{}) {
   226              m.emit(0x48 | hcode(v[1]))
   227              m.emit(0x81)                // opcode 81 /2 id: imm32 sign-extended to 64 bits
   228              m.emit(0xd0 | lcode(v[1]))
   229              m.imm4(toImmAny(v[0]))
   230          })
   231      }
   232      // ADCQ r64, r64
   233      if isReg64(v0) && isReg64(v1) {
   234          p.domain = DomainGeneric
   235          p.add(0, func(m *_Encoding, v []interface{}) {    // MR form: REX.W 11 /r
   236              m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1]))    // REX.W + REX.R (src) + REX.B (dst)
   237              m.emit(0x11)
   238              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
   239          })
   240          p.add(0, func(m *_Encoding, v []interface{}) {    // equivalent RM form: REX.W 13 /r
   241              m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
   242              m.emit(0x13)
   243              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
   244          })
   245      }
   246      // ADCQ m64, r64
   247      if isM64(v0) && isReg64(v1) {
   248          p.domain = DomainGeneric
   249          p.add(0, func(m *_Encoding, v []interface{}) {
   250              m.rexm(1, hcode(v[1]), addr(v[0]))       // REX.W plus extension bits from the memory operand
   251              m.emit(0x13)                             // opcode 13 /r: ADC r64, r/m64
   252              m.mrsd(lcode(v[1]), addr(v[0]), 1)
   253          })
   254      }
   255      // ADCQ imm8, m64
   256      if isImm8Ext(v0, 8) && isM64(v1) {
   257          p.domain = DomainGeneric
   258          p.add(0, func(m *_Encoding, v []interface{}) {
   259              m.rexm(1, 0, addr(v[1]))
   260              m.emit(0x83)
   261              m.mrsd(2, addr(v[1]), 1)                 // reg field /2 selects ADC within the group-1 opcode
   262              m.imm1(toImmAny(v[0]))
   263          })
   264      }
   265      // ADCQ imm32, m64
   266      if isImm32Ext(v0, 8) && isM64(v1) {
   267          p.domain = DomainGeneric
   268          p.add(0, func(m *_Encoding, v []interface{}) {
   269              m.rexm(1, 0, addr(v[1]))
   270              m.emit(0x81)
   271              m.mrsd(2, addr(v[1]), 1)
   272              m.imm4(toImmAny(v[0]))
   273          })
   274      }
   275      // ADCQ r64, m64
   276      if isReg64(v0) && isM64(v1) {
   277          p.domain = DomainGeneric
   278          p.add(0, func(m *_Encoding, v []interface{}) {
   279              m.rexm(1, hcode(v[0]), addr(v[1]))
   280              m.emit(0x11)                             // opcode 11 /r: ADC r/m64, r64
   281              m.mrsd(lcode(v[0]), addr(v[1]), 1)
   282          })
   283      }
   284      if p.len == 0 {
   285          panic("invalid operands for ADCQ")           // no supported form matched the operand kinds
   286      }
   287      return p
   288  }
   289  
   290  // ADCW performs "Add with Carry".
   291  //
   292  // Mnemonic        : ADC
   293  // Supported forms : (8 forms)
   294  //
   295  //    * ADCW imm16, ax
   296  //    * ADCW imm8, r16
   297  //    * ADCW imm16, r16
   298  //    * ADCW r16, r16
   299  //    * ADCW m16, r16
   300  //    * ADCW imm8, m16
   301  //    * ADCW imm16, m16
   302  //    * ADCW r16, m16
   303  //
   304  func (self *Program) ADCW(v0 interface{}, v1 interface{}) *Instruction {
   305      p := self.alloc("ADCW", 2, Operands { v0, v1 })
   306      // ADCW imm16, ax
   307      if isImm16(v0) && v1 == AX {
   308          p.domain = DomainGeneric
   309          p.add(0, func(m *_Encoding, v []interface{}) {
   310              m.emit(0x66)                // 66h: operand-size override prefix selects 16-bit operands
   311              m.emit(0x15)                // opcode 15 iw: ADC AX, imm16 (accumulator short form)
   312              m.imm2(toImmAny(v[0]))
   313          })
   314      }
   315      // ADCW imm8, r16
   316      if isImm8Ext(v0, 2) && isReg16(v1) {
   317          p.domain = DomainGeneric
   318          p.add(0, func(m *_Encoding, v []interface{}) {
   319              m.emit(0x66)
   320              m.rexo(0, v[1], false)
   321              m.emit(0x83)                // opcode 83 /2 ib: imm8 sign-extended to 16 bits
   322              m.emit(0xd0 | lcode(v[1]))  // ModRM: mod=11, reg=/2 (ADC), rm=dst register
   323              m.imm1(toImmAny(v[0]))
   324          })
   325      }
   326      // ADCW imm16, r16
   327      if isImm16(v0) && isReg16(v1) {
   328          p.domain = DomainGeneric
   329          p.add(0, func(m *_Encoding, v []interface{}) {
   330              m.emit(0x66)
   331              m.rexo(0, v[1], false)
   332              m.emit(0x81)                // opcode 81 /2 iw: full 16-bit immediate
   333              m.emit(0xd0 | lcode(v[1]))
   334              m.imm2(toImmAny(v[0]))
   335          })
   336      }
   337      // ADCW r16, r16
   338      if isReg16(v0) && isReg16(v1) {
   339          p.domain = DomainGeneric
   340          p.add(0, func(m *_Encoding, v []interface{}) {    // MR form: 66 11 /r
   341              m.emit(0x66)
   342              m.rexo(hcode(v[0]), v[1], false)
   343              m.emit(0x11)
   344              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
   345          })
   346          p.add(0, func(m *_Encoding, v []interface{}) {    // equivalent RM form: 66 13 /r
   347              m.emit(0x66)
   348              m.rexo(hcode(v[1]), v[0], false)
   349              m.emit(0x13)
   350              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
   351          })
   352      }
   353      // ADCW m16, r16
   354      if isM16(v0) && isReg16(v1) {
   355          p.domain = DomainGeneric
   356          p.add(0, func(m *_Encoding, v []interface{}) {
   357              m.emit(0x66)
   358              m.rexo(hcode(v[1]), addr(v[0]), false)
   359              m.emit(0x13)                             // opcode 13 /r: ADC r16, r/m16
   360              m.mrsd(lcode(v[1]), addr(v[0]), 1)
   361          })
   362      }
   363      // ADCW imm8, m16
   364      if isImm8Ext(v0, 2) && isM16(v1) {
   365          p.domain = DomainGeneric
   366          p.add(0, func(m *_Encoding, v []interface{}) {
   367              m.emit(0x66)
   368              m.rexo(0, addr(v[1]), false)
   369              m.emit(0x83)
   370              m.mrsd(2, addr(v[1]), 1)                 // reg field /2 selects ADC within the group-1 opcode
   371              m.imm1(toImmAny(v[0]))
   372          })
   373      }
   374      // ADCW imm16, m16
   375      if isImm16(v0) && isM16(v1) {
   376          p.domain = DomainGeneric
   377          p.add(0, func(m *_Encoding, v []interface{}) {
   378              m.emit(0x66)
   379              m.rexo(0, addr(v[1]), false)
   380              m.emit(0x81)
   381              m.mrsd(2, addr(v[1]), 1)
   382              m.imm2(toImmAny(v[0]))
   383          })
   384      }
   385      // ADCW r16, m16
   386      if isReg16(v0) && isM16(v1) {
   387          p.domain = DomainGeneric
   388          p.add(0, func(m *_Encoding, v []interface{}) {
   389              m.emit(0x66)
   390              m.rexo(hcode(v[0]), addr(v[1]), false)
   391              m.emit(0x11)                             // opcode 11 /r: ADC r/m16, r16
   392              m.mrsd(lcode(v[0]), addr(v[1]), 1)
   393          })
   394      }
   395      if p.len == 0 {
   396          panic("invalid operands for ADCW")           // no supported form matched the operand kinds
   397      }
   398      return p
   399  }
   400  
   401  // ADCXL performs "Unsigned Integer Addition of Two Operands with Carry Flag".
   402  //
   403  // Mnemonic        : ADCX
   404  // Supported forms : (2 forms)
   405  //
   406  //    * ADCXL r32, r32    [ADX]
   407  //    * ADCXL m32, r32    [ADX]
   408  //
   409  func (self *Program) ADCXL(v0 interface{}, v1 interface{}) *Instruction {
   410      p := self.alloc("ADCXL", 2, Operands { v0, v1 })
   411      // ADCXL r32, r32
   412      if isReg32(v0) && isReg32(v1) {
   413          self.require(ISA_ADX)                        // ADCX requires the ADX extension
   414          p.domain = DomainGeneric
   415          p.add(0, func(m *_Encoding, v []interface{}) {
   416              m.emit(0x66)                             // encoding: 66 0F 38 F6 /r (RM form, dest in reg field)
   417              m.rexo(hcode(v[1]), v[0], false)
   418              m.emit(0x0f)
   419              m.emit(0x38)
   420              m.emit(0xf6)
   421              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11, reg=dst, rm=src
   422          })
   423      }
   424      // ADCXL m32, r32
   425      if isM32(v0) && isReg32(v1) {
   426          self.require(ISA_ADX)
   427          p.domain = DomainGeneric
   428          p.add(0, func(m *_Encoding, v []interface{}) {
   429              m.emit(0x66)
   430              m.rexo(hcode(v[1]), addr(v[0]), false)
   431              m.emit(0x0f)
   432              m.emit(0x38)
   433              m.emit(0xf6)
   434              m.mrsd(lcode(v[1]), addr(v[0]), 1)
   435          })
   436      }
   437      if p.len == 0 {
   438          panic("invalid operands for ADCXL")          // no supported form matched the operand kinds
   439      }
   440      return p
   441  }
   442  
   443  // ADCXQ performs "Unsigned Integer Addition of Two Operands with Carry Flag".
   444  //
   445  // Mnemonic        : ADCX
   446  // Supported forms : (2 forms)
   447  //
   448  //    * ADCXQ r64, r64    [ADX]
   449  //    * ADCXQ m64, r64    [ADX]
   450  //
   451  func (self *Program) ADCXQ(v0 interface{}, v1 interface{}) *Instruction {
   452      p := self.alloc("ADCXQ", 2, Operands { v0, v1 })
   453      // ADCXQ r64, r64
   454      if isReg64(v0) && isReg64(v1) {
   455          self.require(ISA_ADX)                        // ADCX requires the ADX extension
   456          p.domain = DomainGeneric
   457          p.add(0, func(m *_Encoding, v []interface{}) {
   458              m.emit(0x66)                             // encoding: 66 REX.W 0F 38 F6 /r (RM form)
   459              m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))    // REX.W + REX.R (dst) + REX.B (src)
   460              m.emit(0x0f)
   461              m.emit(0x38)
   462              m.emit(0xf6)
   463              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11, reg=dst, rm=src
   464          })
   465      }
   466      // ADCXQ m64, r64
   467      if isM64(v0) && isReg64(v1) {
   468          self.require(ISA_ADX)
   469          p.domain = DomainGeneric
   470          p.add(0, func(m *_Encoding, v []interface{}) {
   471              m.emit(0x66)
   472              m.rexm(1, hcode(v[1]), addr(v[0]))       // REX.W plus extension bits from the memory operand
   473              m.emit(0x0f)
   474              m.emit(0x38)
   475              m.emit(0xf6)
   476              m.mrsd(lcode(v[1]), addr(v[0]), 1)
   477          })
   478      }
   479      if p.len == 0 {
   480          panic("invalid operands for ADCXQ")          // no supported form matched the operand kinds
   481      }
   482      return p
   483  }
   484  
   485  // ADDB performs "Add".
   486  //
   487  // Mnemonic        : ADD
   488  // Supported forms : (6 forms)
   489  //
   490  //    * ADDB imm8, al
   491  //    * ADDB imm8, r8
   492  //    * ADDB r8, r8
   493  //    * ADDB m8, r8
   494  //    * ADDB imm8, m8
   495  //    * ADDB r8, m8
   496  //
   497  func (self *Program) ADDB(v0 interface{}, v1 interface{}) *Instruction {
   498      p := self.alloc("ADDB", 2, Operands { v0, v1 })
   499      // ADDB imm8, al
   500      if isImm8(v0) && v1 == AL {
   501          p.domain = DomainGeneric
   502          p.add(0, func(m *_Encoding, v []interface{}) {
   503              m.emit(0x04)                // opcode 04 ib: ADD AL, imm8 (accumulator short form, no ModRM)
   504              m.imm1(toImmAny(v[0]))
   505          })
   506      }
   507      // ADDB imm8, r8
   508      if isImm8(v0) && isReg8(v1) {
   509          p.domain = DomainGeneric
   510          p.add(0, func(m *_Encoding, v []interface{}) {
   511              m.rexo(0, v[1], isReg8REX(v[1]))    // REX prefix is mandatory for SPL/BPL/SIL/DIL and R8B-R15B
   512              m.emit(0x80)                        // opcode 80 /0 ib: group-1 r/m8, imm8
   513              m.emit(0xc0 | lcode(v[1]))          // ModRM: mod=11, reg=/0 (ADD), rm=dst register
   514              m.imm1(toImmAny(v[0]))
   515          })
   516      }
   517      // ADDB r8, r8
   518      if isReg8(v0) && isReg8(v1) {
   519          p.domain = DomainGeneric
   520          p.add(0, func(m *_Encoding, v []interface{}) {    // MR form: 00 /r (ADD r/m8, r8)
   521              m.rexo(hcode(v[0]), v[1], isReg8REX(v[0]) || isReg8REX(v[1]))
   522              m.emit(0x00)
   523              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
   524          })
   525          p.add(0, func(m *_Encoding, v []interface{}) {    // equivalent RM form: 02 /r (ADD r8, r/m8)
   526              m.rexo(hcode(v[1]), v[0], isReg8REX(v[0]) || isReg8REX(v[1]))
   527              m.emit(0x02)
   528              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
   529          })
   530      }
   531      // ADDB m8, r8
   532      if isM8(v0) && isReg8(v1) {
   533          p.domain = DomainGeneric
   534          p.add(0, func(m *_Encoding, v []interface{}) {
   535              m.rexo(hcode(v[1]), addr(v[0]), isReg8REX(v[1]))
   536              m.emit(0x02)                             // opcode 02 /r: ADD r8, r/m8
   537              m.mrsd(lcode(v[1]), addr(v[0]), 1)
   538          })
   539      }
   540      // ADDB imm8, m8
   541      if isImm8(v0) && isM8(v1) {
   542          p.domain = DomainGeneric
   543          p.add(0, func(m *_Encoding, v []interface{}) {
   544              m.rexo(0, addr(v[1]), false)
   545              m.emit(0x80)
   546              m.mrsd(0, addr(v[1]), 1)                 // reg field /0 selects ADD within the group-1 opcode
   547              m.imm1(toImmAny(v[0]))
   548          })
   549      }
   550      // ADDB r8, m8
   551      if isReg8(v0) && isM8(v1) {
   552          p.domain = DomainGeneric
   553          p.add(0, func(m *_Encoding, v []interface{}) {
   554              m.rexo(hcode(v[0]), addr(v[1]), isReg8REX(v[0]))
   555              m.emit(0x00)                             // opcode 00 /r: ADD r/m8, r8
   556              m.mrsd(lcode(v[0]), addr(v[1]), 1)
   557          })
   558      }
   559      if p.len == 0 {
   560          panic("invalid operands for ADDB")           // no supported form matched the operand kinds
   561      }
   562      return p
   563  }
   564  
   565  // ADDL performs "Add".
   566  //
   567  // Mnemonic        : ADD
   568  // Supported forms : (8 forms)
   569  //
   570  //    * ADDL imm32, eax
   571  //    * ADDL imm8, r32
   572  //    * ADDL imm32, r32
   573  //    * ADDL r32, r32
   574  //    * ADDL m32, r32
   575  //    * ADDL imm8, m32
   576  //    * ADDL imm32, m32
   577  //    * ADDL r32, m32
   578  //
   579  func (self *Program) ADDL(v0 interface{}, v1 interface{}) *Instruction {
   580      p := self.alloc("ADDL", 2, Operands { v0, v1 })
   581      // ADDL imm32, eax
   582      if isImm32(v0) && v1 == EAX {
   583          p.domain = DomainGeneric
   584          p.add(0, func(m *_Encoding, v []interface{}) {
   585              m.emit(0x05)                // opcode 05 id: ADD EAX, imm32 (accumulator short form)
   586              m.imm4(toImmAny(v[0]))
   587          })
   588      }
   589      // ADDL imm8, r32
   590      if isImm8Ext(v0, 4) && isReg32(v1) {
   591          p.domain = DomainGeneric
   592          p.add(0, func(m *_Encoding, v []interface{}) {
   593              m.rexo(0, v[1], false)
   594              m.emit(0x83)                // opcode 83 /0 ib: imm8 sign-extended to 32 bits
   595              m.emit(0xc0 | lcode(v[1]))  // ModRM: mod=11, reg=/0 (ADD), rm=dst register
   596              m.imm1(toImmAny(v[0]))
   597          })
   598      }
   599      // ADDL imm32, r32
   600      if isImm32(v0) && isReg32(v1) {
   601          p.domain = DomainGeneric
   602          p.add(0, func(m *_Encoding, v []interface{}) {
   603              m.rexo(0, v[1], false)
   604              m.emit(0x81)                // opcode 81 /0 id: full 32-bit immediate
   605              m.emit(0xc0 | lcode(v[1]))
   606              m.imm4(toImmAny(v[0]))
   607          })
   608      }
   609      // ADDL r32, r32
   610      if isReg32(v0) && isReg32(v1) {
   611          p.domain = DomainGeneric
   612          p.add(0, func(m *_Encoding, v []interface{}) {    // MR form: 01 /r (ADD r/m32, r32)
   613              m.rexo(hcode(v[0]), v[1], false)
   614              m.emit(0x01)
   615              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
   616          })
   617          p.add(0, func(m *_Encoding, v []interface{}) {    // equivalent RM form: 03 /r (ADD r32, r/m32)
   618              m.rexo(hcode(v[1]), v[0], false)
   619              m.emit(0x03)
   620              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
   621          })
   622      }
   623      // ADDL m32, r32
   624      if isM32(v0) && isReg32(v1) {
   625          p.domain = DomainGeneric
   626          p.add(0, func(m *_Encoding, v []interface{}) {
   627              m.rexo(hcode(v[1]), addr(v[0]), false)
   628              m.emit(0x03)                             // opcode 03 /r: ADD r32, r/m32
   629              m.mrsd(lcode(v[1]), addr(v[0]), 1)
   630          })
   631      }
   632      // ADDL imm8, m32
   633      if isImm8Ext(v0, 4) && isM32(v1) {
   634          p.domain = DomainGeneric
   635          p.add(0, func(m *_Encoding, v []interface{}) {
   636              m.rexo(0, addr(v[1]), false)
   637              m.emit(0x83)
   638              m.mrsd(0, addr(v[1]), 1)                 // reg field /0 selects ADD within the group-1 opcode
   639              m.imm1(toImmAny(v[0]))
   640          })
   641      }
   642      // ADDL imm32, m32
   643      if isImm32(v0) && isM32(v1) {
   644          p.domain = DomainGeneric
   645          p.add(0, func(m *_Encoding, v []interface{}) {
   646              m.rexo(0, addr(v[1]), false)
   647              m.emit(0x81)
   648              m.mrsd(0, addr(v[1]), 1)
   649              m.imm4(toImmAny(v[0]))
   650          })
   651      }
   652      // ADDL r32, m32
   653      if isReg32(v0) && isM32(v1) {
   654          p.domain = DomainGeneric
   655          p.add(0, func(m *_Encoding, v []interface{}) {
   656              m.rexo(hcode(v[0]), addr(v[1]), false)
   657              m.emit(0x01)                             // opcode 01 /r: ADD r/m32, r32
   658              m.mrsd(lcode(v[0]), addr(v[1]), 1)
   659          })
   660      }
   661      if p.len == 0 {
   662          panic("invalid operands for ADDL")           // no supported form matched the operand kinds
   663      }
   664      return p
   665  }
   666  
   667  // ADDPD performs "Add Packed Double-Precision Floating-Point Values".
   668  //
   669  // Mnemonic        : ADDPD
   670  // Supported forms : (2 forms)
   671  //
   672  //    * ADDPD xmm, xmm     [SSE2]
   673  //    * ADDPD m128, xmm    [SSE2]
   674  //
   675  func (self *Program) ADDPD(v0 interface{}, v1 interface{}) *Instruction {
   676      p := self.alloc("ADDPD", 2, Operands { v0, v1 })
   677      // ADDPD xmm, xmm
   678      if isXMM(v0) && isXMM(v1) {
   679          self.require(ISA_SSE2)                       // ADDPD requires SSE2
   680          p.domain = DomainMMXSSE
   681          p.add(0, func(m *_Encoding, v []interface{}) {
   682              m.emit(0x66)                             // encoding: 66 0F 58 /r (RM form, dest in reg field)
   683              m.rexo(hcode(v[1]), v[0], false)
   684              m.emit(0x0f)
   685              m.emit(0x58)
   686              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11, reg=dst, rm=src
   687          })
   688      }
   689      // ADDPD m128, xmm
   690      if isM128(v0) && isXMM(v1) {
   691          self.require(ISA_SSE2)
   692          p.domain = DomainMMXSSE
   693          p.add(0, func(m *_Encoding, v []interface{}) {
   694              m.emit(0x66)
   695              m.rexo(hcode(v[1]), addr(v[0]), false)
   696              m.emit(0x0f)
   697              m.emit(0x58)
   698              m.mrsd(lcode(v[1]), addr(v[0]), 1)
   699          })
   700      }
   701      if p.len == 0 {
   702          panic("invalid operands for ADDPD")          // no supported form matched the operand kinds
   703      }
   704      return p
   705  }
   706  
   707  // ADDPS performs "Add Packed Single-Precision Floating-Point Values".
   708  //
   709  // Mnemonic        : ADDPS
   710  // Supported forms : (2 forms)
   711  //
   712  //    * ADDPS xmm, xmm     [SSE]
   713  //    * ADDPS m128, xmm    [SSE]
   714  //
   715  func (self *Program) ADDPS(v0 interface{}, v1 interface{}) *Instruction {
   716      p := self.alloc("ADDPS", 2, Operands { v0, v1 })
   717      // ADDPS xmm, xmm
   718      if isXMM(v0) && isXMM(v1) {
   719          self.require(ISA_SSE)                        // ADDPS requires SSE
   720          p.domain = DomainMMXSSE
   721          p.add(0, func(m *_Encoding, v []interface{}) {
   722              m.rexo(hcode(v[1]), v[0], false)         // encoding: 0F 58 /r (no mandatory prefix)
   723              m.emit(0x0f)
   724              m.emit(0x58)
   725              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11, reg=dst, rm=src
   726          })
   727      }
   728      // ADDPS m128, xmm
   729      if isM128(v0) && isXMM(v1) {
   730          self.require(ISA_SSE)
   731          p.domain = DomainMMXSSE
   732          p.add(0, func(m *_Encoding, v []interface{}) {
   733              m.rexo(hcode(v[1]), addr(v[0]), false)
   734              m.emit(0x0f)
   735              m.emit(0x58)
   736              m.mrsd(lcode(v[1]), addr(v[0]), 1)
   737          })
   738      }
   739      if p.len == 0 {
   740          panic("invalid operands for ADDPS")          // no supported form matched the operand kinds
   741      }
   742      return p
   743  }
   744  
   745  // ADDQ performs "Add".
   746  //
   747  // Mnemonic        : ADD
   748  // Supported forms : (8 forms)
   749  //
   750  //    * ADDQ imm32, rax
   751  //    * ADDQ imm8, r64
   752  //    * ADDQ imm32, r64
   753  //    * ADDQ r64, r64
   754  //    * ADDQ m64, r64
   755  //    * ADDQ imm8, m64
   756  //    * ADDQ imm32, m64
   757  //    * ADDQ r64, m64
   758  //
   759  func (self *Program) ADDQ(v0 interface{}, v1 interface{}) *Instruction {
   760      p := self.alloc("ADDQ", 2, Operands { v0, v1 })
   761      // ADDQ imm32, rax
   762      if isImm32(v0) && v1 == RAX {
   763          p.domain = DomainGeneric
   764          p.add(0, func(m *_Encoding, v []interface{}) {
   765              m.emit(0x48)                // REX.W prefix: promote to 64-bit operand size
   766              m.emit(0x05)                // opcode 05 id: ADD RAX, imm32 (sign-extended to 64 bits)
   767              m.imm4(toImmAny(v[0]))
   768          })
   769      }
   770      // ADDQ imm8, r64
   771      if isImm8Ext(v0, 8) && isReg64(v1) {
   772          p.domain = DomainGeneric
   773          p.add(0, func(m *_Encoding, v []interface{}) {
   774              m.emit(0x48 | hcode(v[1]))  // REX.W, plus REX.B when dst is R8-R15
   775              m.emit(0x83)                // opcode 83 /0 ib: imm8 sign-extended to 64 bits
   776              m.emit(0xc0 | lcode(v[1]))  // ModRM: mod=11, reg=/0 (ADD), rm=dst register
   777              m.imm1(toImmAny(v[0]))
   778          })
   779      }
   780      // ADDQ imm32, r64
   781      if isImm32Ext(v0, 8) && isReg64(v1) {
   782          p.domain = DomainGeneric
   783          p.add(0, func(m *_Encoding, v []interface{}) {
   784              m.emit(0x48 | hcode(v[1]))
   785              m.emit(0x81)                // opcode 81 /0 id: imm32 sign-extended to 64 bits
   786              m.emit(0xc0 | lcode(v[1]))
   787              m.imm4(toImmAny(v[0]))
   788          })
   789      }
   790      // ADDQ r64, r64
   791      if isReg64(v0) && isReg64(v1) {
   792          p.domain = DomainGeneric
   793          p.add(0, func(m *_Encoding, v []interface{}) {    // MR form: REX.W 01 /r
   794              m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1]))    // REX.W + REX.R (src) + REX.B (dst)
   795              m.emit(0x01)
   796              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
   797          })
   798          p.add(0, func(m *_Encoding, v []interface{}) {    // equivalent RM form: REX.W 03 /r
   799              m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
   800              m.emit(0x03)
   801              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
   802          })
   803      }
   804      // ADDQ m64, r64
   805      if isM64(v0) && isReg64(v1) {
   806          p.domain = DomainGeneric
   807          p.add(0, func(m *_Encoding, v []interface{}) {
   808              m.rexm(1, hcode(v[1]), addr(v[0]))       // REX.W plus extension bits from the memory operand
   809              m.emit(0x03)                             // opcode 03 /r: ADD r64, r/m64
   810              m.mrsd(lcode(v[1]), addr(v[0]), 1)
   811          })
   812      }
   813      // ADDQ imm8, m64
   814      if isImm8Ext(v0, 8) && isM64(v1) {
   815          p.domain = DomainGeneric
   816          p.add(0, func(m *_Encoding, v []interface{}) {
   817              m.rexm(1, 0, addr(v[1]))
   818              m.emit(0x83)
   819              m.mrsd(0, addr(v[1]), 1)                 // reg field /0 selects ADD within the group-1 opcode
   820              m.imm1(toImmAny(v[0]))
   821          })
   822      }
   823      // ADDQ imm32, m64
   824      if isImm32Ext(v0, 8) && isM64(v1) {
   825          p.domain = DomainGeneric
   826          p.add(0, func(m *_Encoding, v []interface{}) {
   827              m.rexm(1, 0, addr(v[1]))
   828              m.emit(0x81)
   829              m.mrsd(0, addr(v[1]), 1)
   830              m.imm4(toImmAny(v[0]))
   831          })
   832      }
   833      // ADDQ r64, m64
   834      if isReg64(v0) && isM64(v1) {
   835          p.domain = DomainGeneric
   836          p.add(0, func(m *_Encoding, v []interface{}) {
   837              m.rexm(1, hcode(v[0]), addr(v[1]))
   838              m.emit(0x01)                             // opcode 01 /r: ADD r/m64, r64
   839              m.mrsd(lcode(v[0]), addr(v[1]), 1)
   840          })
   841      }
   842      if p.len == 0 {
   843          panic("invalid operands for ADDQ")           // no supported form matched the operand kinds
   844      }
   845      return p
   846  }
   847  
   848  // ADDSD performs "Add Scalar Double-Precision Floating-Point Values".
   849  //
   850  // Mnemonic        : ADDSD
   851  // Supported forms : (2 forms)
   852  //
   853  //    * ADDSD xmm, xmm    [SSE2]
   854  //    * ADDSD m64, xmm    [SSE2]
   855  //
   856  func (self *Program) ADDSD(v0 interface{}, v1 interface{}) *Instruction {
   857      p := self.alloc("ADDSD", 2, Operands { v0, v1 })
   858      // ADDSD xmm, xmm
   859      if isXMM(v0) && isXMM(v1) {
   860          self.require(ISA_SSE2)                       // ADDSD requires SSE2
   861          p.domain = DomainMMXSSE
   862          p.add(0, func(m *_Encoding, v []interface{}) {
   863              m.emit(0xf2)                             // encoding: F2 0F 58 /r (RM form, dest in reg field)
   864              m.rexo(hcode(v[1]), v[0], false)
   865              m.emit(0x0f)
   866              m.emit(0x58)
   867              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11, reg=dst, rm=src
   868          })
   869      }
   870      // ADDSD m64, xmm
   871      if isM64(v0) && isXMM(v1) {
   872          self.require(ISA_SSE2)
   873          p.domain = DomainMMXSSE
   874          p.add(0, func(m *_Encoding, v []interface{}) {
   875              m.emit(0xf2)
   876              m.rexo(hcode(v[1]), addr(v[0]), false)
   877              m.emit(0x0f)
   878              m.emit(0x58)
   879              m.mrsd(lcode(v[1]), addr(v[0]), 1)
   880          })
   881      }
   882      if p.len == 0 {
   883          panic("invalid operands for ADDSD")          // no supported form matched the operand kinds
   884      }
   885      return p
   886  }
   887  
   888  // ADDSS performs "Add Scalar Single-Precision Floating-Point Values".
   889  //
   890  // Mnemonic        : ADDSS
   891  // Supported forms : (2 forms)
   892  //
   893  //    * ADDSS xmm, xmm    [SSE]
   894  //    * ADDSS m32, xmm    [SSE]
   895  //
func (self *Program) ADDSS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ADDSS", 2, Operands { v0, v1 })
    // ADDSS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // Encoding: F3 [REX] 0F 58 /r -- ModRM.reg = destination XMM, ModRM.rm = source XMM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)   // optional REX only when XMM8-XMM15 are used
            m.emit(0x0f)
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM with mod=11 (register direct)
        })
    }
    // ADDSS m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // Encoding: F3 [REX] 0F 58 /r -- memory operand encoded via ModRM/SIB/displacement.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x58)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for ADDSS")
    }
    return p
}
   927  
   928  // ADDSUBPD performs "Packed Double-FP Add/Subtract".
   929  //
   930  // Mnemonic        : ADDSUBPD
   931  // Supported forms : (2 forms)
   932  //
   933  //    * ADDSUBPD xmm, xmm     [SSE3]
   934  //    * ADDSUBPD m128, xmm    [SSE3]
   935  //
func (self *Program) ADDSUBPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ADDSUBPD", 2, Operands { v0, v1 })
    // ADDSUBPD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        // Encoding: 66 [REX] 0F D0 /r -- ModRM.reg = destination XMM, ModRM.rm = source XMM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)   // optional REX only when XMM8-XMM15 are used
            m.emit(0x0f)
            m.emit(0xd0)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM with mod=11 (register direct)
        })
    }
    // ADDSUBPD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        // Encoding: 66 [REX] 0F D0 /r -- memory operand encoded via ModRM/SIB/displacement.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xd0)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for ADDSUBPD")
    }
    return p
}
   967  
   968  // ADDSUBPS performs "Packed Single-FP Add/Subtract".
   969  //
   970  // Mnemonic        : ADDSUBPS
   971  // Supported forms : (2 forms)
   972  //
   973  //    * ADDSUBPS xmm, xmm     [SSE3]
   974  //    * ADDSUBPS m128, xmm    [SSE3]
   975  //
func (self *Program) ADDSUBPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ADDSUBPS", 2, Operands { v0, v1 })
    // ADDSUBPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        // Encoding: F2 [REX] 0F D0 /r -- ModRM.reg = destination XMM, ModRM.rm = source XMM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), v[0], false)   // optional REX only when XMM8-XMM15 are used
            m.emit(0x0f)
            m.emit(0xd0)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM with mod=11 (register direct)
        })
    }
    // ADDSUBPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        // Encoding: F2 [REX] 0F D0 /r -- memory operand encoded via ModRM/SIB/displacement.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xd0)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for ADDSUBPS")
    }
    return p
}
  1007  
  1008  // ADDW performs "Add".
  1009  //
  1010  // Mnemonic        : ADD
  1011  // Supported forms : (8 forms)
  1012  //
  1013  //    * ADDW imm16, ax
  1014  //    * ADDW imm8, r16
  1015  //    * ADDW imm16, r16
  1016  //    * ADDW r16, r16
  1017  //    * ADDW m16, r16
  1018  //    * ADDW imm8, m16
  1019  //    * ADDW imm16, m16
  1020  //    * ADDW r16, m16
  1021  //
func (self *Program) ADDW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ADDW", 2, Operands { v0, v1 })
    // ADDW imm16, ax
    if isImm16(v0) && v1 == AX {
        p.domain = DomainGeneric
        // Encoding: 66 05 iw -- short AX-accumulator form, no ModRM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)   // operand-size override prefix (16-bit operation)
            m.emit(0x05)
            m.imm2(toImmAny(v[0]))
        })
    }
    // ADDW imm8, r16
    if isImm8Ext(v0, 2) && isReg16(v1) {
        p.domain = DomainGeneric
        // Encoding: 66 [REX] 83 /0 ib -- sign-extended 8-bit immediate.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0x83)
            m.emit(0xc0 | lcode(v[1]))   // ModRM: mod=11, reg=0 (ADD), rm=target register
            m.imm1(toImmAny(v[0]))
        })
    }
    // ADDW imm16, r16
    if isImm16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        // Encoding: 66 [REX] 81 /0 iw -- full 16-bit immediate.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0x81)
            m.emit(0xc0 | lcode(v[1]))   // ModRM: mod=11, reg=0 (ADD), rm=target register
            m.imm2(toImmAny(v[0]))
        })
    }
    // ADDW r16, r16
    if isReg16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        // Two equivalent encodings are registered; the assembler may pick either.
        // Encoding: 66 [REX] 01 /r -- ModRM.reg = source, ModRM.rm = destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x01)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
        // Encoding: 66 [REX] 03 /r -- operands swapped in ModRM (reg = destination).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x03)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // ADDW m16, r16
    if isM16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        // Encoding: 66 [REX] 03 /r -- load-and-add from memory.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x03)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // ADDW imm8, m16
    if isImm8Ext(v0, 2) && isM16(v1) {
        p.domain = DomainGeneric
        // Encoding: 66 [REX] 83 /0 ib -- sign-extended immediate to memory.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0x83)
            m.mrsd(0, addr(v[1]), 1)   // reg field 0 selects ADD in the 83 group
            m.imm1(toImmAny(v[0]))
        })
    }
    // ADDW imm16, m16
    if isImm16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        // Encoding: 66 [REX] 81 /0 iw -- full immediate to memory.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0x81)
            m.mrsd(0, addr(v[1]), 1)   // reg field 0 selects ADD in the 81 group
            m.imm2(toImmAny(v[0]))
        })
    }
    // ADDW r16, m16
    if isReg16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        // Encoding: 66 [REX] 01 /r -- store-and-add to memory.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x01)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for ADDW")
    }
    return p
}
  1118  
  1119  // ADOXL performs "Unsigned Integer Addition of Two Operands with Overflow Flag".
  1120  //
  1121  // Mnemonic        : ADOX
  1122  // Supported forms : (2 forms)
  1123  //
  1124  //    * ADOXL r32, r32    [ADX]
  1125  //    * ADOXL m32, r32    [ADX]
  1126  //
func (self *Program) ADOXL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ADOXL", 2, Operands { v0, v1 })
    // ADOXL r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_ADX)
        p.domain = DomainGeneric
        // Encoding: F3 [REX] 0F 38 F6 /r -- ModRM.reg = destination, ModRM.rm = source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xf6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM with mod=11 (register direct)
        })
    }
    // ADOXL m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_ADX)
        p.domain = DomainGeneric
        // Encoding: F3 [REX] 0F 38 F6 /r -- memory operand encoded via ModRM/SIB/displacement.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xf6)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for ADOXL")
    }
    return p
}
  1160  
  1161  // ADOXQ performs "Unsigned Integer Addition of Two Operands with Overflow Flag".
  1162  //
  1163  // Mnemonic        : ADOX
  1164  // Supported forms : (2 forms)
  1165  //
  1166  //    * ADOXQ r64, r64    [ADX]
  1167  //    * ADOXQ m64, r64    [ADX]
  1168  //
func (self *Program) ADOXQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ADOXQ", 2, Operands { v0, v1 })
    // ADOXQ r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_ADX)
        p.domain = DomainGeneric
        // Encoding: F3 REX.W 0F 38 F6 /r -- REX.W (0x48) is mandatory for the 64-bit form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))   // REX.W plus R/B extension bits
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xf6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM with mod=11 (register direct)
        })
    }
    // ADOXQ m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_ADX)
        p.domain = DomainGeneric
        // Encoding: F3 REX.W 0F 38 F6 /r -- rexm(1, ...) emits the REX.W-prefixed form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexm(1, hcode(v[1]), addr(v[0]))
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xf6)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for ADOXQ")
    }
    return p
}
  1202  
  1203  // AESDEC performs "Perform One Round of an AES Decryption Flow".
  1204  //
  1205  // Mnemonic        : AESDEC
  1206  // Supported forms : (2 forms)
  1207  //
  1208  //    * AESDEC xmm, xmm     [AES]
  1209  //    * AESDEC m128, xmm    [AES]
  1210  //
func (self *Program) AESDEC(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("AESDEC", 2, Operands { v0, v1 })
    // AESDEC xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AES)
        p.domain = DomainCrypto
        // Encoding: 66 [REX] 0F 38 DE /r -- ModRM.reg = state XMM, ModRM.rm = round-key XMM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xde)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM with mod=11 (register direct)
        })
    }
    // AESDEC m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AES)
        p.domain = DomainCrypto
        // Encoding: 66 [REX] 0F 38 DE /r -- round key taken from memory.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xde)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for AESDEC")
    }
    return p
}
  1244  
  1245  // AESDECLAST performs "Perform Last Round of an AES Decryption Flow".
  1246  //
  1247  // Mnemonic        : AESDECLAST
  1248  // Supported forms : (2 forms)
  1249  //
  1250  //    * AESDECLAST xmm, xmm     [AES]
  1251  //    * AESDECLAST m128, xmm    [AES]
  1252  //
func (self *Program) AESDECLAST(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("AESDECLAST", 2, Operands { v0, v1 })
    // AESDECLAST xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AES)
        p.domain = DomainCrypto
        // Encoding: 66 [REX] 0F 38 DF /r -- ModRM.reg = state XMM, ModRM.rm = round-key XMM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xdf)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM with mod=11 (register direct)
        })
    }
    // AESDECLAST m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AES)
        p.domain = DomainCrypto
        // Encoding: 66 [REX] 0F 38 DF /r -- round key taken from memory.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xdf)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for AESDECLAST")
    }
    return p
}
  1286  
  1287  // AESENC performs "Perform One Round of an AES Encryption Flow".
  1288  //
  1289  // Mnemonic        : AESENC
  1290  // Supported forms : (2 forms)
  1291  //
  1292  //    * AESENC xmm, xmm     [AES]
  1293  //    * AESENC m128, xmm    [AES]
  1294  //
func (self *Program) AESENC(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("AESENC", 2, Operands { v0, v1 })
    // AESENC xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AES)
        p.domain = DomainCrypto
        // Encoding: 66 [REX] 0F 38 DC /r -- ModRM.reg = state XMM, ModRM.rm = round-key XMM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xdc)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM with mod=11 (register direct)
        })
    }
    // AESENC m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AES)
        p.domain = DomainCrypto
        // Encoding: 66 [REX] 0F 38 DC /r -- round key taken from memory.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xdc)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for AESENC")
    }
    return p
}
  1328  
  1329  // AESENCLAST performs "Perform Last Round of an AES Encryption Flow".
  1330  //
  1331  // Mnemonic        : AESENCLAST
  1332  // Supported forms : (2 forms)
  1333  //
  1334  //    * AESENCLAST xmm, xmm     [AES]
  1335  //    * AESENCLAST m128, xmm    [AES]
  1336  //
func (self *Program) AESENCLAST(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("AESENCLAST", 2, Operands { v0, v1 })
    // AESENCLAST xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AES)
        p.domain = DomainCrypto
        // Encoding: 66 [REX] 0F 38 DD /r -- ModRM.reg = state XMM, ModRM.rm = round-key XMM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xdd)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM with mod=11 (register direct)
        })
    }
    // AESENCLAST m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AES)
        p.domain = DomainCrypto
        // Encoding: 66 [REX] 0F 38 DD /r -- round key taken from memory.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xdd)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for AESENCLAST")
    }
    return p
}
  1370  
  1371  // AESIMC performs "Perform the AES InvMixColumn Transformation".
  1372  //
  1373  // Mnemonic        : AESIMC
  1374  // Supported forms : (2 forms)
  1375  //
  1376  //    * AESIMC xmm, xmm     [AES]
  1377  //    * AESIMC m128, xmm    [AES]
  1378  //
func (self *Program) AESIMC(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("AESIMC", 2, Operands { v0, v1 })
    // AESIMC xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AES)
        p.domain = DomainCrypto
        // Encoding: 66 [REX] 0F 38 DB /r -- ModRM.reg = destination XMM, ModRM.rm = source XMM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xdb)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM with mod=11 (register direct)
        })
    }
    // AESIMC m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AES)
        p.domain = DomainCrypto
        // Encoding: 66 [REX] 0F 38 DB /r -- source taken from memory.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xdb)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for AESIMC")
    }
    return p
}
  1412  
  1413  // AESKEYGENASSIST performs "AES Round Key Generation Assist".
  1414  //
  1415  // Mnemonic        : AESKEYGENASSIST
  1416  // Supported forms : (2 forms)
  1417  //
  1418  //    * AESKEYGENASSIST imm8, xmm, xmm     [AES]
  1419  //    * AESKEYGENASSIST imm8, m128, xmm    [AES]
  1420  //
func (self *Program) AESKEYGENASSIST(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("AESKEYGENASSIST", 3, Operands { v0, v1, v2 })
    // AESKEYGENASSIST imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AES)
        p.domain = DomainCrypto
        // Encoding: 66 [REX] 0F 3A DF /r ib -- 3A escape map; trailing imm8 is the round constant selector.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0xdf)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))   // ModRM: reg = destination, rm = source
            m.imm1(toImmAny(v[0]))
        })
    }
    // AESKEYGENASSIST imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_AES)
        p.domain = DomainCrypto
        // Encoding: 66 [REX] 0F 3A DF /r ib -- source taken from memory.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0xdf)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for AESKEYGENASSIST")
    }
    return p
}
  1456  
  1457  // ANDB performs "Logical AND".
  1458  //
  1459  // Mnemonic        : AND
  1460  // Supported forms : (6 forms)
  1461  //
  1462  //    * ANDB imm8, al
  1463  //    * ANDB imm8, r8
  1464  //    * ANDB r8, r8
  1465  //    * ANDB m8, r8
  1466  //    * ANDB imm8, m8
  1467  //    * ANDB r8, m8
  1468  //
func (self *Program) ANDB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ANDB", 2, Operands { v0, v1 })
    // ANDB imm8, al
    if isImm8(v0) && v1 == AL {
        p.domain = DomainGeneric
        // Encoding: 24 ib -- short AL-accumulator form, no ModRM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x24)
            m.imm1(toImmAny(v[0]))
        })
    }
    // ANDB imm8, r8
    if isImm8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        // Encoding: [REX] 80 /4 ib -- REX forced for SPL/BPL/SIL/DIL-style registers.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0x80)
            m.emit(0xe0 | lcode(v[1]))   // ModRM: mod=11, reg=4 (AND), rm=target register
            m.imm1(toImmAny(v[0]))
        })
    }
    // ANDB r8, r8
    if isReg8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        // Two equivalent encodings are registered; the assembler may pick either.
        // Encoding: [REX] 20 /r -- ModRM.reg = source, ModRM.rm = destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], isReg8REX(v[0]) || isReg8REX(v[1]))
            m.emit(0x20)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
        // Encoding: [REX] 22 /r -- operands swapped in ModRM (reg = destination).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], isReg8REX(v[0]) || isReg8REX(v[1]))
            m.emit(0x22)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // ANDB m8, r8
    if isM8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        // Encoding: [REX] 22 /r -- load-and-AND from memory.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), isReg8REX(v[1]))
            m.emit(0x22)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // ANDB imm8, m8
    if isImm8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        // Encoding: [REX] 80 /4 ib -- immediate to memory.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0x80)
            m.mrsd(4, addr(v[1]), 1)   // reg field 4 selects AND in the 80 group
            m.imm1(toImmAny(v[0]))
        })
    }
    // ANDB r8, m8
    if isReg8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        // Encoding: [REX] 20 /r -- store-and-AND to memory.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), isReg8REX(v[0]))
            m.emit(0x20)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for ANDB")
    }
    return p
}
  1536  
  1537  // ANDL performs "Logical AND".
  1538  //
  1539  // Mnemonic        : AND
  1540  // Supported forms : (8 forms)
  1541  //
  1542  //    * ANDL imm32, eax
  1543  //    * ANDL imm8, r32
  1544  //    * ANDL imm32, r32
  1545  //    * ANDL r32, r32
  1546  //    * ANDL m32, r32
  1547  //    * ANDL imm8, m32
  1548  //    * ANDL imm32, m32
  1549  //    * ANDL r32, m32
  1550  //
func (self *Program) ANDL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ANDL", 2, Operands { v0, v1 })
    // ANDL imm32, eax
    if isImm32(v0) && v1 == EAX {
        p.domain = DomainGeneric
        // Encoding: 25 id -- short EAX-accumulator form, no ModRM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x25)
            m.imm4(toImmAny(v[0]))
        })
    }
    // ANDL imm8, r32
    if isImm8Ext(v0, 4) && isReg32(v1) {
        p.domain = DomainGeneric
        // Encoding: [REX] 83 /4 ib -- sign-extended 8-bit immediate.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0x83)
            m.emit(0xe0 | lcode(v[1]))   // ModRM: mod=11, reg=4 (AND), rm=target register
            m.imm1(toImmAny(v[0]))
        })
    }
    // ANDL imm32, r32
    if isImm32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        // Encoding: [REX] 81 /4 id -- full 32-bit immediate.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0x81)
            m.emit(0xe0 | lcode(v[1]))   // ModRM: mod=11, reg=4 (AND), rm=target register
            m.imm4(toImmAny(v[0]))
        })
    }
    // ANDL r32, r32
    if isReg32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        // Two equivalent encodings are registered; the assembler may pick either.
        // Encoding: [REX] 21 /r -- ModRM.reg = source, ModRM.rm = destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x21)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
        // Encoding: [REX] 23 /r -- operands swapped in ModRM (reg = destination).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x23)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // ANDL m32, r32
    if isM32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        // Encoding: [REX] 23 /r -- load-and-AND from memory.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x23)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // ANDL imm8, m32
    if isImm8Ext(v0, 4) && isM32(v1) {
        p.domain = DomainGeneric
        // Encoding: [REX] 83 /4 ib -- sign-extended immediate to memory.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0x83)
            m.mrsd(4, addr(v[1]), 1)   // reg field 4 selects AND in the 83 group
            m.imm1(toImmAny(v[0]))
        })
    }
    // ANDL imm32, m32
    if isImm32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        // Encoding: [REX] 81 /4 id -- full immediate to memory.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0x81)
            m.mrsd(4, addr(v[1]), 1)   // reg field 4 selects AND in the 81 group
            m.imm4(toImmAny(v[0]))
        })
    }
    // ANDL r32, m32
    if isReg32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        // Encoding: [REX] 21 /r -- store-and-AND to memory.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x21)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for ANDL")
    }
    return p
}
  1638  
  1639  // ANDNL performs "Logical AND NOT".
  1640  //
  1641  // Mnemonic        : ANDN
  1642  // Supported forms : (2 forms)
  1643  //
  1644  //    * ANDNL r32, r32, r32    [BMI]
  1645  //    * ANDNL m32, r32, r32    [BMI]
  1646  //
func (self *Program) ANDNL(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("ANDNL", 3, Operands { v0, v1, v2 })
    // ANDNL r32, r32, r32
    if isReg32(v0) && isReg32(v1) && isReg32(v2) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        // Encoding: VEX (3-byte, C4) map 0F38, opcode F2 /r, built by hand:
        // byte 1 carries inverted R/B bits, byte 2 carries the inverted vvvv (second source) field.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))   // VEX byte 1: R/B flipped in, map = 0F38
            m.emit(0x78 ^ (hlcode(v[1]) << 3))                       // VEX byte 2: vvvv = v[1] (inverted), W0
            m.emit(0xf2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))            // ModRM: reg = destination, rm = first source
        })
    }
    // ANDNL m32, r32, r32
    if isM32(v0) && isReg32(v1) && isReg32(v2) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        // Same VEX.0F38 F2 /r encoding; vex3 helper builds the prefix for the memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x00, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xf2)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for ANDNL")
    }
    return p
}
  1676  
  1677  // ANDNPD performs "Bitwise Logical AND NOT of Packed Double-Precision Floating-Point Values".
  1678  //
  1679  // Mnemonic        : ANDNPD
  1680  // Supported forms : (2 forms)
  1681  //
  1682  //    * ANDNPD xmm, xmm     [SSE2]
  1683  //    * ANDNPD m128, xmm    [SSE2]
  1684  //
func (self *Program) ANDNPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ANDNPD", 2, Operands { v0, v1 })
    // ANDNPD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 [REX] 0F 55 /r -- ModRM.reg = destination XMM, ModRM.rm = source XMM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x55)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM with mod=11 (register direct)
        })
    }
    // ANDNPD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 [REX] 0F 55 /r -- source taken from memory.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x55)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for ANDNPD")
    }
    return p
}
  1716  
// ANDNPS performs "Bitwise Logical AND NOT of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : ANDNPS
// Supported forms : (2 forms)
//
//    * ANDNPS xmm, xmm     [SSE]
//    * ANDNPS m128, xmm    [SSE]
//
// Each matching operand form registers one candidate encoding via p.add;
// the function panics if the operands match none of the supported forms.
func (self *Program) ANDNPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ANDNPS", 2, Operands { v0, v1 })
    // ANDNPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                   // optional REX prefix for extended registers
            m.emit(0x0f)                                       // opcode: 0F 55 /r (no mandatory prefix)
            m.emit(0x55)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))      // ModRM: mod=11, reg=v1 (dst), rm=v0 (src)
        })
    }
    // ANDNPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)             // optional REX prefix for extended registers
            m.emit(0x0f)                                       // opcode: 0F 55 /r
            m.emit(0x55)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                 // ModRM/SIB/disp for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for ANDNPS")
    }
    return p
}
  1754  
// ANDNQ performs "Logical AND NOT".
//
// Mnemonic        : ANDN
// Supported forms : (2 forms)
//
//    * ANDNQ r64, r64, r64    [BMI]
//    * ANDNQ m64, r64, r64    [BMI]
//
// Both forms are VEX-encoded (BMI). The register form hand-assembles the
// 3-byte VEX prefix; the memory form delegates to m.vex3, which also has
// the memory operand available for the VEX.X/B bits.
// The function panics if the operands match none of the supported forms.
func (self *Program) ANDNQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("ANDNQ", 3, Operands { v0, v1, v2 })
    // ANDNQ r64, r64, r64
    if isReg64(v0) && isReg64(v1) && isReg64(v2) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                                // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))     // VEX byte 2: inverted R/B bits from dst (v2) and rm (v0), map 0F38
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))                         // VEX byte 3: W=1 (64-bit), inverted vvvv = v1
            m.emit(0xf2)                                                // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))              // ModRM: mod=11, reg=v2 (dst), rm=v0
        })
    }
    // ANDNQ m64, r64, r64
    if isM64(v0) && isReg64(v1) && isReg64(v2) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x80, hcode(v[2]), addr(v[0]), hlcode(v[1]))    // VEX.0F38.W1, vvvv = v1
            m.emit(0xf2)                                                        // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                                  // ModRM/SIB/disp for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for ANDNQ")
    }
    return p
}
  1792  
// ANDPD performs "Bitwise Logical AND of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : ANDPD
// Supported forms : (2 forms)
//
//    * ANDPD xmm, xmm     [SSE2]
//    * ANDPD m128, xmm    [SSE2]
//
// Each matching operand form registers one candidate encoding via p.add;
// the function panics if the operands match none of the supported forms.
func (self *Program) ANDPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ANDPD", 2, Operands { v0, v1 })
    // ANDPD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                       // mandatory 0x66 prefix
            m.rexo(hcode(v[1]), v[0], false)                   // optional REX prefix for extended registers
            m.emit(0x0f)                                       // opcode: 66 0F 54 /r
            m.emit(0x54)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))      // ModRM: mod=11, reg=v1 (dst), rm=v0 (src)
        })
    }
    // ANDPD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                       // mandatory 0x66 prefix
            m.rexo(hcode(v[1]), addr(v[0]), false)             // optional REX prefix for extended registers
            m.emit(0x0f)                                       // opcode: 66 0F 54 /r
            m.emit(0x54)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                 // ModRM/SIB/disp for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for ANDPD")
    }
    return p
}
  1832  
// ANDPS performs "Bitwise Logical AND of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : ANDPS
// Supported forms : (2 forms)
//
//    * ANDPS xmm, xmm     [SSE]
//    * ANDPS m128, xmm    [SSE]
//
// Each matching operand form registers one candidate encoding via p.add;
// the function panics if the operands match none of the supported forms.
func (self *Program) ANDPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ANDPS", 2, Operands { v0, v1 })
    // ANDPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                   // optional REX prefix for extended registers
            m.emit(0x0f)                                       // opcode: 0F 54 /r (no mandatory prefix)
            m.emit(0x54)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))      // ModRM: mod=11, reg=v1 (dst), rm=v0 (src)
        })
    }
    // ANDPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)             // optional REX prefix for extended registers
            m.emit(0x0f)                                       // opcode: 0F 54 /r
            m.emit(0x54)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                 // ModRM/SIB/disp for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for ANDPS")
    }
    return p
}
  1870  
// ANDQ performs "Logical AND".
//
// Mnemonic        : AND
// Supported forms : (8 forms)
//
//    * ANDQ imm32, rax
//    * ANDQ imm8, r64
//    * ANDQ imm32, r64
//    * ANDQ r64, r64
//    * ANDQ imm8, m64
//    * ANDQ m64, r64
//    * ANDQ imm32, m64
//    * ANDQ r64, m64
//
// All forms carry REX.W (0x48 base) for 64-bit operand size. The r64,r64
// form registers two equivalent encodings (AND r/m64,r64 and AND r64,r/m64)
// and lets the encoder pick one. The function panics if the operands match
// none of the supported forms.
func (self *Program) ANDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ANDQ", 2, Operands { v0, v1 })
    // ANDQ imm32, rax
    if isImm32(v0) && v1 == RAX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48)               // REX.W
            m.emit(0x25)               // opcode: AND rax, imm32 (short form, no ModRM)
            m.imm4(toImmAny(v[0]))     // 32-bit immediate
        })
    }
    // ANDQ imm8, r64
    if isImm8Ext(v0, 8) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))     // REX.W + B extension for v1
            m.emit(0x83)                   // opcode: 83 /4 ib
            m.emit(0xe0 | lcode(v[1]))     // ModRM: mod=11, reg=4 (/4 = AND), rm=v1
            m.imm1(toImmAny(v[0]))         // 8-bit immediate
        })
    }
    // ANDQ imm32, r64
    if isImm32Ext(v0, 8) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))     // REX.W + B extension for v1
            m.emit(0x81)                   // opcode: 81 /4 id
            m.emit(0xe0 | lcode(v[1]))     // ModRM: mod=11, reg=4 (/4 = AND), rm=v1
            m.imm4(toImmAny(v[0]))         // 32-bit immediate
        })
    }
    // ANDQ r64, r64
    if isReg64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        // encoding 1: AND r/m64, r64 (21 /r)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1]))      // REX.W + R (v0) + B (v1)
            m.emit(0x21)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))      // ModRM: mod=11, reg=v0, rm=v1
        })
        // encoding 2: AND r64, r/m64 (23 /r)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))      // REX.W + R (v1) + B (v0)
            m.emit(0x23)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))      // ModRM: mod=11, reg=v1, rm=v0
        })
    }
    // ANDQ m64, r64
    if isM64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))     // REX.W + extension bits for memory operand
            m.emit(0x23)                           // opcode: 23 /r (AND r64, r/m64)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)     // ModRM/SIB/disp for the memory operand
        })
    }
    // ANDQ imm8, m64
    if isImm8Ext(v0, 8) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))               // REX.W; reg field unused (opcode extension)
            m.emit(0x83)                           // opcode: 83 /4 ib
            m.mrsd(4, addr(v[1]), 1)               // ModRM.reg = 4 (/4 = AND)
            m.imm1(toImmAny(v[0]))                 // 8-bit immediate
        })
    }
    // ANDQ imm32, m64
    if isImm32Ext(v0, 8) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))               // REX.W; reg field unused (opcode extension)
            m.emit(0x81)                           // opcode: 81 /4 id
            m.mrsd(4, addr(v[1]), 1)               // ModRM.reg = 4 (/4 = AND)
            m.imm4(toImmAny(v[0]))                 // 32-bit immediate
        })
    }
    // ANDQ r64, m64
    if isReg64(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[0]), addr(v[1]))     // REX.W + extension bits for memory operand
            m.emit(0x21)                           // opcode: 21 /r (AND r/m64, r64)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)     // ModRM/SIB/disp for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for ANDQ")
    }
    return p
}
  1973  
// ANDW performs "Logical AND".
//
// Mnemonic        : AND
// Supported forms : (8 forms)
//
//    * ANDW imm16, ax
//    * ANDW imm8, r16
//    * ANDW imm16, r16
//    * ANDW r16, r16
//    * ANDW m16, r16
//    * ANDW imm8, m16
//    * ANDW imm16, m16
//    * ANDW r16, m16
//
// All forms start with the 0x66 operand-size prefix to select 16-bit
// operands. The r16,r16 form registers two equivalent encodings (21 /r and
// 23 /r) and lets the encoder pick one. The function panics if the operands
// match none of the supported forms.
func (self *Program) ANDW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ANDW", 2, Operands { v0, v1 })
    // ANDW imm16, ax
    if isImm16(v0) && v1 == AX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)               // operand-size prefix (16-bit)
            m.emit(0x25)               // opcode: AND ax, imm16 (short form, no ModRM)
            m.imm2(toImmAny(v[0]))     // 16-bit immediate
        })
    }
    // ANDW imm8, r16
    if isImm8Ext(v0, 2) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                   // operand-size prefix (16-bit)
            m.rexo(0, v[1], false)         // optional REX prefix for extended registers
            m.emit(0x83)                   // opcode: 83 /4 ib
            m.emit(0xe0 | lcode(v[1]))     // ModRM: mod=11, reg=4 (/4 = AND), rm=v1
            m.imm1(toImmAny(v[0]))         // 8-bit immediate
        })
    }
    // ANDW imm16, r16
    if isImm16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                   // operand-size prefix (16-bit)
            m.rexo(0, v[1], false)         // optional REX prefix for extended registers
            m.emit(0x81)                   // opcode: 81 /4 iw
            m.emit(0xe0 | lcode(v[1]))     // ModRM: mod=11, reg=4 (/4 = AND), rm=v1
            m.imm2(toImmAny(v[0]))         // 16-bit immediate
        })
    }
    // ANDW r16, r16
    if isReg16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        // encoding 1: AND r/m16, r16 (21 /r)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x21)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))      // ModRM: mod=11, reg=v0, rm=v1
        })
        // encoding 2: AND r16, r/m16 (23 /r)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x23)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))      // ModRM: mod=11, reg=v1, rm=v0
        })
    }
    // ANDW m16, r16
    if isM16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                           // operand-size prefix (16-bit)
            m.rexo(hcode(v[1]), addr(v[0]), false) // optional REX prefix
            m.emit(0x23)                           // opcode: 23 /r (AND r16, r/m16)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)     // ModRM/SIB/disp for the memory operand
        })
    }
    // ANDW imm8, m16
    if isImm8Ext(v0, 2) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                           // operand-size prefix (16-bit)
            m.rexo(0, addr(v[1]), false)           // optional REX prefix
            m.emit(0x83)                           // opcode: 83 /4 ib
            m.mrsd(4, addr(v[1]), 1)               // ModRM.reg = 4 (/4 = AND)
            m.imm1(toImmAny(v[0]))                 // 8-bit immediate
        })
    }
    // ANDW imm16, m16
    if isImm16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                           // operand-size prefix (16-bit)
            m.rexo(0, addr(v[1]), false)           // optional REX prefix
            m.emit(0x81)                           // opcode: 81 /4 iw
            m.mrsd(4, addr(v[1]), 1)               // ModRM.reg = 4 (/4 = AND)
            m.imm2(toImmAny(v[0]))                 // 16-bit immediate
        })
    }
    // ANDW r16, m16
    if isReg16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                           // operand-size prefix (16-bit)
            m.rexo(hcode(v[0]), addr(v[1]), false) // optional REX prefix
            m.emit(0x21)                           // opcode: 21 /r (AND r/m16, r16)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)     // ModRM/SIB/disp for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for ANDW")
    }
    return p
}
  2084  
// BEXTR performs "Bit Field Extract".
//
// Mnemonic        : BEXTR
// Supported forms : (8 forms)
//
//    * BEXTR imm32, r32, r32    [TBM]
//    * BEXTR imm32, m32, r32    [TBM]
//    * BEXTR imm32, r64, r64    [TBM]
//    * BEXTR imm32, m64, r64    [TBM]
//    * BEXTR r32, r32, r32      [BMI]
//    * BEXTR r32, m32, r32      [BMI]
//    * BEXTR r64, r64, r64      [BMI]
//    * BEXTR r64, m64, r64      [BMI]
//
// The immediate forms use the AMD XOP encoding (0x8F prefix, TBM); the
// register-control forms use the VEX encoding (0xC4 prefix, BMI). Register
// forms hand-assemble the 3-byte prefix; memory forms delegate to m.vex3.
// The function panics if the operands match none of the supported forms.
func (self *Program) BEXTR(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("BEXTR", 3, Operands { v0, v1, v2 })
    // BEXTR imm32, r32, r32
    if isImm32(v0) && isReg32(v1) && isReg32(v2) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                                // XOP prefix
            m.emit(0xea ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))     // inverted R/B bits from dst (v2) and rm (v1), map 0A
            m.emit(0x78)                                                // W=0 (32-bit), vvvv unused
            m.emit(0x10)                                                // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))              // ModRM: mod=11, reg=v2 (dst), rm=v1
            m.imm4(toImmAny(v[0]))                                      // 32-bit control immediate
        })
    }
    // BEXTR imm32, m32, r32
    if isImm32(v0) && isM32(v1) && isReg32(v2) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1010, 0x00, hcode(v[2]), addr(v[1]), 0)     // XOP.0A.W0
            m.emit(0x10)                                                // opcode
            m.mrsd(lcode(v[2]), addr(v[1]), 1)                          // ModRM/SIB/disp for the memory operand
            m.imm4(toImmAny(v[0]))                                      // 32-bit control immediate
        })
    }
    // BEXTR imm32, r64, r64
    if isImm32(v0) && isReg64(v1) && isReg64(v2) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                                // XOP prefix
            m.emit(0xea ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))     // inverted R/B bits from dst (v2) and rm (v1), map 0A
            m.emit(0xf8)                                                // W=1 (64-bit), vvvv unused
            m.emit(0x10)                                                // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))              // ModRM: mod=11, reg=v2 (dst), rm=v1
            m.imm4(toImmAny(v[0]))                                      // 32-bit control immediate
        })
    }
    // BEXTR imm32, m64, r64
    if isImm32(v0) && isM64(v1) && isReg64(v2) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1010, 0x80, hcode(v[2]), addr(v[1]), 0)     // XOP.0A.W1
            m.emit(0x10)                                                // opcode
            m.mrsd(lcode(v[2]), addr(v[1]), 1)                          // ModRM/SIB/disp for the memory operand
            m.imm4(toImmAny(v[0]))                                      // 32-bit control immediate
        })
    }
    // BEXTR r32, r32, r32
    if isReg32(v0) && isReg32(v1) && isReg32(v2) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                                // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))     // inverted R/B bits from dst (v2) and rm (v1), map 0F38
            m.emit(0x78 ^ (hlcode(v[0]) << 3))                         // W=0 (32-bit), inverted vvvv = v0 (control register)
            m.emit(0xf7)                                                // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))              // ModRM: mod=11, reg=v2 (dst), rm=v1
        })
    }
    // BEXTR r32, m32, r32
    if isReg32(v0) && isM32(v1) && isReg32(v2) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x00, hcode(v[2]), addr(v[1]), hlcode(v[0]))    // VEX.0F38.W0, vvvv = v0
            m.emit(0xf7)                                                        // opcode
            m.mrsd(lcode(v[2]), addr(v[1]), 1)                                  // ModRM/SIB/disp for the memory operand
        })
    }
    // BEXTR r64, r64, r64
    if isReg64(v0) && isReg64(v1) && isReg64(v2) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                                // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))     // inverted R/B bits from dst (v2) and rm (v1), map 0F38
            m.emit(0xf8 ^ (hlcode(v[0]) << 3))                         // W=1 (64-bit), inverted vvvv = v0 (control register)
            m.emit(0xf7)                                                // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))              // ModRM: mod=11, reg=v2 (dst), rm=v1
        })
    }
    // BEXTR r64, m64, r64
    if isReg64(v0) && isM64(v1) && isReg64(v2) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x80, hcode(v[2]), addr(v[1]), hlcode(v[0]))    // VEX.0F38.W1, vvvv = v0
            m.emit(0xf7)                                                        // opcode
            m.mrsd(lcode(v[2]), addr(v[1]), 1)                                  // ModRM/SIB/disp for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for BEXTR")
    }
    return p
}
  2198  
// BLCFILL performs "Fill From Lowest Clear Bit".
//
// Mnemonic        : BLCFILL
// Supported forms : (4 forms)
//
//    * BLCFILL r32, r32    [TBM]
//    * BLCFILL m32, r32    [TBM]
//    * BLCFILL r64, r64    [TBM]
//    * BLCFILL m64, r64    [TBM]
//
// All forms use the AMD XOP encoding (0x8F prefix, map 09, opcode 01 /1);
// the destination v1 is carried in the inverted vvvv field and the source
// v0 in ModRM.rm. The function panics if the operands match no form.
func (self *Program) BLCFILL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BLCFILL", 2, Operands { v0, v1 })
    // BLCFILL r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                           // XOP prefix
            m.emit(0xe9 ^ (hcode(v[0]) << 5))      // inverted B bit from rm (v0), map 09
            m.emit(0x78 ^ (hlcode(v[1]) << 3))     // W=0 (32-bit), inverted vvvv = v1 (dst)
            m.emit(0x01)                           // opcode: 01 /1
            m.emit(0xc8 | lcode(v[0]))             // ModRM: mod=11, reg=1 (/1), rm=v0
        })
    }
    // BLCFILL m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, 0, addr(v[0]), hlcode(v[1]))    // XOP.09.W0, vvvv = v1 (dst)
            m.emit(0x01)                                                // opcode: 01 /1
            m.mrsd(1, addr(v[0]), 1)                                    // ModRM.reg = 1 (/1), memory operand
        })
    }
    // BLCFILL r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                           // XOP prefix
            m.emit(0xe9 ^ (hcode(v[0]) << 5))      // inverted B bit from rm (v0), map 09
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))     // W=1 (64-bit), inverted vvvv = v1 (dst)
            m.emit(0x01)                           // opcode: 01 /1
            m.emit(0xc8 | lcode(v[0]))             // ModRM: mod=11, reg=1 (/1), rm=v0
        })
    }
    // BLCFILL m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x80, 0, addr(v[0]), hlcode(v[1]))    // XOP.09.W1, vvvv = v1 (dst)
            m.emit(0x01)                                                // opcode: 01 /1
            m.mrsd(1, addr(v[0]), 1)                                    // ModRM.reg = 1 (/1), memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for BLCFILL")
    }
    return p
}
  2260  
// BLCI performs "Isolate Lowest Clear Bit".
//
// Mnemonic        : BLCI
// Supported forms : (4 forms)
//
//    * BLCI r32, r32    [TBM]
//    * BLCI m32, r32    [TBM]
//    * BLCI r64, r64    [TBM]
//    * BLCI m64, r64    [TBM]
//
// All forms use the AMD XOP encoding (0x8F prefix, map 09, opcode 02 /6);
// the destination v1 is carried in the inverted vvvv field and the source
// v0 in ModRM.rm. The function panics if the operands match no form.
func (self *Program) BLCI(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BLCI", 2, Operands { v0, v1 })
    // BLCI r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                           // XOP prefix
            m.emit(0xe9 ^ (hcode(v[0]) << 5))      // inverted B bit from rm (v0), map 09
            m.emit(0x78 ^ (hlcode(v[1]) << 3))     // W=0 (32-bit), inverted vvvv = v1 (dst)
            m.emit(0x02)                           // opcode: 02 /6
            m.emit(0xf0 | lcode(v[0]))             // ModRM: mod=11, reg=6 (/6), rm=v0
        })
    }
    // BLCI m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, 0, addr(v[0]), hlcode(v[1]))    // XOP.09.W0, vvvv = v1 (dst)
            m.emit(0x02)                                                // opcode: 02 /6
            m.mrsd(6, addr(v[0]), 1)                                    // ModRM.reg = 6 (/6), memory operand
        })
    }
    // BLCI r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                           // XOP prefix
            m.emit(0xe9 ^ (hcode(v[0]) << 5))      // inverted B bit from rm (v0), map 09
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))     // W=1 (64-bit), inverted vvvv = v1 (dst)
            m.emit(0x02)                           // opcode: 02 /6
            m.emit(0xf0 | lcode(v[0]))             // ModRM: mod=11, reg=6 (/6), rm=v0
        })
    }
    // BLCI m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x80, 0, addr(v[0]), hlcode(v[1]))    // XOP.09.W1, vvvv = v1 (dst)
            m.emit(0x02)                                                // opcode: 02 /6
            m.mrsd(6, addr(v[0]), 1)                                    // ModRM.reg = 6 (/6), memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for BLCI")
    }
    return p
}
  2322  
// BLCIC performs "Isolate Lowest Set Bit and Complement".
//
// Mnemonic        : BLCIC
// Supported forms : (4 forms)
//
//    * BLCIC r32, r32    [TBM]
//    * BLCIC m32, r32    [TBM]
//    * BLCIC r64, r64    [TBM]
//    * BLCIC m64, r64    [TBM]
//
// All forms use the AMD XOP encoding (0x8F prefix, map 09, opcode 01 /5);
// the destination v1 is carried in the inverted vvvv field and the source
// v0 in ModRM.rm. The function panics if the operands match no form.
func (self *Program) BLCIC(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BLCIC", 2, Operands { v0, v1 })
    // BLCIC r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                           // XOP prefix
            m.emit(0xe9 ^ (hcode(v[0]) << 5))      // inverted B bit from rm (v0), map 09
            m.emit(0x78 ^ (hlcode(v[1]) << 3))     // W=0 (32-bit), inverted vvvv = v1 (dst)
            m.emit(0x01)                           // opcode: 01 /5
            m.emit(0xe8 | lcode(v[0]))             // ModRM: mod=11, reg=5 (/5), rm=v0
        })
    }
    // BLCIC m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, 0, addr(v[0]), hlcode(v[1]))    // XOP.09.W0, vvvv = v1 (dst)
            m.emit(0x01)                                                // opcode: 01 /5
            m.mrsd(5, addr(v[0]), 1)                                    // ModRM.reg = 5 (/5), memory operand
        })
    }
    // BLCIC r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                           // XOP prefix
            m.emit(0xe9 ^ (hcode(v[0]) << 5))      // inverted B bit from rm (v0), map 09
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))     // W=1 (64-bit), inverted vvvv = v1 (dst)
            m.emit(0x01)                           // opcode: 01 /5
            m.emit(0xe8 | lcode(v[0]))             // ModRM: mod=11, reg=5 (/5), rm=v0
        })
    }
    // BLCIC m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x80, 0, addr(v[0]), hlcode(v[1]))    // XOP.09.W1, vvvv = v1 (dst)
            m.emit(0x01)                                                // opcode: 01 /5
            m.mrsd(5, addr(v[0]), 1)                                    // ModRM.reg = 5 (/5), memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for BLCIC")
    }
    return p
}
  2384  
// BLCMSK performs "Mask From Lowest Clear Bit".
//
// Mnemonic        : BLCMSK
// Supported forms : (4 forms)
//
//    * BLCMSK r32, r32    [TBM]
//    * BLCMSK m32, r32    [TBM]
//    * BLCMSK r64, r64    [TBM]
//    * BLCMSK m64, r64    [TBM]
//
// All forms use the AMD XOP encoding (0x8F prefix, map 09, opcode 02 /1);
// the destination v1 is carried in the inverted vvvv field and the source
// v0 in ModRM.rm. The function panics if the operands match no form.
func (self *Program) BLCMSK(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BLCMSK", 2, Operands { v0, v1 })
    // BLCMSK r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                           // XOP prefix
            m.emit(0xe9 ^ (hcode(v[0]) << 5))      // inverted B bit from rm (v0), map 09
            m.emit(0x78 ^ (hlcode(v[1]) << 3))     // W=0 (32-bit), inverted vvvv = v1 (dst)
            m.emit(0x02)                           // opcode: 02 /1
            m.emit(0xc8 | lcode(v[0]))             // ModRM: mod=11, reg=1 (/1), rm=v0
        })
    }
    // BLCMSK m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, 0, addr(v[0]), hlcode(v[1]))    // XOP.09.W0, vvvv = v1 (dst)
            m.emit(0x02)                                                // opcode: 02 /1
            m.mrsd(1, addr(v[0]), 1)                                    // ModRM.reg = 1 (/1), memory operand
        })
    }
    // BLCMSK r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                           // XOP prefix
            m.emit(0xe9 ^ (hcode(v[0]) << 5))      // inverted B bit from rm (v0), map 09
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))     // W=1 (64-bit), inverted vvvv = v1 (dst)
            m.emit(0x02)                           // opcode: 02 /1
            m.emit(0xc8 | lcode(v[0]))             // ModRM: mod=11, reg=1 (/1), rm=v0
        })
    }
    // BLCMSK m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x80, 0, addr(v[0]), hlcode(v[1]))    // XOP.09.W1, vvvv = v1 (dst)
            m.emit(0x02)                                                // opcode: 02 /1
            m.mrsd(1, addr(v[0]), 1)                                    // ModRM.reg = 1 (/1), memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for BLCMSK")
    }
    return p
}
  2446  
// BLCS performs "Set Lowest Clear Bit".
//
// Mnemonic        : BLCS
// Supported forms : (4 forms)
//
//    * BLCS r32, r32    [TBM]
//    * BLCS m32, r32    [TBM]
//    * BLCS r64, r64    [TBM]
//    * BLCS m64, r64    [TBM]
//
// All forms use the AMD XOP encoding (0x8F prefix, map 09, opcode 01 /3);
// the destination v1 is carried in the inverted vvvv field and the source
// v0 in ModRM.rm. The function panics if the operands match no form.
func (self *Program) BLCS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BLCS", 2, Operands { v0, v1 })
    // BLCS r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                           // XOP prefix
            m.emit(0xe9 ^ (hcode(v[0]) << 5))      // inverted B bit from rm (v0), map 09
            m.emit(0x78 ^ (hlcode(v[1]) << 3))     // W=0 (32-bit), inverted vvvv = v1 (dst)
            m.emit(0x01)                           // opcode: 01 /3
            m.emit(0xd8 | lcode(v[0]))             // ModRM: mod=11, reg=3 (/3), rm=v0
        })
    }
    // BLCS m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, 0, addr(v[0]), hlcode(v[1]))    // XOP.09.W0, vvvv = v1 (dst)
            m.emit(0x01)                                                // opcode: 01 /3
            m.mrsd(3, addr(v[0]), 1)                                    // ModRM.reg = 3 (/3), memory operand
        })
    }
    // BLCS r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                           // XOP prefix
            m.emit(0xe9 ^ (hcode(v[0]) << 5))      // inverted B bit from rm (v0), map 09
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))     // W=1 (64-bit), inverted vvvv = v1 (dst)
            m.emit(0x01)                           // opcode: 01 /3
            m.emit(0xd8 | lcode(v[0]))             // ModRM: mod=11, reg=3 (/3), rm=v0
        })
    }
    // BLCS m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x80, 0, addr(v[0]), hlcode(v[1]))    // XOP.09.W1, vvvv = v1 (dst)
            m.emit(0x01)                                                // opcode: 01 /3
            m.mrsd(3, addr(v[0]), 1)                                    // ModRM.reg = 3 (/3), memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for BLCS")
    }
    return p
}
  2508  
// BLENDPD performs "Blend Packed Double Precision Floating-Point Values".
//
// Mnemonic        : BLENDPD
// Supported forms : (2 forms)
//
//    * BLENDPD imm8, xmm, xmm     [SSE4.1]
//    * BLENDPD imm8, m128, xmm    [SSE4.1]
//
// Each matching operand form registers one candidate encoding via p.add;
// the function panics if the operands match none of the supported forms.
func (self *Program) BLENDPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("BLENDPD", 3, Operands { v0, v1, v2 })
    // BLENDPD imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                       // mandatory 0x66 prefix
            m.rexo(hcode(v[2]), v[1], false)                   // optional REX prefix for extended registers
            m.emit(0x0f)                                       // opcode: 66 0F 3A 0D /r ib
            m.emit(0x3a)
            m.emit(0x0d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))      // ModRM: mod=11, reg=v2 (dst), rm=v1 (src)
            m.imm1(toImmAny(v[0]))                             // 8-bit blend-control immediate
        })
    }
    // BLENDPD imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                       // mandatory 0x66 prefix
            m.rexo(hcode(v[2]), addr(v[1]), false)             // optional REX prefix for extended registers
            m.emit(0x0f)                                       // opcode: 66 0F 3A 0D /r ib
            m.emit(0x3a)
            m.emit(0x0d)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)                 // ModRM/SIB/disp for the memory operand
            m.imm1(toImmAny(v[0]))                             // 8-bit blend-control immediate
        })
    }
    if p.len == 0 {
        panic("invalid operands for BLENDPD")
    }
    return p
}
  2552  
// BLENDPS performs "Blend Packed Single Precision Floating-Point Values".
//
// Mnemonic        : BLENDPS
// Supported forms : (2 forms)
//
//    * BLENDPS imm8, xmm, xmm     [SSE4.1]
//    * BLENDPS imm8, m128, xmm    [SSE4.1]
//
// Each matching operand form registers one candidate encoding via p.add;
// the function panics if the operands match none of the supported forms.
func (self *Program) BLENDPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("BLENDPS", 3, Operands { v0, v1, v2 })
    // BLENDPS imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                       // mandatory 0x66 prefix
            m.rexo(hcode(v[2]), v[1], false)                   // optional REX prefix for extended registers
            m.emit(0x0f)                                       // opcode: 66 0F 3A 0C /r ib
            m.emit(0x3a)
            m.emit(0x0c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))      // ModRM: mod=11, reg=v2 (dst), rm=v1 (src)
            m.imm1(toImmAny(v[0]))                             // 8-bit blend-control immediate
        })
    }
    // BLENDPS imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                       // mandatory 0x66 prefix
            m.rexo(hcode(v[2]), addr(v[1]), false)             // optional REX prefix for extended registers
            m.emit(0x0f)                                       // opcode: 66 0F 3A 0C /r ib
            m.emit(0x3a)
            m.emit(0x0c)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)                 // ModRM/SIB/disp for the memory operand
            m.imm1(toImmAny(v[0]))                             // 8-bit blend-control immediate
        })
    }
    if p.len == 0 {
        panic("invalid operands for BLENDPS")
    }
    return p
}
  2596  
// BLENDVPD performs "Variable Blend Packed Double Precision Floating-Point Values".
//
// Mnemonic        : BLENDVPD
// Supported forms : (2 forms)
//
//    * BLENDVPD xmm0, xmm, xmm     [SSE4.1]
//    * BLENDVPD xmm0, m128, xmm    [SSE4.1]
//
// The first operand must be the literal XMM0 register: it carries the blend
// mask and is implicit in the encoding (checked but never emitted). Panics if
// the operands match none of the supported forms.
func (self *Program) BLENDVPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("BLENDVPD", 3, Operands { v0, v1, v2 })
    // BLENDVPD xmm0, xmm, xmm
    if v0 == XMM0 && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // Encoding: 66 0F 38 15 /r
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))   // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // BLENDVPD xmm0, m128, xmm
    if v0 == XMM0 && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // Encoding: 66 0F 38 15 /r (memory source)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x15)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for BLENDVPD")
    }
    return p
}
  2638  
// BLENDVPS performs "Variable Blend Packed Single Precision Floating-Point Values".
//
// Mnemonic        : BLENDVPS
// Supported forms : (2 forms)
//
//    * BLENDVPS xmm0, xmm, xmm     [SSE4.1]
//    * BLENDVPS xmm0, m128, xmm    [SSE4.1]
//
// The first operand must be the literal XMM0 register: it carries the blend
// mask and is implicit in the encoding (checked but never emitted). Panics if
// the operands match none of the supported forms.
func (self *Program) BLENDVPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("BLENDVPS", 3, Operands { v0, v1, v2 })
    // BLENDVPS xmm0, xmm, xmm
    if v0 == XMM0 && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // Encoding: 66 0F 38 14 /r
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))   // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // BLENDVPS xmm0, m128, xmm
    if v0 == XMM0 && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // Encoding: 66 0F 38 14 /r (memory source)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x14)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for BLENDVPS")
    }
    return p
}
  2680  
// BLSFILL performs "Fill From Lowest Set Bit".
//
// Mnemonic        : BLSFILL
// Supported forms : (4 forms)
//
//    * BLSFILL r32, r32    [TBM]
//    * BLSFILL m32, r32    [TBM]
//    * BLSFILL r64, r64    [TBM]
//    * BLSFILL m64, r64    [TBM]
//
// AMD TBM instruction, XOP-encoded (map 09, opcode 01, reg field /2). The
// destination register (v1) goes in the prefix vvvv field; the source (v0)
// is the ModRM rm operand. Panics on an unsupported operand combination.
func (self *Program) BLSFILL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BLSFILL", 2, Operands { v0, v1 })
    // BLSFILL r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        // Encoding: XOP.LZ.09.W0 01 /2 — three-byte XOP prefix assembled by hand
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                // XOP escape byte
            m.emit(0xe9 ^ (hcode(v[0]) << 5))           // map 09; source high bit XORed into ~B
            m.emit(0x78 ^ (hlcode(v[1]) << 3))          // W=0; destination in inverted vvvv
            m.emit(0x01)
            m.emit(0xd0 | lcode(v[0]))                  // ModRM: mod=11, reg=/2, rm=src
        })
    }
    // BLSFILL m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        // Encoding: XOP.LZ.09.W0 01 /2 (memory source)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, 0, addr(v[0]), hlcode(v[1]))
            m.emit(0x01)
            m.mrsd(2, addr(v[0]), 1)                    // reg field /2 selects BLSFILL
        })
    }
    // BLSFILL r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        // Encoding: XOP.LZ.09.W1 01 /2 (W=1 selects 64-bit operand size)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))          // W=1; destination in inverted vvvv
            m.emit(0x01)
            m.emit(0xd0 | lcode(v[0]))
        })
    }
    // BLSFILL m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        // Encoding: XOP.LZ.09.W1 01 /2 (memory source; 0x80 sets W)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x80, 0, addr(v[0]), hlcode(v[1]))
            m.emit(0x01)
            m.mrsd(2, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for BLSFILL")
    }
    return p
}
  2742  
// BLSI performs "Isolate Lowest Set Bit".
//
// Mnemonic        : BLSI
// Supported forms : (4 forms)
//
//    * BLSI r32, r32    [BMI]
//    * BLSI m32, r32    [BMI]
//    * BLSI r64, r64    [BMI]
//    * BLSI m64, r64    [BMI]
//
// BMI1 instruction, VEX-encoded (map 0F38, opcode F3, reg field /3). The
// destination register (v1) goes in VEX.vvvv; the source (v0) is the ModRM
// rm operand. Panics on an unsupported operand combination.
func (self *Program) BLSI(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BLSI", 2, Operands { v0, v1 })
    // BLSI r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        // Encoding: VEX.LZ.0F38.W0 F3 /3 — three-byte VEX prefix assembled by hand
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                // VEX3 escape byte
            m.emit(0xe2 ^ (hcode(v[0]) << 5))           // map 0F38; source high bit XORed into ~B
            m.emit(0x78 ^ (hlcode(v[1]) << 3))          // W=0; destination in inverted vvvv
            m.emit(0xf3)
            m.emit(0xd8 | lcode(v[0]))                  // ModRM: mod=11, reg=/3, rm=src
        })
    }
    // BLSI m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        // Encoding: VEX.LZ.0F38.W0 F3 /3 (memory source)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x00, 0, addr(v[0]), hlcode(v[1]))
            m.emit(0xf3)
            m.mrsd(3, addr(v[0]), 1)                    // reg field /3 selects BLSI
        })
    }
    // BLSI r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        // Encoding: VEX.LZ.0F38.W1 F3 /3 (W=1 selects 64-bit operand size)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))          // W=1; destination in inverted vvvv
            m.emit(0xf3)
            m.emit(0xd8 | lcode(v[0]))
        })
    }
    // BLSI m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        // Encoding: VEX.LZ.0F38.W1 F3 /3 (memory source; 0x80 sets W)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x80, 0, addr(v[0]), hlcode(v[1]))
            m.emit(0xf3)
            m.mrsd(3, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for BLSI")
    }
    return p
}
  2804  
// BLSIC performs "Isolate Lowest Set Bit and Complement".
//
// Mnemonic        : BLSIC
// Supported forms : (4 forms)
//
//    * BLSIC r32, r32    [TBM]
//    * BLSIC m32, r32    [TBM]
//    * BLSIC r64, r64    [TBM]
//    * BLSIC m64, r64    [TBM]
//
// AMD TBM instruction, XOP-encoded (map 09, opcode 01, reg field /6). The
// destination register (v1) goes in the prefix vvvv field; the source (v0)
// is the ModRM rm operand. Panics on an unsupported operand combination.
func (self *Program) BLSIC(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BLSIC", 2, Operands { v0, v1 })
    // BLSIC r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        // Encoding: XOP.LZ.09.W0 01 /6 — three-byte XOP prefix assembled by hand
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                // XOP escape byte
            m.emit(0xe9 ^ (hcode(v[0]) << 5))           // map 09; source high bit XORed into ~B
            m.emit(0x78 ^ (hlcode(v[1]) << 3))          // W=0; destination in inverted vvvv
            m.emit(0x01)
            m.emit(0xf0 | lcode(v[0]))                  // ModRM: mod=11, reg=/6, rm=src
        })
    }
    // BLSIC m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        // Encoding: XOP.LZ.09.W0 01 /6 (memory source)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, 0, addr(v[0]), hlcode(v[1]))
            m.emit(0x01)
            m.mrsd(6, addr(v[0]), 1)                    // reg field /6 selects BLSIC
        })
    }
    // BLSIC r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        // Encoding: XOP.LZ.09.W1 01 /6 (W=1 selects 64-bit operand size)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))          // W=1; destination in inverted vvvv
            m.emit(0x01)
            m.emit(0xf0 | lcode(v[0]))
        })
    }
    // BLSIC m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        // Encoding: XOP.LZ.09.W1 01 /6 (memory source; 0x80 sets W)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x80, 0, addr(v[0]), hlcode(v[1]))
            m.emit(0x01)
            m.mrsd(6, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for BLSIC")
    }
    return p
}
  2866  
// BLSMSK performs "Mask From Lowest Set Bit".
//
// Mnemonic        : BLSMSK
// Supported forms : (4 forms)
//
//    * BLSMSK r32, r32    [BMI]
//    * BLSMSK m32, r32    [BMI]
//    * BLSMSK r64, r64    [BMI]
//    * BLSMSK m64, r64    [BMI]
//
// BMI1 instruction, VEX-encoded (map 0F38, opcode F3, reg field /2). The
// destination register (v1) goes in VEX.vvvv; the source (v0) is the ModRM
// rm operand. Panics on an unsupported operand combination.
func (self *Program) BLSMSK(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BLSMSK", 2, Operands { v0, v1 })
    // BLSMSK r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        // Encoding: VEX.LZ.0F38.W0 F3 /2 — three-byte VEX prefix assembled by hand
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                // VEX3 escape byte
            m.emit(0xe2 ^ (hcode(v[0]) << 5))           // map 0F38; source high bit XORed into ~B
            m.emit(0x78 ^ (hlcode(v[1]) << 3))          // W=0; destination in inverted vvvv
            m.emit(0xf3)
            m.emit(0xd0 | lcode(v[0]))                  // ModRM: mod=11, reg=/2, rm=src
        })
    }
    // BLSMSK m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        // Encoding: VEX.LZ.0F38.W0 F3 /2 (memory source)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x00, 0, addr(v[0]), hlcode(v[1]))
            m.emit(0xf3)
            m.mrsd(2, addr(v[0]), 1)                    // reg field /2 selects BLSMSK
        })
    }
    // BLSMSK r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        // Encoding: VEX.LZ.0F38.W1 F3 /2 (W=1 selects 64-bit operand size)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))          // W=1; destination in inverted vvvv
            m.emit(0xf3)
            m.emit(0xd0 | lcode(v[0]))
        })
    }
    // BLSMSK m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        // Encoding: VEX.LZ.0F38.W1 F3 /2 (memory source; 0x80 sets W)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x80, 0, addr(v[0]), hlcode(v[1]))
            m.emit(0xf3)
            m.mrsd(2, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for BLSMSK")
    }
    return p
}
  2928  
// BLSR performs "Reset Lowest Set Bit".
//
// Mnemonic        : BLSR
// Supported forms : (4 forms)
//
//    * BLSR r32, r32    [BMI]
//    * BLSR m32, r32    [BMI]
//    * BLSR r64, r64    [BMI]
//    * BLSR m64, r64    [BMI]
//
// BMI1 instruction, VEX-encoded (map 0F38, opcode F3, reg field /1). The
// destination register (v1) goes in VEX.vvvv; the source (v0) is the ModRM
// rm operand. Panics on an unsupported operand combination.
func (self *Program) BLSR(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BLSR", 2, Operands { v0, v1 })
    // BLSR r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        // Encoding: VEX.LZ.0F38.W0 F3 /1 — three-byte VEX prefix assembled by hand
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                // VEX3 escape byte
            m.emit(0xe2 ^ (hcode(v[0]) << 5))           // map 0F38; source high bit XORed into ~B
            m.emit(0x78 ^ (hlcode(v[1]) << 3))          // W=0; destination in inverted vvvv
            m.emit(0xf3)
            m.emit(0xc8 | lcode(v[0]))                  // ModRM: mod=11, reg=/1, rm=src
        })
    }
    // BLSR m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        // Encoding: VEX.LZ.0F38.W0 F3 /1 (memory source)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x00, 0, addr(v[0]), hlcode(v[1]))
            m.emit(0xf3)
            m.mrsd(1, addr(v[0]), 1)                    // reg field /1 selects BLSR
        })
    }
    // BLSR r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        // Encoding: VEX.LZ.0F38.W1 F3 /1 (W=1 selects 64-bit operand size)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))          // W=1; destination in inverted vvvv
            m.emit(0xf3)
            m.emit(0xc8 | lcode(v[0]))
        })
    }
    // BLSR m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        // Encoding: VEX.LZ.0F38.W1 F3 /1 (memory source; 0x80 sets W)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x80, 0, addr(v[0]), hlcode(v[1]))
            m.emit(0xf3)
            m.mrsd(1, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for BLSR")
    }
    return p
}
  2990  
// BSFL performs "Bit Scan Forward".
//
// Mnemonic        : BSF
// Supported forms : (2 forms)
//
//    * BSFL r32, r32
//    * BSFL m32, r32
//
// 32-bit operand-size variant of BSF; the destination register (v1) is
// placed in the ModRM reg field. Panics on an unsupported operand
// combination.
func (self *Program) BSFL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BSFL", 2, Operands { v0, v1 })
    // BSFL r32, r32
    if isReg32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        // Encoding: 0F BC /r
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)            // optional REX for extended registers
            m.emit(0x0f)
            m.emit(0xbc)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // BSFL m32, r32
    if isM32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        // Encoding: 0F BC /r (memory source)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xbc)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)          // ModRM/SIB/disp for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for BSFL")
    }
    return p
}
  3026  
// BSFQ performs "Bit Scan Forward".
//
// Mnemonic        : BSF
// Supported forms : (2 forms)
//
//    * BSFQ r64, r64
//    * BSFQ m64, r64
//
// 64-bit operand-size variant of BSF (mandatory REX.W prefix); the
// destination register (v1) is placed in the ModRM reg field. Panics on an
// unsupported operand combination.
func (self *Program) BSFQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BSFQ", 2, Operands { v0, v1 })
    // BSFQ r64, r64
    if isReg64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        // Encoding: REX.W 0F BC /r
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))   // REX.W with R/B extension bits
            m.emit(0x0f)
            m.emit(0xbc)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // BSFQ m64, r64
    if isM64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        // Encoding: REX.W 0F BC /r (memory source)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))          // REX with W=1 for the memory form
            m.emit(0x0f)
            m.emit(0xbc)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)          // ModRM/SIB/disp for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for BSFQ")
    }
    return p
}
  3062  
// BSFW performs "Bit Scan Forward".
//
// Mnemonic        : BSF
// Supported forms : (2 forms)
//
//    * BSFW r16, r16
//    * BSFW m16, r16
//
// 16-bit operand-size variant of BSF (66 operand-size prefix); the
// destination register (v1) is placed in the ModRM reg field. Panics on an
// unsupported operand combination.
func (self *Program) BSFW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BSFW", 2, Operands { v0, v1 })
    // BSFW r16, r16
    if isReg16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        // Encoding: 66 0F BC /r
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                // operand-size override prefix
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xbc)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // BSFW m16, r16
    if isM16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        // Encoding: 66 0F BC /r (memory source)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xbc)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)          // ModRM/SIB/disp for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for BSFW")
    }
    return p
}
  3100  
// BSRL performs "Bit Scan Reverse".
//
// Mnemonic        : BSR
// Supported forms : (2 forms)
//
//    * BSRL r32, r32
//    * BSRL m32, r32
//
// 32-bit operand-size variant of BSR; the destination register (v1) is
// placed in the ModRM reg field. Panics on an unsupported operand
// combination.
func (self *Program) BSRL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BSRL", 2, Operands { v0, v1 })
    // BSRL r32, r32
    if isReg32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        // Encoding: 0F BD /r
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)            // optional REX for extended registers
            m.emit(0x0f)
            m.emit(0xbd)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // BSRL m32, r32
    if isM32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        // Encoding: 0F BD /r (memory source)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xbd)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)          // ModRM/SIB/disp for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for BSRL")
    }
    return p
}
  3136  
// BSRQ performs "Bit Scan Reverse".
//
// Mnemonic        : BSR
// Supported forms : (2 forms)
//
//    * BSRQ r64, r64
//    * BSRQ m64, r64
//
// 64-bit operand-size variant of BSR (mandatory REX.W prefix); the
// destination register (v1) is placed in the ModRM reg field. Panics on an
// unsupported operand combination.
func (self *Program) BSRQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BSRQ", 2, Operands { v0, v1 })
    // BSRQ r64, r64
    if isReg64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        // Encoding: REX.W 0F BD /r
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))   // REX.W with R/B extension bits
            m.emit(0x0f)
            m.emit(0xbd)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // BSRQ m64, r64
    if isM64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        // Encoding: REX.W 0F BD /r (memory source)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))          // REX with W=1 for the memory form
            m.emit(0x0f)
            m.emit(0xbd)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)          // ModRM/SIB/disp for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for BSRQ")
    }
    return p
}
  3172  
// BSRW performs "Bit Scan Reverse".
//
// Mnemonic        : BSR
// Supported forms : (2 forms)
//
//    * BSRW r16, r16
//    * BSRW m16, r16
//
// 16-bit operand-size variant of BSR (66 operand-size prefix); the
// destination register (v1) is placed in the ModRM reg field. Panics on an
// unsupported operand combination.
func (self *Program) BSRW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BSRW", 2, Operands { v0, v1 })
    // BSRW r16, r16
    if isReg16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        // Encoding: 66 0F BD /r
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                // operand-size override prefix
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xbd)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // BSRW m16, r16
    if isM16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        // Encoding: 66 0F BD /r (memory source)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xbd)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)          // ModRM/SIB/disp for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for BSRW")
    }
    return p
}
  3210  
// BSWAPL performs "Byte Swap".
//
// Mnemonic        : BSWAP
// Supported forms : (1 form)
//
//    * BSWAPL r32
//
// 32-bit variant; the register is encoded directly in the opcode byte
// (0F C8+rd). Panics if the operand is not a 32-bit register.
func (self *Program) BSWAPL(v0 interface{}) *Instruction {
    p := self.alloc("BSWAPL", 1, Operands { v0 })
    // BSWAPL r32
    if isReg32(v0) {
        p.domain = DomainGeneric
        // Encoding: 0F C8+rd
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], false)                      // optional REX for extended registers
            m.emit(0x0f)
            m.emit(0xc8 | lcode(v[0]))                  // low 3 register bits added to the opcode
        })
    }
    if p.len == 0 {
        panic("invalid operands for BSWAPL")
    }
    return p
}
  3234  
// BSWAPQ performs "Byte Swap".
//
// Mnemonic        : BSWAP
// Supported forms : (1 form)
//
//    * BSWAPQ r64
//
// 64-bit variant (mandatory REX.W prefix); the register is encoded directly
// in the opcode byte (0F C8+rd). Panics if the operand is not a 64-bit
// register.
func (self *Program) BSWAPQ(v0 interface{}) *Instruction {
    p := self.alloc("BSWAPQ", 1, Operands { v0 })
    // BSWAPQ r64
    if isReg64(v0) {
        p.domain = DomainGeneric
        // Encoding: REX.W 0F C8+rd
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0]))                  // REX.W; B bit extends the opcode register
            m.emit(0x0f)
            m.emit(0xc8 | lcode(v[0]))                  // low 3 register bits added to the opcode
        })
    }
    if p.len == 0 {
        panic("invalid operands for BSWAPQ")
    }
    return p
}
  3258  
// BTCL performs "Bit Test and Complement".
//
// Mnemonic        : BTC
// Supported forms : (4 forms)
//
//    * BTCL imm8, r32
//    * BTCL r32, r32
//    * BTCL imm8, m32
//    * BTCL r32, m32
//
// 32-bit operand-size variant of BTC. Immediate forms use 0F BA /7 ib;
// register forms use 0F BB /r with the bit index (v0) in the ModRM reg
// field. Panics on an unsupported operand combination.
func (self *Program) BTCL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BTCL", 2, Operands { v0, v1 })
    // BTCL imm8, r32
    if isImm8(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        // Encoding: 0F BA /7 ib
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0x0f)
            m.emit(0xba)
            m.emit(0xf8 | lcode(v[1]))                  // ModRM: mod=11, reg=/7, rm=dst
            m.imm1(toImmAny(v[0]))                      // 8-bit bit index
        })
    }
    // BTCL r32, r32
    if isReg32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        // Encoding: 0F BB /r
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0xbb)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))   // ModRM: mod=11, reg=bit index, rm=dst
        })
    }
    // BTCL imm8, m32
    if isImm8(v0) && isM32(v1) {
        p.domain = DomainGeneric
        // Encoding: 0F BA /7 ib (memory destination)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xba)
            m.mrsd(7, addr(v[1]), 1)                    // reg field /7 selects BTC
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTCL r32, m32
    if isReg32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        // Encoding: 0F BB /r (memory destination)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xbb)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)          // ModRM/SIB/disp for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for BTCL")
    }
    return p
}
  3318  
// BTCQ performs "Bit Test and Complement".
//
// Mnemonic        : BTC
// Supported forms : (4 forms)
//
//    * BTCQ imm8, r64
//    * BTCQ r64, r64
//    * BTCQ imm8, m64
//    * BTCQ r64, m64
//
// 64-bit operand-size variant of BTC (mandatory REX.W prefix). Immediate
// forms use 0F BA /7 ib; register forms use 0F BB /r with the bit index
// (v0) in the ModRM reg field. Panics on an unsupported operand
// combination.
func (self *Program) BTCQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BTCQ", 2, Operands { v0, v1 })
    // BTCQ imm8, r64
    if isImm8(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        // Encoding: REX.W 0F BA /7 ib
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))                  // REX.W; B bit extends rm
            m.emit(0x0f)
            m.emit(0xba)
            m.emit(0xf8 | lcode(v[1]))                  // ModRM: mod=11, reg=/7, rm=dst
            m.imm1(toImmAny(v[0]))                      // 8-bit bit index
        })
    }
    // BTCQ r64, r64
    if isReg64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        // Encoding: REX.W 0F BB /r
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1]))   // REX.W with R/B extension bits
            m.emit(0x0f)
            m.emit(0xbb)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))   // ModRM: mod=11, reg=bit index, rm=dst
        })
    }
    // BTCQ imm8, m64
    if isImm8(v0) && isM64(v1) {
        p.domain = DomainGeneric
        // Encoding: REX.W 0F BA /7 ib (memory destination)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))                    // REX with W=1 for the memory form
            m.emit(0x0f)
            m.emit(0xba)
            m.mrsd(7, addr(v[1]), 1)                    // reg field /7 selects BTC
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTCQ r64, m64
    if isReg64(v0) && isM64(v1) {
        p.domain = DomainGeneric
        // Encoding: REX.W 0F BB /r (memory destination)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[0]), addr(v[1]))
            m.emit(0x0f)
            m.emit(0xbb)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)          // ModRM/SIB/disp for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for BTCQ")
    }
    return p
}
  3378  
// BTCW performs "Bit Test and Complement".
//
// Mnemonic        : BTC
// Supported forms : (4 forms)
//
//    * BTCW imm8, r16
//    * BTCW r16, r16
//    * BTCW imm8, m16
//    * BTCW r16, m16
//
// 16-bit operand-size variant of BTC (66 operand-size prefix). Immediate
// forms use 0F BA /7 ib; register forms use 0F BB /r with the bit index
// (v0) in the ModRM reg field. Panics on an unsupported operand
// combination.
func (self *Program) BTCW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BTCW", 2, Operands { v0, v1 })
    // BTCW imm8, r16
    if isImm8(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        // Encoding: 66 0F BA /7 ib
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                // operand-size override prefix
            m.rexo(0, v[1], false)
            m.emit(0x0f)
            m.emit(0xba)
            m.emit(0xf8 | lcode(v[1]))                  // ModRM: mod=11, reg=/7, rm=dst
            m.imm1(toImmAny(v[0]))                      // 8-bit bit index
        })
    }
    // BTCW r16, r16
    if isReg16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        // Encoding: 66 0F BB /r
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0xbb)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))   // ModRM: mod=11, reg=bit index, rm=dst
        })
    }
    // BTCW imm8, m16
    if isImm8(v0) && isM16(v1) {
        p.domain = DomainGeneric
        // Encoding: 66 0F BA /7 ib (memory destination)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xba)
            m.mrsd(7, addr(v[1]), 1)                    // reg field /7 selects BTC
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTCW r16, m16
    if isReg16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        // Encoding: 66 0F BB /r (memory destination)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xbb)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)          // ModRM/SIB/disp for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for BTCW")
    }
    return p
}
  3442  
// BTL performs "Bit Test".
//
// Mnemonic        : BT
// Supported forms : (4 forms)
//
//    * BTL imm8, r32
//    * BTL r32, r32
//    * BTL imm8, m32
//    * BTL r32, m32
//
func (self *Program) BTL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BTL", 2, Operands { v0, v1 })
    // BTL imm8, r32: [REX] 0F BA /4 ib (0xe0|reg = ModRM mod=11 with opcode extension /4)
    if isImm8(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0x0f)
            m.emit(0xba)
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTL r32, r32: [REX] 0F A3 /r (reg=bit index v[0], rm=source v[1])
    if isReg32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0xa3)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // BTL imm8, m32: [REX] 0F BA /4 ib (memory form)
    if isImm8(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xba)
            m.mrsd(4, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTL r32, m32: [REX] 0F A3 /r (memory form)
    if isReg32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xa3)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for BTL")
    }
    return p
}
  3502  
// BTQ performs "Bit Test".
//
// Mnemonic        : BT
// Supported forms : (4 forms)
//
//    * BTQ imm8, r64
//    * BTQ r64, r64
//    * BTQ imm8, m64
//    * BTQ r64, m64
//
func (self *Program) BTQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BTQ", 2, Operands { v0, v1 })
    // BTQ imm8, r64: REX.W 0F BA /4 ib (0x48|hcode = REX.W plus REX.B for r8-r15)
    if isImm8(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0x0f)
            m.emit(0xba)
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTQ r64, r64: REX.W 0F A3 /r (REX.R from v[0], REX.B from v[1])
    if isReg64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1]))
            m.emit(0x0f)
            m.emit(0xa3)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // BTQ imm8, m64: REX.W 0F BA /4 ib (rexm with w=1 emits the REX.W prefix)
    if isImm8(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0x0f)
            m.emit(0xba)
            m.mrsd(4, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTQ r64, m64: REX.W 0F A3 /r (memory form)
    if isReg64(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[0]), addr(v[1]))
            m.emit(0x0f)
            m.emit(0xa3)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for BTQ")
    }
    return p
}
  3562  
// BTRL performs "Bit Test and Reset".
//
// Mnemonic        : BTR
// Supported forms : (4 forms)
//
//    * BTRL imm8, r32
//    * BTRL r32, r32
//    * BTRL imm8, m32
//    * BTRL r32, m32
//
func (self *Program) BTRL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BTRL", 2, Operands { v0, v1 })
    // BTRL imm8, r32: [REX] 0F BA /6 ib (0xf0|reg = ModRM mod=11 with opcode extension /6)
    if isImm8(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0x0f)
            m.emit(0xba)
            m.emit(0xf0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTRL r32, r32: [REX] 0F B3 /r (reg=bit index v[0], rm=destination v[1])
    if isReg32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0xb3)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // BTRL imm8, m32: [REX] 0F BA /6 ib (memory form)
    if isImm8(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xba)
            m.mrsd(6, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTRL r32, m32: [REX] 0F B3 /r (memory form)
    if isReg32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xb3)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for BTRL")
    }
    return p
}
  3622  
// BTRQ performs "Bit Test and Reset".
//
// Mnemonic        : BTR
// Supported forms : (4 forms)
//
//    * BTRQ imm8, r64
//    * BTRQ r64, r64
//    * BTRQ imm8, m64
//    * BTRQ r64, m64
//
func (self *Program) BTRQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BTRQ", 2, Operands { v0, v1 })
    // BTRQ imm8, r64: REX.W 0F BA /6 ib (0x48|hcode = REX.W plus REX.B for r8-r15)
    if isImm8(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0x0f)
            m.emit(0xba)
            m.emit(0xf0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTRQ r64, r64: REX.W 0F B3 /r (REX.R from v[0], REX.B from v[1])
    if isReg64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1]))
            m.emit(0x0f)
            m.emit(0xb3)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // BTRQ imm8, m64: REX.W 0F BA /6 ib (rexm with w=1 emits the REX.W prefix)
    if isImm8(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0x0f)
            m.emit(0xba)
            m.mrsd(6, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTRQ r64, m64: REX.W 0F B3 /r (memory form)
    if isReg64(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[0]), addr(v[1]))
            m.emit(0x0f)
            m.emit(0xb3)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for BTRQ")
    }
    return p
}
  3682  
// BTRW performs "Bit Test and Reset".
//
// Mnemonic        : BTR
// Supported forms : (4 forms)
//
//    * BTRW imm8, r16
//    * BTRW r16, r16
//    * BTRW imm8, m16
//    * BTRW r16, m16
//
func (self *Program) BTRW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BTRW", 2, Operands { v0, v1 })
    // BTRW imm8, r16: 66 [REX] 0F BA /6 ib (66 = operand-size override for 16-bit)
    if isImm8(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0x0f)
            m.emit(0xba)
            m.emit(0xf0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTRW r16, r16: 66 [REX] 0F B3 /r (reg=bit index v[0], rm=destination v[1])
    if isReg16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0xb3)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // BTRW imm8, m16: 66 [REX] 0F BA /6 ib (memory form)
    if isImm8(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xba)
            m.mrsd(6, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTRW r16, m16: 66 [REX] 0F B3 /r (memory form)
    if isReg16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xb3)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for BTRW")
    }
    return p
}
  3746  
// BTSL performs "Bit Test and Set".
//
// Mnemonic        : BTS
// Supported forms : (4 forms)
//
//    * BTSL imm8, r32
//    * BTSL r32, r32
//    * BTSL imm8, m32
//    * BTSL r32, m32
//
func (self *Program) BTSL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BTSL", 2, Operands { v0, v1 })
    // BTSL imm8, r32: [REX] 0F BA /5 ib (0xe8|reg = ModRM mod=11 with opcode extension /5)
    if isImm8(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0x0f)
            m.emit(0xba)
            m.emit(0xe8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTSL r32, r32: [REX] 0F AB /r (reg=bit index v[0], rm=destination v[1])
    if isReg32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0xab)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // BTSL imm8, m32: [REX] 0F BA /5 ib (memory form)
    if isImm8(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xba)
            m.mrsd(5, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTSL r32, m32: [REX] 0F AB /r (memory form)
    if isReg32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xab)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for BTSL")
    }
    return p
}
  3806  
// BTSQ performs "Bit Test and Set".
//
// Mnemonic        : BTS
// Supported forms : (4 forms)
//
//    * BTSQ imm8, r64
//    * BTSQ r64, r64
//    * BTSQ imm8, m64
//    * BTSQ r64, m64
//
func (self *Program) BTSQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BTSQ", 2, Operands { v0, v1 })
    // BTSQ imm8, r64: REX.W 0F BA /5 ib (0x48|hcode = REX.W plus REX.B for r8-r15)
    if isImm8(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0x0f)
            m.emit(0xba)
            m.emit(0xe8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTSQ r64, r64: REX.W 0F AB /r (REX.R from v[0], REX.B from v[1])
    if isReg64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1]))
            m.emit(0x0f)
            m.emit(0xab)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // BTSQ imm8, m64: REX.W 0F BA /5 ib (rexm with w=1 emits the REX.W prefix)
    if isImm8(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0x0f)
            m.emit(0xba)
            m.mrsd(5, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTSQ r64, m64: REX.W 0F AB /r (memory form)
    if isReg64(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[0]), addr(v[1]))
            m.emit(0x0f)
            m.emit(0xab)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for BTSQ")
    }
    return p
}
  3866  
// BTSW performs "Bit Test and Set".
//
// Mnemonic        : BTS
// Supported forms : (4 forms)
//
//    * BTSW imm8, r16
//    * BTSW r16, r16
//    * BTSW imm8, m16
//    * BTSW r16, m16
//
func (self *Program) BTSW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BTSW", 2, Operands { v0, v1 })
    // BTSW imm8, r16: 66 [REX] 0F BA /5 ib (66 = operand-size override for 16-bit)
    if isImm8(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0x0f)
            m.emit(0xba)
            m.emit(0xe8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTSW r16, r16: 66 [REX] 0F AB /r (reg=bit index v[0], rm=destination v[1])
    if isReg16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0xab)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // BTSW imm8, m16: 66 [REX] 0F BA /5 ib (memory form)
    if isImm8(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xba)
            m.mrsd(5, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTSW r16, m16: 66 [REX] 0F AB /r (memory form)
    if isReg16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xab)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for BTSW")
    }
    return p
}
  3930  
// BTW performs "Bit Test".
//
// Mnemonic        : BT
// Supported forms : (4 forms)
//
//    * BTW imm8, r16
//    * BTW r16, r16
//    * BTW imm8, m16
//    * BTW r16, m16
//
func (self *Program) BTW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BTW", 2, Operands { v0, v1 })
    // BTW imm8, r16: 66 [REX] 0F BA /4 ib (66 = operand-size override for 16-bit)
    if isImm8(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0x0f)
            m.emit(0xba)
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTW r16, r16: 66 [REX] 0F A3 /r (reg=bit index v[0], rm=source v[1])
    if isReg16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0xa3)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // BTW imm8, m16: 66 [REX] 0F BA /4 ib (memory form)
    if isImm8(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xba)
            m.mrsd(4, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTW r16, m16: 66 [REX] 0F A3 /r (memory form)
    if isReg16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xa3)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for BTW")
    }
    return p
}
  3994  
// BZHI performs "Zero High Bits Starting with Specified Bit Position".
//
// Mnemonic        : BZHI
// Supported forms : (4 forms)
//
//    * BZHI r32, r32, r32    [BMI2]
//    * BZHI r32, m32, r32    [BMI2]
//    * BZHI r64, r64, r64    [BMI2]
//    * BZHI r64, m64, r64    [BMI2]
//
func (self *Program) BZHI(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("BZHI", 3, Operands { v0, v1, v2 })
    // BZHI r32, r32, r32: VEX.LZ.0F38.W0 F5 /r — 3-byte VEX emitted inline
    // (byte 1 carries inverted R/B and map 0F38, byte 2 carries W/vvvv/L/pp;
    // VEX.vvvv holds the index operand v[0]).
    if isReg32(v0) && isReg32(v1) && isReg32(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[0]) << 3))
            m.emit(0xf5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // BZHI r32, m32, r32: VEX.LZ.0F38.W0 F5 /r (memory form via the vex3 helper)
    if isReg32(v0) && isM32(v1) && isReg32(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x00, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0xf5)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // BZHI r64, r64, r64: VEX.LZ.0F38.W1 F5 /r (0xf8 sets VEX.W for 64-bit operation)
    if isReg64(v0) && isReg64(v1) && isReg64(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0xf8 ^ (hlcode(v[0]) << 3))
            m.emit(0xf5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // BZHI r64, m64, r64: VEX.LZ.0F38.W1 F5 /r (memory form, 0x80 sets VEX.W)
    if isReg64(v0) && isM64(v1) && isReg64(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x80, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0xf5)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for BZHI")
    }
    return p
}
  4056  
// CALL performs "Call Procedure".
//
// Mnemonic        : CALL
// Supported forms : (1 form)
//
//    * CALL rel32
//
// A label operand is also accepted; it is encoded as E8 rel32 once the label
// offset is resolved (the _F_rel4 flag marks the 4-byte relative relocation).
//
func (self *Program) CALL(v0 interface{}) *Instruction {
    p := self.alloc("CALL", 1, Operands { v0 })
    // CALL rel32: E8 cd (32-bit signed displacement relative to the next instruction)
    if isRel32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xe8)
            m.imm4(relv(v[0]))
        })
    }
    // CALL label: same E8 cd encoding, displacement filled in at label resolution.
    // NOTE(review): unlike the rel32 form, this branch does not set p.domain —
    // presumably matching the generator's intent; confirm against mkasm_amd64.py.
    if isLabel(v0) {
        p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
            m.emit(0xe8)
            m.imm4(relv(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for CALL")
    }
    return p
}
  4086  
// CALLQ performs "Call Procedure".
//
// Mnemonic        : CALL
// Supported forms : (2 forms)
//
//    * CALLQ r64
//    * CALLQ m64
//
func (self *Program) CALLQ(v0 interface{}) *Instruction {
    p := self.alloc("CALLQ", 1, Operands { v0 })
    // CALLQ r64: [REX] FF /2 (0xd0|reg = ModRM mod=11 with opcode extension /2;
    // near CALL defaults to 64-bit operand size in long mode, so no REX.W is emitted)
    if isReg64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], false)
            m.emit(0xff)
            m.emit(0xd0 | lcode(v[0]))
        })
    }
    // CALLQ m64: [REX] FF /2 (memory-indirect near call)
    if isM64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)
            m.emit(0xff)
            m.mrsd(2, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for CALLQ")
    }
    return p
}
  4120  
// CBTW performs "Convert Byte to Word".
//
// Mnemonic        : CBW
// Supported forms : (1 form)
//
//    * CBTW
//
func (self *Program) CBTW() *Instruction {
    p := self.alloc("CBTW", 0, Operands {  })
    // CBTW: 66 98 (opcode 98 with the 16-bit operand-size prefix selects CBW)
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0x66)
        m.emit(0x98)
    })
    return p
}
  4138  
// CLC performs "Clear Carry Flag".
//
// Mnemonic        : CLC
// Supported forms : (1 form)
//
//    * CLC
//
func (self *Program) CLC() *Instruction {
    p := self.alloc("CLC", 0, Operands {  })
    // CLC: single-byte opcode F8
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0xf8)
    })
    return p
}
  4155  
// CLD performs "Clear Direction Flag".
//
// Mnemonic        : CLD
// Supported forms : (1 form)
//
//    * CLD
//
func (self *Program) CLD() *Instruction {
    p := self.alloc("CLD", 0, Operands {  })
    // CLD: single-byte opcode FC
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0xfc)
    })
    return p
}
  4172  
// CLFLUSH performs "Flush Cache Line".
//
// Mnemonic        : CLFLUSH
// Supported forms : (1 form)
//
//    * CLFLUSH m8    [CLFLUSH]
//
func (self *Program) CLFLUSH(v0 interface{}) *Instruction {
    p := self.alloc("CLFLUSH", 1, Operands { v0 })
    // CLFLUSH m8: [REX] 0F AE /7 (memory operand only)
    if isM8(v0) {
        self.require(ISA_CLFLUSH)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xae)
            m.mrsd(7, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for CLFLUSH")
    }
    return p
}
  4198  
// CLFLUSHOPT performs "Flush Cache Line Optimized".
//
// Mnemonic        : CLFLUSHOPT
// Supported forms : (1 form)
//
//    * CLFLUSHOPT m8    [CLFLUSHOPT]
//
func (self *Program) CLFLUSHOPT(v0 interface{}) *Instruction {
    p := self.alloc("CLFLUSHOPT", 1, Operands { v0 })
    // CLFLUSHOPT m8: 66 [REX] 0F AE /7 (the 66 prefix distinguishes it from CLFLUSH)
    if isM8(v0) {
        self.require(ISA_CLFLUSHOPT)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xae)
            m.mrsd(7, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for CLFLUSHOPT")
    }
    return p
}
  4225  
// CLTD performs "Convert Doubleword to Quadword".
//
// Mnemonic        : CDQ
// Supported forms : (1 form)
//
//    * CLTD
//
func (self *Program) CLTD() *Instruction {
    p := self.alloc("CLTD", 0, Operands {  })
    // CLTD: single-byte opcode 99 (CDQ at the default 32-bit operand size)
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0x99)
    })
    return p
}
  4242  
// CLTQ performs "Convert Doubleword to Quadword".
//
// Mnemonic        : CDQE
// Supported forms : (1 form)
//
//    * CLTQ
//
func (self *Program) CLTQ() *Instruction {
    p := self.alloc("CLTQ", 0, Operands {  })
    // CLTQ: REX.W 98 (opcode 98 with REX.W selects CDQE)
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0x48)
        m.emit(0x98)
    })
    return p
}
  4260  
// CLWB performs "Cache Line Write Back".
//
// Mnemonic        : CLWB
// Supported forms : (1 form)
//
//    * CLWB m8    [CLWB]
//
func (self *Program) CLWB(v0 interface{}) *Instruction {
    p := self.alloc("CLWB", 1, Operands { v0 })
    // CLWB m8: 66 [REX] 0F AE /6 (66 prefix plus /6 distinguishes it from CLFLUSHOPT's /7)
    if isM8(v0) {
        self.require(ISA_CLWB)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xae)
            m.mrsd(6, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for CLWB")
    }
    return p
}
  4287  
// CLZERO performs "Zero-out 64-bit Cache Line".
//
// Mnemonic        : CLZERO
// Supported forms : (1 form)
//
//    * CLZERO    [CLZERO]
//
func (self *Program) CLZERO() *Instruction {
    p := self.alloc("CLZERO", 0, Operands {  })
    // CLZERO: 0F 01 FC (AMD-specific, gated behind the CLZERO ISA requirement)
    self.require(ISA_CLZERO)
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0x0f)
        m.emit(0x01)
        m.emit(0xfc)
    })
    return p
}
  4307  
// CMC performs "Complement Carry Flag".
//
// Mnemonic        : CMC
// Supported forms : (1 form)
//
//    * CMC
//
func (self *Program) CMC() *Instruction {
    p := self.alloc("CMC", 0, Operands {  })
    // CMC: single-byte opcode F5
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0xf5)
    })
    return p
}
  4324  
// CMOVA performs "Move if above (CF == 0 and ZF == 0)".
//
// Mnemonic        : CMOVA
// Supported forms : (6 forms)
//
//    * CMOVA r16, r16    [CMOV]
//    * CMOVA m16, r16    [CMOV]
//    * CMOVA r32, r32    [CMOV]
//    * CMOVA m32, r32    [CMOV]
//    * CMOVA r64, r64    [CMOV]
//    * CMOVA m64, r64    [CMOV]
//
func (self *Program) CMOVA(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVA", 2, Operands { v0, v1 })
    // CMOVA r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)   // 0x66: operand-size override prefix selects the 16-bit form
            m.rexo(hcode(v[1]), v[0], false)   // optional REX prefix (see rexo)
            m.emit(0x0f)
            m.emit(0x47)   // opcode 0F 47 = CMOVA
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM byte, register-direct (mod = 11)
        })
    }
    // CMOVA m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x47)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)   // memory operand encoding (see mrsd)
        })
    }
    // CMOVA r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x47)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVA m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x47)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVA r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))   // mandatory REX.W prefix (0x48); R/B bits carry the register extensions
            m.emit(0x0f)
            m.emit(0x47)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVA m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))   // REX.W-form prefix for the 64-bit memory operand (see rexm)
            m.emit(0x0f)
            m.emit(0x47)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no encoding was added above, so the operand types match no supported form
    if p.len == 0 {
        panic("invalid operands for CMOVA")
    }
    return p
}
  4412  
// CMOVAE performs "Move if above or equal (CF == 0)".
//
// Mnemonic        : CMOVAE
// Supported forms : (6 forms)
//
//    * CMOVAE r16, r16    [CMOV]
//    * CMOVAE m16, r16    [CMOV]
//    * CMOVAE r32, r32    [CMOV]
//    * CMOVAE m32, r32    [CMOV]
//    * CMOVAE r64, r64    [CMOV]
//    * CMOVAE m64, r64    [CMOV]
//
func (self *Program) CMOVAE(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVAE", 2, Operands { v0, v1 })
    // CMOVAE r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)   // 0x66: operand-size override prefix selects the 16-bit form
            m.rexo(hcode(v[1]), v[0], false)   // optional REX prefix (see rexo)
            m.emit(0x0f)
            m.emit(0x43)   // opcode 0F 43 = CMOVAE
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM byte, register-direct (mod = 11)
        })
    }
    // CMOVAE m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x43)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)   // memory operand encoding (see mrsd)
        })
    }
    // CMOVAE r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x43)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVAE m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x43)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVAE r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))   // mandatory REX.W prefix (0x48); R/B bits carry the register extensions
            m.emit(0x0f)
            m.emit(0x43)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVAE m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))   // REX.W-form prefix for the 64-bit memory operand (see rexm)
            m.emit(0x0f)
            m.emit(0x43)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no encoding was added above, so the operand types match no supported form
    if p.len == 0 {
        panic("invalid operands for CMOVAE")
    }
    return p
}
  4500  
// CMOVB performs "Move if below (CF == 1)".
//
// Mnemonic        : CMOVB
// Supported forms : (6 forms)
//
//    * CMOVB r16, r16    [CMOV]
//    * CMOVB m16, r16    [CMOV]
//    * CMOVB r32, r32    [CMOV]
//    * CMOVB m32, r32    [CMOV]
//    * CMOVB r64, r64    [CMOV]
//    * CMOVB m64, r64    [CMOV]
//
func (self *Program) CMOVB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVB", 2, Operands { v0, v1 })
    // CMOVB r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)   // 0x66: operand-size override prefix selects the 16-bit form
            m.rexo(hcode(v[1]), v[0], false)   // optional REX prefix (see rexo)
            m.emit(0x0f)
            m.emit(0x42)   // opcode 0F 42 = CMOVB
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM byte, register-direct (mod = 11)
        })
    }
    // CMOVB m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x42)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)   // memory operand encoding (see mrsd)
        })
    }
    // CMOVB r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVB m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x42)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVB r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))   // mandatory REX.W prefix (0x48); R/B bits carry the register extensions
            m.emit(0x0f)
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVB m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))   // REX.W-form prefix for the 64-bit memory operand (see rexm)
            m.emit(0x0f)
            m.emit(0x42)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no encoding was added above, so the operand types match no supported form
    if p.len == 0 {
        panic("invalid operands for CMOVB")
    }
    return p
}
  4588  
// CMOVBE performs "Move if below or equal (CF == 1 or ZF == 1)".
//
// Mnemonic        : CMOVBE
// Supported forms : (6 forms)
//
//    * CMOVBE r16, r16    [CMOV]
//    * CMOVBE m16, r16    [CMOV]
//    * CMOVBE r32, r32    [CMOV]
//    * CMOVBE m32, r32    [CMOV]
//    * CMOVBE r64, r64    [CMOV]
//    * CMOVBE m64, r64    [CMOV]
//
func (self *Program) CMOVBE(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVBE", 2, Operands { v0, v1 })
    // CMOVBE r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)   // 0x66: operand-size override prefix selects the 16-bit form
            m.rexo(hcode(v[1]), v[0], false)   // optional REX prefix (see rexo)
            m.emit(0x0f)
            m.emit(0x46)   // opcode 0F 46 = CMOVBE
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM byte, register-direct (mod = 11)
        })
    }
    // CMOVBE m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x46)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)   // memory operand encoding (see mrsd)
        })
    }
    // CMOVBE r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x46)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVBE m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x46)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVBE r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))   // mandatory REX.W prefix (0x48); R/B bits carry the register extensions
            m.emit(0x0f)
            m.emit(0x46)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVBE m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))   // REX.W-form prefix for the 64-bit memory operand (see rexm)
            m.emit(0x0f)
            m.emit(0x46)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no encoding was added above, so the operand types match no supported form
    if p.len == 0 {
        panic("invalid operands for CMOVBE")
    }
    return p
}
  4676  
// CMOVC performs "Move if carry (CF == 1)".
//
// Mnemonic        : CMOVC
// Supported forms : (6 forms)
//
//    * CMOVC r16, r16    [CMOV]
//    * CMOVC m16, r16    [CMOV]
//    * CMOVC r32, r32    [CMOV]
//    * CMOVC m32, r32    [CMOV]
//    * CMOVC r64, r64    [CMOV]
//    * CMOVC m64, r64    [CMOV]
//
func (self *Program) CMOVC(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVC", 2, Operands { v0, v1 })
    // CMOVC r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)   // 0x66: operand-size override prefix selects the 16-bit form
            m.rexo(hcode(v[1]), v[0], false)   // optional REX prefix (see rexo)
            m.emit(0x0f)
            m.emit(0x42)   // opcode 0F 42: same encoding as CMOVB — CMOVC is a mnemonic alias
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM byte, register-direct (mod = 11)
        })
    }
    // CMOVC m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x42)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)   // memory operand encoding (see mrsd)
        })
    }
    // CMOVC r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVC m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x42)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVC r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))   // mandatory REX.W prefix (0x48); R/B bits carry the register extensions
            m.emit(0x0f)
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVC m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))   // REX.W-form prefix for the 64-bit memory operand (see rexm)
            m.emit(0x0f)
            m.emit(0x42)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no encoding was added above, so the operand types match no supported form
    if p.len == 0 {
        panic("invalid operands for CMOVC")
    }
    return p
}
  4764  
// CMOVE performs "Move if equal (ZF == 1)".
//
// Mnemonic        : CMOVE
// Supported forms : (6 forms)
//
//    * CMOVE r16, r16    [CMOV]
//    * CMOVE m16, r16    [CMOV]
//    * CMOVE r32, r32    [CMOV]
//    * CMOVE m32, r32    [CMOV]
//    * CMOVE r64, r64    [CMOV]
//    * CMOVE m64, r64    [CMOV]
//
func (self *Program) CMOVE(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVE", 2, Operands { v0, v1 })
    // CMOVE r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)   // 0x66: operand-size override prefix selects the 16-bit form
            m.rexo(hcode(v[1]), v[0], false)   // optional REX prefix (see rexo)
            m.emit(0x0f)
            m.emit(0x44)   // opcode 0F 44 = CMOVE
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM byte, register-direct (mod = 11)
        })
    }
    // CMOVE m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x44)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)   // memory operand encoding (see mrsd)
        })
    }
    // CMOVE r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x44)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVE m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x44)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVE r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))   // mandatory REX.W prefix (0x48); R/B bits carry the register extensions
            m.emit(0x0f)
            m.emit(0x44)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVE m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))   // REX.W-form prefix for the 64-bit memory operand (see rexm)
            m.emit(0x0f)
            m.emit(0x44)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no encoding was added above, so the operand types match no supported form
    if p.len == 0 {
        panic("invalid operands for CMOVE")
    }
    return p
}
  4852  
// CMOVG performs "Move if greater (ZF == 0 and SF == OF)".
//
// Mnemonic        : CMOVG
// Supported forms : (6 forms)
//
//    * CMOVG r16, r16    [CMOV]
//    * CMOVG m16, r16    [CMOV]
//    * CMOVG r32, r32    [CMOV]
//    * CMOVG m32, r32    [CMOV]
//    * CMOVG r64, r64    [CMOV]
//    * CMOVG m64, r64    [CMOV]
//
func (self *Program) CMOVG(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVG", 2, Operands { v0, v1 })
    // CMOVG r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)   // 0x66: operand-size override prefix selects the 16-bit form
            m.rexo(hcode(v[1]), v[0], false)   // optional REX prefix (see rexo)
            m.emit(0x0f)
            m.emit(0x4f)   // opcode 0F 4F = CMOVG
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM byte, register-direct (mod = 11)
        })
    }
    // CMOVG m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)   // memory operand encoding (see mrsd)
        })
    }
    // CMOVG r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x4f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVG m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVG r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))   // mandatory REX.W prefix (0x48); R/B bits carry the register extensions
            m.emit(0x0f)
            m.emit(0x4f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVG m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))   // REX.W-form prefix for the 64-bit memory operand (see rexm)
            m.emit(0x0f)
            m.emit(0x4f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no encoding was added above, so the operand types match no supported form
    if p.len == 0 {
        panic("invalid operands for CMOVG")
    }
    return p
}
  4940  
// CMOVGE performs "Move if greater or equal (SF == OF)".
//
// Mnemonic        : CMOVGE
// Supported forms : (6 forms)
//
//    * CMOVGE r16, r16    [CMOV]
//    * CMOVGE m16, r16    [CMOV]
//    * CMOVGE r32, r32    [CMOV]
//    * CMOVGE m32, r32    [CMOV]
//    * CMOVGE r64, r64    [CMOV]
//    * CMOVGE m64, r64    [CMOV]
//
func (self *Program) CMOVGE(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVGE", 2, Operands { v0, v1 })
    // CMOVGE r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)   // 0x66: operand-size override prefix selects the 16-bit form
            m.rexo(hcode(v[1]), v[0], false)   // optional REX prefix (see rexo)
            m.emit(0x0f)
            m.emit(0x4d)   // opcode 0F 4D = CMOVGE
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM byte, register-direct (mod = 11)
        })
    }
    // CMOVGE m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)   // memory operand encoding (see mrsd)
        })
    }
    // CMOVGE r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x4d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVGE m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVGE r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))   // mandatory REX.W prefix (0x48); R/B bits carry the register extensions
            m.emit(0x0f)
            m.emit(0x4d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVGE m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))   // REX.W-form prefix for the 64-bit memory operand (see rexm)
            m.emit(0x0f)
            m.emit(0x4d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no encoding was added above, so the operand types match no supported form
    if p.len == 0 {
        panic("invalid operands for CMOVGE")
    }
    return p
}
  5028  
// CMOVL performs "Move if less (SF != OF)".
//
// Mnemonic        : CMOVL
// Supported forms : (6 forms)
//
//    * CMOVL r16, r16    [CMOV]
//    * CMOVL m16, r16    [CMOV]
//    * CMOVL r32, r32    [CMOV]
//    * CMOVL m32, r32    [CMOV]
//    * CMOVL r64, r64    [CMOV]
//    * CMOVL m64, r64    [CMOV]
//
func (self *Program) CMOVL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVL", 2, Operands { v0, v1 })
    // CMOVL r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)   // 0x66: operand-size override prefix selects the 16-bit form
            m.rexo(hcode(v[1]), v[0], false)   // optional REX prefix (see rexo)
            m.emit(0x0f)
            m.emit(0x4c)   // opcode 0F 4C = CMOVL
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM byte, register-direct (mod = 11)
        })
    }
    // CMOVL m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)   // memory operand encoding (see mrsd)
        })
    }
    // CMOVL r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x4c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVL m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVL r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))   // mandatory REX.W prefix (0x48); R/B bits carry the register extensions
            m.emit(0x0f)
            m.emit(0x4c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVL m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))   // REX.W-form prefix for the 64-bit memory operand (see rexm)
            m.emit(0x0f)
            m.emit(0x4c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no encoding was added above, so the operand types match no supported form
    if p.len == 0 {
        panic("invalid operands for CMOVL")
    }
    return p
}
  5116  
// CMOVLE performs "Move if less or equal (ZF == 1 or SF != OF)".
//
// Mnemonic        : CMOVLE
// Supported forms : (6 forms)
//
//    * CMOVLE r16, r16    [CMOV]
//    * CMOVLE m16, r16    [CMOV]
//    * CMOVLE r32, r32    [CMOV]
//    * CMOVLE m32, r32    [CMOV]
//    * CMOVLE r64, r64    [CMOV]
//    * CMOVLE m64, r64    [CMOV]
//
func (self *Program) CMOVLE(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVLE", 2, Operands { v0, v1 })
    // CMOVLE r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)   // 0x66: operand-size override prefix selects the 16-bit form
            m.rexo(hcode(v[1]), v[0], false)   // optional REX prefix (see rexo)
            m.emit(0x0f)
            m.emit(0x4e)   // opcode 0F 4E = CMOVLE
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM byte, register-direct (mod = 11)
        })
    }
    // CMOVLE m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)   // memory operand encoding (see mrsd)
        })
    }
    // CMOVLE r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x4e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVLE m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVLE r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))   // mandatory REX.W prefix (0x48); R/B bits carry the register extensions
            m.emit(0x0f)
            m.emit(0x4e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVLE m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))   // REX.W-form prefix for the 64-bit memory operand (see rexm)
            m.emit(0x0f)
            m.emit(0x4e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no encoding was added above, so the operand types match no supported form
    if p.len == 0 {
        panic("invalid operands for CMOVLE")
    }
    return p
}
  5204  
// CMOVNA performs "Move if not above (CF == 1 or ZF == 1)".
//
// Mnemonic        : CMOVNA
// Supported forms : (6 forms)
//
//    * CMOVNA r16, r16    [CMOV]
//    * CMOVNA m16, r16    [CMOV]
//    * CMOVNA r32, r32    [CMOV]
//    * CMOVNA m32, r32    [CMOV]
//    * CMOVNA r64, r64    [CMOV]
//    * CMOVNA m64, r64    [CMOV]
//
// Encoding: 0F 46 /r — the same opcode as CMOVBE (CMOVNA is an alias).
//
// NOTE(review): this file is generated ("DO NOT EDIT" header); fix issues in
// mkasm_amd64.py and regenerate rather than editing this function by hand.
func (self *Program) CMOVNA(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVNA", 2, Operands { v0, v1 })
    // CMOVNA r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66) // 16-bit operand-size prefix
            m.rexo(hcode(v[1]), v[0], false) // optional REX prefix (extended-register bits)
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0x46) // CMOVNA/CMOVBE opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: register-direct, reg=dst, rm=src
        })
    }
    // CMOVNA m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x46)
            m.mrsd(lcode(v[1]), addr(v[0]), 1) // ModRM/SIB/displacement for the memory operand
        })
    }
    // CMOVNA r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x46)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNA m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x46)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVNA r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0])) // REX.W plus register-extension bits
            m.emit(0x0f)
            m.emit(0x46)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNA m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0])) // mandatory REX with W=1 for 64-bit operand size
            m.emit(0x0f)
            m.emit(0x46)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 { // no encoding form matched the given operand types
        panic("invalid operands for CMOVNA")
    }
    return p
}
  5292  
// CMOVNAE performs "Move if not above or equal (CF == 1)".
//
// Mnemonic        : CMOVNAE
// Supported forms : (6 forms)
//
//    * CMOVNAE r16, r16    [CMOV]
//    * CMOVNAE m16, r16    [CMOV]
//    * CMOVNAE r32, r32    [CMOV]
//    * CMOVNAE m32, r32    [CMOV]
//    * CMOVNAE r64, r64    [CMOV]
//    * CMOVNAE m64, r64    [CMOV]
//
// Encoding: 0F 42 /r — the same opcode as CMOVB/CMOVC (CMOVNAE is an alias).
//
// NOTE(review): this file is generated ("DO NOT EDIT" header); fix issues in
// mkasm_amd64.py and regenerate rather than editing this function by hand.
func (self *Program) CMOVNAE(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVNAE", 2, Operands { v0, v1 })
    // CMOVNAE r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66) // 16-bit operand-size prefix
            m.rexo(hcode(v[1]), v[0], false) // optional REX prefix (extended-register bits)
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0x42) // CMOVNAE/CMOVB opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: register-direct, reg=dst, rm=src
        })
    }
    // CMOVNAE m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x42)
            m.mrsd(lcode(v[1]), addr(v[0]), 1) // ModRM/SIB/displacement for the memory operand
        })
    }
    // CMOVNAE r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNAE m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x42)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVNAE r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0])) // REX.W plus register-extension bits
            m.emit(0x0f)
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNAE m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0])) // mandatory REX with W=1 for 64-bit operand size
            m.emit(0x0f)
            m.emit(0x42)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 { // no encoding form matched the given operand types
        panic("invalid operands for CMOVNAE")
    }
    return p
}
  5380  
// CMOVNB performs "Move if not below (CF == 0)".
//
// Mnemonic        : CMOVNB
// Supported forms : (6 forms)
//
//    * CMOVNB r16, r16    [CMOV]
//    * CMOVNB m16, r16    [CMOV]
//    * CMOVNB r32, r32    [CMOV]
//    * CMOVNB m32, r32    [CMOV]
//    * CMOVNB r64, r64    [CMOV]
//    * CMOVNB m64, r64    [CMOV]
//
// Encoding: 0F 43 /r — the same opcode as CMOVAE/CMOVNC (CMOVNB is an alias).
//
// NOTE(review): this file is generated ("DO NOT EDIT" header); fix issues in
// mkasm_amd64.py and regenerate rather than editing this function by hand.
func (self *Program) CMOVNB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVNB", 2, Operands { v0, v1 })
    // CMOVNB r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66) // 16-bit operand-size prefix
            m.rexo(hcode(v[1]), v[0], false) // optional REX prefix (extended-register bits)
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0x43) // CMOVNB/CMOVAE opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: register-direct, reg=dst, rm=src
        })
    }
    // CMOVNB m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x43)
            m.mrsd(lcode(v[1]), addr(v[0]), 1) // ModRM/SIB/displacement for the memory operand
        })
    }
    // CMOVNB r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x43)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNB m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x43)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVNB r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0])) // REX.W plus register-extension bits
            m.emit(0x0f)
            m.emit(0x43)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNB m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0])) // mandatory REX with W=1 for 64-bit operand size
            m.emit(0x0f)
            m.emit(0x43)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 { // no encoding form matched the given operand types
        panic("invalid operands for CMOVNB")
    }
    return p
}
  5468  
// CMOVNBE performs "Move if not below or equal (CF == 0 and ZF == 0)".
//
// Mnemonic        : CMOVNBE
// Supported forms : (6 forms)
//
//    * CMOVNBE r16, r16    [CMOV]
//    * CMOVNBE m16, r16    [CMOV]
//    * CMOVNBE r32, r32    [CMOV]
//    * CMOVNBE m32, r32    [CMOV]
//    * CMOVNBE r64, r64    [CMOV]
//    * CMOVNBE m64, r64    [CMOV]
//
// Encoding: 0F 47 /r — the same opcode as CMOVA (CMOVNBE is an alias).
//
// NOTE(review): this file is generated ("DO NOT EDIT" header); fix issues in
// mkasm_amd64.py and regenerate rather than editing this function by hand.
func (self *Program) CMOVNBE(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVNBE", 2, Operands { v0, v1 })
    // CMOVNBE r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66) // 16-bit operand-size prefix
            m.rexo(hcode(v[1]), v[0], false) // optional REX prefix (extended-register bits)
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0x47) // CMOVNBE/CMOVA opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: register-direct, reg=dst, rm=src
        })
    }
    // CMOVNBE m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x47)
            m.mrsd(lcode(v[1]), addr(v[0]), 1) // ModRM/SIB/displacement for the memory operand
        })
    }
    // CMOVNBE r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x47)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNBE m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x47)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVNBE r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0])) // REX.W plus register-extension bits
            m.emit(0x0f)
            m.emit(0x47)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNBE m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0])) // mandatory REX with W=1 for 64-bit operand size
            m.emit(0x0f)
            m.emit(0x47)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 { // no encoding form matched the given operand types
        panic("invalid operands for CMOVNBE")
    }
    return p
}
  5556  
// CMOVNC performs "Move if not carry (CF == 0)".
//
// Mnemonic        : CMOVNC
// Supported forms : (6 forms)
//
//    * CMOVNC r16, r16    [CMOV]
//    * CMOVNC m16, r16    [CMOV]
//    * CMOVNC r32, r32    [CMOV]
//    * CMOVNC m32, r32    [CMOV]
//    * CMOVNC r64, r64    [CMOV]
//    * CMOVNC m64, r64    [CMOV]
//
// Encoding: 0F 43 /r — the same opcode as CMOVAE/CMOVNB (CMOVNC is an alias).
//
// NOTE(review): this file is generated ("DO NOT EDIT" header); fix issues in
// mkasm_amd64.py and regenerate rather than editing this function by hand.
func (self *Program) CMOVNC(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVNC", 2, Operands { v0, v1 })
    // CMOVNC r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66) // 16-bit operand-size prefix
            m.rexo(hcode(v[1]), v[0], false) // optional REX prefix (extended-register bits)
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0x43) // CMOVNC/CMOVAE opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: register-direct, reg=dst, rm=src
        })
    }
    // CMOVNC m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x43)
            m.mrsd(lcode(v[1]), addr(v[0]), 1) // ModRM/SIB/displacement for the memory operand
        })
    }
    // CMOVNC r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x43)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNC m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x43)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVNC r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0])) // REX.W plus register-extension bits
            m.emit(0x0f)
            m.emit(0x43)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNC m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0])) // mandatory REX with W=1 for 64-bit operand size
            m.emit(0x0f)
            m.emit(0x43)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 { // no encoding form matched the given operand types
        panic("invalid operands for CMOVNC")
    }
    return p
}
  5644  
// CMOVNE performs "Move if not equal (ZF == 0)".
//
// Mnemonic        : CMOVNE
// Supported forms : (6 forms)
//
//    * CMOVNE r16, r16    [CMOV]
//    * CMOVNE m16, r16    [CMOV]
//    * CMOVNE r32, r32    [CMOV]
//    * CMOVNE m32, r32    [CMOV]
//    * CMOVNE r64, r64    [CMOV]
//    * CMOVNE m64, r64    [CMOV]
//
// Encoding: 0F 45 /r — the same opcode as CMOVNZ (CMOVNE is an alias).
//
// NOTE(review): this file is generated ("DO NOT EDIT" header); fix issues in
// mkasm_amd64.py and regenerate rather than editing this function by hand.
func (self *Program) CMOVNE(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVNE", 2, Operands { v0, v1 })
    // CMOVNE r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66) // 16-bit operand-size prefix
            m.rexo(hcode(v[1]), v[0], false) // optional REX prefix (extended-register bits)
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0x45) // CMOVNE/CMOVNZ opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: register-direct, reg=dst, rm=src
        })
    }
    // CMOVNE m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x45)
            m.mrsd(lcode(v[1]), addr(v[0]), 1) // ModRM/SIB/displacement for the memory operand
        })
    }
    // CMOVNE r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x45)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNE m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x45)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVNE r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0])) // REX.W plus register-extension bits
            m.emit(0x0f)
            m.emit(0x45)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNE m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0])) // mandatory REX with W=1 for 64-bit operand size
            m.emit(0x0f)
            m.emit(0x45)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 { // no encoding form matched the given operand types
        panic("invalid operands for CMOVNE")
    }
    return p
}
  5732  
// CMOVNG performs "Move if not greater (ZF == 1 or SF != OF)".
//
// Mnemonic        : CMOVNG
// Supported forms : (6 forms)
//
//    * CMOVNG r16, r16    [CMOV]
//    * CMOVNG m16, r16    [CMOV]
//    * CMOVNG r32, r32    [CMOV]
//    * CMOVNG m32, r32    [CMOV]
//    * CMOVNG r64, r64    [CMOV]
//    * CMOVNG m64, r64    [CMOV]
//
// Encoding: 0F 4E /r — the same opcode as CMOVLE (CMOVNG is an alias).
//
// NOTE(review): this file is generated ("DO NOT EDIT" header); fix issues in
// mkasm_amd64.py and regenerate rather than editing this function by hand.
func (self *Program) CMOVNG(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVNG", 2, Operands { v0, v1 })
    // CMOVNG r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66) // 16-bit operand-size prefix
            m.rexo(hcode(v[1]), v[0], false) // optional REX prefix (extended-register bits)
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0x4e) // CMOVNG/CMOVLE opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: register-direct, reg=dst, rm=src
        })
    }
    // CMOVNG m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1) // ModRM/SIB/displacement for the memory operand
        })
    }
    // CMOVNG r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x4e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNG m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVNG r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0])) // REX.W plus register-extension bits
            m.emit(0x0f)
            m.emit(0x4e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNG m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0])) // mandatory REX with W=1 for 64-bit operand size
            m.emit(0x0f)
            m.emit(0x4e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 { // no encoding form matched the given operand types
        panic("invalid operands for CMOVNG")
    }
    return p
}
  5820  
// CMOVNGE performs "Move if not greater or equal (SF != OF)".
//
// Mnemonic        : CMOVNGE
// Supported forms : (6 forms)
//
//    * CMOVNGE r16, r16    [CMOV]
//    * CMOVNGE m16, r16    [CMOV]
//    * CMOVNGE r32, r32    [CMOV]
//    * CMOVNGE m32, r32    [CMOV]
//    * CMOVNGE r64, r64    [CMOV]
//    * CMOVNGE m64, r64    [CMOV]
//
// Encoding: 0F 4C /r — the same opcode as CMOVL (CMOVNGE is an alias).
//
// NOTE(review): this file is generated ("DO NOT EDIT" header); fix issues in
// mkasm_amd64.py and regenerate rather than editing this function by hand.
func (self *Program) CMOVNGE(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVNGE", 2, Operands { v0, v1 })
    // CMOVNGE r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66) // 16-bit operand-size prefix
            m.rexo(hcode(v[1]), v[0], false) // optional REX prefix (extended-register bits)
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0x4c) // CMOVNGE/CMOVL opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: register-direct, reg=dst, rm=src
        })
    }
    // CMOVNGE m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1) // ModRM/SIB/displacement for the memory operand
        })
    }
    // CMOVNGE r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x4c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNGE m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVNGE r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0])) // REX.W plus register-extension bits
            m.emit(0x0f)
            m.emit(0x4c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNGE m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0])) // mandatory REX with W=1 for 64-bit operand size
            m.emit(0x0f)
            m.emit(0x4c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 { // no encoding form matched the given operand types
        panic("invalid operands for CMOVNGE")
    }
    return p
}
  5908  
// CMOVNL performs "Move if not less (SF == OF)".
//
// Mnemonic        : CMOVNL
// Supported forms : (6 forms)
//
//    * CMOVNL r16, r16    [CMOV]
//    * CMOVNL m16, r16    [CMOV]
//    * CMOVNL r32, r32    [CMOV]
//    * CMOVNL m32, r32    [CMOV]
//    * CMOVNL r64, r64    [CMOV]
//    * CMOVNL m64, r64    [CMOV]
//
// Encoding: 0F 4D /r — the same opcode as CMOVGE (CMOVNL is an alias).
//
// NOTE(review): this file is generated ("DO NOT EDIT" header); fix issues in
// mkasm_amd64.py and regenerate rather than editing this function by hand.
func (self *Program) CMOVNL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVNL", 2, Operands { v0, v1 })
    // CMOVNL r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66) // 16-bit operand-size prefix
            m.rexo(hcode(v[1]), v[0], false) // optional REX prefix (extended-register bits)
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0x4d) // CMOVNL/CMOVGE opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: register-direct, reg=dst, rm=src
        })
    }
    // CMOVNL m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1) // ModRM/SIB/displacement for the memory operand
        })
    }
    // CMOVNL r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x4d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNL m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVNL r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0])) // REX.W plus register-extension bits
            m.emit(0x0f)
            m.emit(0x4d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNL m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0])) // mandatory REX with W=1 for 64-bit operand size
            m.emit(0x0f)
            m.emit(0x4d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 { // no encoding form matched the given operand types
        panic("invalid operands for CMOVNL")
    }
    return p
}
  5996  
  5997  // CMOVNLE performs "Move if not less or equal (ZF == 0 and SF == OF)".
  5998  //
  5999  // Mnemonic        : CMOVNLE
  6000  // Supported forms : (6 forms)
  6001  //
  6002  //    * CMOVNLE r16, r16    [CMOV]
  6003  //    * CMOVNLE m16, r16    [CMOV]
  6004  //    * CMOVNLE r32, r32    [CMOV]
  6005  //    * CMOVNLE m32, r32    [CMOV]
  6006  //    * CMOVNLE r64, r64    [CMOV]
  6007  //    * CMOVNLE m64, r64    [CMOV]
  6008  //
  6009  func (self *Program) CMOVNLE(v0 interface{}, v1 interface{}) *Instruction {
  6010      p := self.alloc("CMOVNLE", 2, Operands { v0, v1 })
  6011      // CMOVNLE r16, r16
  6012      if isReg16(v0) && isReg16(v1) {
  6013          self.require(ISA_CMOV)
  6014          p.domain = DomainGeneric
  6015          p.add(0, func(m *_Encoding, v []interface{}) {
  6016              m.emit(0x66)
  6017              m.rexo(hcode(v[1]), v[0], false)
  6018              m.emit(0x0f)
  6019              m.emit(0x4f)
  6020              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  6021          })
  6022      }
  6023      // CMOVNLE m16, r16
  6024      if isM16(v0) && isReg16(v1) {
  6025          self.require(ISA_CMOV)
  6026          p.domain = DomainGeneric
  6027          p.add(0, func(m *_Encoding, v []interface{}) {
  6028              m.emit(0x66)
  6029              m.rexo(hcode(v[1]), addr(v[0]), false)
  6030              m.emit(0x0f)
  6031              m.emit(0x4f)
  6032              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  6033          })
  6034      }
  6035      // CMOVNLE r32, r32
  6036      if isReg32(v0) && isReg32(v1) {
  6037          self.require(ISA_CMOV)
  6038          p.domain = DomainGeneric
  6039          p.add(0, func(m *_Encoding, v []interface{}) {
  6040              m.rexo(hcode(v[1]), v[0], false)
  6041              m.emit(0x0f)
  6042              m.emit(0x4f)
  6043              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  6044          })
  6045      }
  6046      // CMOVNLE m32, r32
  6047      if isM32(v0) && isReg32(v1) {
  6048          self.require(ISA_CMOV)
  6049          p.domain = DomainGeneric
  6050          p.add(0, func(m *_Encoding, v []interface{}) {
  6051              m.rexo(hcode(v[1]), addr(v[0]), false)
  6052              m.emit(0x0f)
  6053              m.emit(0x4f)
  6054              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  6055          })
  6056      }
  6057      // CMOVNLE r64, r64
  6058      if isReg64(v0) && isReg64(v1) {
  6059          self.require(ISA_CMOV)
  6060          p.domain = DomainGeneric
  6061          p.add(0, func(m *_Encoding, v []interface{}) {
  6062              m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
  6063              m.emit(0x0f)
  6064              m.emit(0x4f)
  6065              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  6066          })
  6067      }
  6068      // CMOVNLE m64, r64
  6069      if isM64(v0) && isReg64(v1) {
  6070          self.require(ISA_CMOV)
  6071          p.domain = DomainGeneric
  6072          p.add(0, func(m *_Encoding, v []interface{}) {
  6073              m.rexm(1, hcode(v[1]), addr(v[0]))
  6074              m.emit(0x0f)
  6075              m.emit(0x4f)
  6076              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  6077          })
  6078      }
  6079      if p.len == 0 {
  6080          panic("invalid operands for CMOVNLE")
  6081      }
  6082      return p
  6083  }
  6084  
  6085  // CMOVNO performs "Move if not overflow (OF == 0)".
  6086  //
  6087  // Mnemonic        : CMOVNO
  6088  // Supported forms : (6 forms)
  6089  //
  6090  //    * CMOVNO r16, r16    [CMOV]
  6091  //    * CMOVNO m16, r16    [CMOV]
  6092  //    * CMOVNO r32, r32    [CMOV]
  6093  //    * CMOVNO m32, r32    [CMOV]
  6094  //    * CMOVNO r64, r64    [CMOV]
  6095  //    * CMOVNO m64, r64    [CMOV]
  6096  //
  6097  func (self *Program) CMOVNO(v0 interface{}, v1 interface{}) *Instruction {
  6098      p := self.alloc("CMOVNO", 2, Operands { v0, v1 })
  6099      // CMOVNO r16, r16
  6100      if isReg16(v0) && isReg16(v1) {
  6101          self.require(ISA_CMOV)
  6102          p.domain = DomainGeneric
  6103          p.add(0, func(m *_Encoding, v []interface{}) {
  6104              m.emit(0x66)
  6105              m.rexo(hcode(v[1]), v[0], false)
  6106              m.emit(0x0f)
  6107              m.emit(0x41)
  6108              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  6109          })
  6110      }
  6111      // CMOVNO m16, r16
  6112      if isM16(v0) && isReg16(v1) {
  6113          self.require(ISA_CMOV)
  6114          p.domain = DomainGeneric
  6115          p.add(0, func(m *_Encoding, v []interface{}) {
  6116              m.emit(0x66)
  6117              m.rexo(hcode(v[1]), addr(v[0]), false)
  6118              m.emit(0x0f)
  6119              m.emit(0x41)
  6120              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  6121          })
  6122      }
  6123      // CMOVNO r32, r32
  6124      if isReg32(v0) && isReg32(v1) {
  6125          self.require(ISA_CMOV)
  6126          p.domain = DomainGeneric
  6127          p.add(0, func(m *_Encoding, v []interface{}) {
  6128              m.rexo(hcode(v[1]), v[0], false)
  6129              m.emit(0x0f)
  6130              m.emit(0x41)
  6131              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  6132          })
  6133      }
  6134      // CMOVNO m32, r32
  6135      if isM32(v0) && isReg32(v1) {
  6136          self.require(ISA_CMOV)
  6137          p.domain = DomainGeneric
  6138          p.add(0, func(m *_Encoding, v []interface{}) {
  6139              m.rexo(hcode(v[1]), addr(v[0]), false)
  6140              m.emit(0x0f)
  6141              m.emit(0x41)
  6142              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  6143          })
  6144      }
  6145      // CMOVNO r64, r64
  6146      if isReg64(v0) && isReg64(v1) {
  6147          self.require(ISA_CMOV)
  6148          p.domain = DomainGeneric
  6149          p.add(0, func(m *_Encoding, v []interface{}) {
  6150              m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
  6151              m.emit(0x0f)
  6152              m.emit(0x41)
  6153              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  6154          })
  6155      }
  6156      // CMOVNO m64, r64
  6157      if isM64(v0) && isReg64(v1) {
  6158          self.require(ISA_CMOV)
  6159          p.domain = DomainGeneric
  6160          p.add(0, func(m *_Encoding, v []interface{}) {
  6161              m.rexm(1, hcode(v[1]), addr(v[0]))
  6162              m.emit(0x0f)
  6163              m.emit(0x41)
  6164              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  6165          })
  6166      }
  6167      if p.len == 0 {
  6168          panic("invalid operands for CMOVNO")
  6169      }
  6170      return p
  6171  }
  6172  
  6173  // CMOVNP performs "Move if not parity (PF == 0)".
  6174  //
  6175  // Mnemonic        : CMOVNP
  6176  // Supported forms : (6 forms)
  6177  //
  6178  //    * CMOVNP r16, r16    [CMOV]
  6179  //    * CMOVNP m16, r16    [CMOV]
  6180  //    * CMOVNP r32, r32    [CMOV]
  6181  //    * CMOVNP m32, r32    [CMOV]
  6182  //    * CMOVNP r64, r64    [CMOV]
  6183  //    * CMOVNP m64, r64    [CMOV]
  6184  //
  6185  func (self *Program) CMOVNP(v0 interface{}, v1 interface{}) *Instruction {
  6186      p := self.alloc("CMOVNP", 2, Operands { v0, v1 })
  6187      // CMOVNP r16, r16
  6188      if isReg16(v0) && isReg16(v1) {
  6189          self.require(ISA_CMOV)
  6190          p.domain = DomainGeneric
  6191          p.add(0, func(m *_Encoding, v []interface{}) {
  6192              m.emit(0x66)
  6193              m.rexo(hcode(v[1]), v[0], false)
  6194              m.emit(0x0f)
  6195              m.emit(0x4b)
  6196              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  6197          })
  6198      }
  6199      // CMOVNP m16, r16
  6200      if isM16(v0) && isReg16(v1) {
  6201          self.require(ISA_CMOV)
  6202          p.domain = DomainGeneric
  6203          p.add(0, func(m *_Encoding, v []interface{}) {
  6204              m.emit(0x66)
  6205              m.rexo(hcode(v[1]), addr(v[0]), false)
  6206              m.emit(0x0f)
  6207              m.emit(0x4b)
  6208              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  6209          })
  6210      }
  6211      // CMOVNP r32, r32
  6212      if isReg32(v0) && isReg32(v1) {
  6213          self.require(ISA_CMOV)
  6214          p.domain = DomainGeneric
  6215          p.add(0, func(m *_Encoding, v []interface{}) {
  6216              m.rexo(hcode(v[1]), v[0], false)
  6217              m.emit(0x0f)
  6218              m.emit(0x4b)
  6219              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  6220          })
  6221      }
  6222      // CMOVNP m32, r32
  6223      if isM32(v0) && isReg32(v1) {
  6224          self.require(ISA_CMOV)
  6225          p.domain = DomainGeneric
  6226          p.add(0, func(m *_Encoding, v []interface{}) {
  6227              m.rexo(hcode(v[1]), addr(v[0]), false)
  6228              m.emit(0x0f)
  6229              m.emit(0x4b)
  6230              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  6231          })
  6232      }
  6233      // CMOVNP r64, r64
  6234      if isReg64(v0) && isReg64(v1) {
  6235          self.require(ISA_CMOV)
  6236          p.domain = DomainGeneric
  6237          p.add(0, func(m *_Encoding, v []interface{}) {
  6238              m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
  6239              m.emit(0x0f)
  6240              m.emit(0x4b)
  6241              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  6242          })
  6243      }
  6244      // CMOVNP m64, r64
  6245      if isM64(v0) && isReg64(v1) {
  6246          self.require(ISA_CMOV)
  6247          p.domain = DomainGeneric
  6248          p.add(0, func(m *_Encoding, v []interface{}) {
  6249              m.rexm(1, hcode(v[1]), addr(v[0]))
  6250              m.emit(0x0f)
  6251              m.emit(0x4b)
  6252              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  6253          })
  6254      }
  6255      if p.len == 0 {
  6256          panic("invalid operands for CMOVNP")
  6257      }
  6258      return p
  6259  }
  6260  
  6261  // CMOVNS performs "Move if not sign (SF == 0)".
  6262  //
  6263  // Mnemonic        : CMOVNS
  6264  // Supported forms : (6 forms)
  6265  //
  6266  //    * CMOVNS r16, r16    [CMOV]
  6267  //    * CMOVNS m16, r16    [CMOV]
  6268  //    * CMOVNS r32, r32    [CMOV]
  6269  //    * CMOVNS m32, r32    [CMOV]
  6270  //    * CMOVNS r64, r64    [CMOV]
  6271  //    * CMOVNS m64, r64    [CMOV]
  6272  //
  6273  func (self *Program) CMOVNS(v0 interface{}, v1 interface{}) *Instruction {
  6274      p := self.alloc("CMOVNS", 2, Operands { v0, v1 })
  6275      // CMOVNS r16, r16
  6276      if isReg16(v0) && isReg16(v1) {
  6277          self.require(ISA_CMOV)
  6278          p.domain = DomainGeneric
  6279          p.add(0, func(m *_Encoding, v []interface{}) {
  6280              m.emit(0x66)
  6281              m.rexo(hcode(v[1]), v[0], false)
  6282              m.emit(0x0f)
  6283              m.emit(0x49)
  6284              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  6285          })
  6286      }
  6287      // CMOVNS m16, r16
  6288      if isM16(v0) && isReg16(v1) {
  6289          self.require(ISA_CMOV)
  6290          p.domain = DomainGeneric
  6291          p.add(0, func(m *_Encoding, v []interface{}) {
  6292              m.emit(0x66)
  6293              m.rexo(hcode(v[1]), addr(v[0]), false)
  6294              m.emit(0x0f)
  6295              m.emit(0x49)
  6296              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  6297          })
  6298      }
  6299      // CMOVNS r32, r32
  6300      if isReg32(v0) && isReg32(v1) {
  6301          self.require(ISA_CMOV)
  6302          p.domain = DomainGeneric
  6303          p.add(0, func(m *_Encoding, v []interface{}) {
  6304              m.rexo(hcode(v[1]), v[0], false)
  6305              m.emit(0x0f)
  6306              m.emit(0x49)
  6307              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  6308          })
  6309      }
  6310      // CMOVNS m32, r32
  6311      if isM32(v0) && isReg32(v1) {
  6312          self.require(ISA_CMOV)
  6313          p.domain = DomainGeneric
  6314          p.add(0, func(m *_Encoding, v []interface{}) {
  6315              m.rexo(hcode(v[1]), addr(v[0]), false)
  6316              m.emit(0x0f)
  6317              m.emit(0x49)
  6318              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  6319          })
  6320      }
  6321      // CMOVNS r64, r64
  6322      if isReg64(v0) && isReg64(v1) {
  6323          self.require(ISA_CMOV)
  6324          p.domain = DomainGeneric
  6325          p.add(0, func(m *_Encoding, v []interface{}) {
  6326              m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
  6327              m.emit(0x0f)
  6328              m.emit(0x49)
  6329              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  6330          })
  6331      }
  6332      // CMOVNS m64, r64
  6333      if isM64(v0) && isReg64(v1) {
  6334          self.require(ISA_CMOV)
  6335          p.domain = DomainGeneric
  6336          p.add(0, func(m *_Encoding, v []interface{}) {
  6337              m.rexm(1, hcode(v[1]), addr(v[0]))
  6338              m.emit(0x0f)
  6339              m.emit(0x49)
  6340              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  6341          })
  6342      }
  6343      if p.len == 0 {
  6344          panic("invalid operands for CMOVNS")
  6345      }
  6346      return p
  6347  }
  6348  
  6349  // CMOVNZ performs "Move if not zero (ZF == 0)".
  6350  //
  6351  // Mnemonic        : CMOVNZ
  6352  // Supported forms : (6 forms)
  6353  //
  6354  //    * CMOVNZ r16, r16    [CMOV]
  6355  //    * CMOVNZ m16, r16    [CMOV]
  6356  //    * CMOVNZ r32, r32    [CMOV]
  6357  //    * CMOVNZ m32, r32    [CMOV]
  6358  //    * CMOVNZ r64, r64    [CMOV]
  6359  //    * CMOVNZ m64, r64    [CMOV]
  6360  //
  6361  func (self *Program) CMOVNZ(v0 interface{}, v1 interface{}) *Instruction {
  6362      p := self.alloc("CMOVNZ", 2, Operands { v0, v1 })
  6363      // CMOVNZ r16, r16
  6364      if isReg16(v0) && isReg16(v1) {
  6365          self.require(ISA_CMOV)
  6366          p.domain = DomainGeneric
  6367          p.add(0, func(m *_Encoding, v []interface{}) {
  6368              m.emit(0x66)
  6369              m.rexo(hcode(v[1]), v[0], false)
  6370              m.emit(0x0f)
  6371              m.emit(0x45)
  6372              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  6373          })
  6374      }
  6375      // CMOVNZ m16, r16
  6376      if isM16(v0) && isReg16(v1) {
  6377          self.require(ISA_CMOV)
  6378          p.domain = DomainGeneric
  6379          p.add(0, func(m *_Encoding, v []interface{}) {
  6380              m.emit(0x66)
  6381              m.rexo(hcode(v[1]), addr(v[0]), false)
  6382              m.emit(0x0f)
  6383              m.emit(0x45)
  6384              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  6385          })
  6386      }
  6387      // CMOVNZ r32, r32
  6388      if isReg32(v0) && isReg32(v1) {
  6389          self.require(ISA_CMOV)
  6390          p.domain = DomainGeneric
  6391          p.add(0, func(m *_Encoding, v []interface{}) {
  6392              m.rexo(hcode(v[1]), v[0], false)
  6393              m.emit(0x0f)
  6394              m.emit(0x45)
  6395              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  6396          })
  6397      }
  6398      // CMOVNZ m32, r32
  6399      if isM32(v0) && isReg32(v1) {
  6400          self.require(ISA_CMOV)
  6401          p.domain = DomainGeneric
  6402          p.add(0, func(m *_Encoding, v []interface{}) {
  6403              m.rexo(hcode(v[1]), addr(v[0]), false)
  6404              m.emit(0x0f)
  6405              m.emit(0x45)
  6406              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  6407          })
  6408      }
  6409      // CMOVNZ r64, r64
  6410      if isReg64(v0) && isReg64(v1) {
  6411          self.require(ISA_CMOV)
  6412          p.domain = DomainGeneric
  6413          p.add(0, func(m *_Encoding, v []interface{}) {
  6414              m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
  6415              m.emit(0x0f)
  6416              m.emit(0x45)
  6417              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  6418          })
  6419      }
  6420      // CMOVNZ m64, r64
  6421      if isM64(v0) && isReg64(v1) {
  6422          self.require(ISA_CMOV)
  6423          p.domain = DomainGeneric
  6424          p.add(0, func(m *_Encoding, v []interface{}) {
  6425              m.rexm(1, hcode(v[1]), addr(v[0]))
  6426              m.emit(0x0f)
  6427              m.emit(0x45)
  6428              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  6429          })
  6430      }
  6431      if p.len == 0 {
  6432          panic("invalid operands for CMOVNZ")
  6433      }
  6434      return p
  6435  }
  6436  
  6437  // CMOVO performs "Move if overflow (OF == 1)".
  6438  //
  6439  // Mnemonic        : CMOVO
  6440  // Supported forms : (6 forms)
  6441  //
  6442  //    * CMOVO r16, r16    [CMOV]
  6443  //    * CMOVO m16, r16    [CMOV]
  6444  //    * CMOVO r32, r32    [CMOV]
  6445  //    * CMOVO m32, r32    [CMOV]
  6446  //    * CMOVO r64, r64    [CMOV]
  6447  //    * CMOVO m64, r64    [CMOV]
  6448  //
  6449  func (self *Program) CMOVO(v0 interface{}, v1 interface{}) *Instruction {
  6450      p := self.alloc("CMOVO", 2, Operands { v0, v1 })
  6451      // CMOVO r16, r16
  6452      if isReg16(v0) && isReg16(v1) {
  6453          self.require(ISA_CMOV)
  6454          p.domain = DomainGeneric
  6455          p.add(0, func(m *_Encoding, v []interface{}) {
  6456              m.emit(0x66)
  6457              m.rexo(hcode(v[1]), v[0], false)
  6458              m.emit(0x0f)
  6459              m.emit(0x40)
  6460              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  6461          })
  6462      }
  6463      // CMOVO m16, r16
  6464      if isM16(v0) && isReg16(v1) {
  6465          self.require(ISA_CMOV)
  6466          p.domain = DomainGeneric
  6467          p.add(0, func(m *_Encoding, v []interface{}) {
  6468              m.emit(0x66)
  6469              m.rexo(hcode(v[1]), addr(v[0]), false)
  6470              m.emit(0x0f)
  6471              m.emit(0x40)
  6472              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  6473          })
  6474      }
  6475      // CMOVO r32, r32
  6476      if isReg32(v0) && isReg32(v1) {
  6477          self.require(ISA_CMOV)
  6478          p.domain = DomainGeneric
  6479          p.add(0, func(m *_Encoding, v []interface{}) {
  6480              m.rexo(hcode(v[1]), v[0], false)
  6481              m.emit(0x0f)
  6482              m.emit(0x40)
  6483              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  6484          })
  6485      }
  6486      // CMOVO m32, r32
  6487      if isM32(v0) && isReg32(v1) {
  6488          self.require(ISA_CMOV)
  6489          p.domain = DomainGeneric
  6490          p.add(0, func(m *_Encoding, v []interface{}) {
  6491              m.rexo(hcode(v[1]), addr(v[0]), false)
  6492              m.emit(0x0f)
  6493              m.emit(0x40)
  6494              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  6495          })
  6496      }
  6497      // CMOVO r64, r64
  6498      if isReg64(v0) && isReg64(v1) {
  6499          self.require(ISA_CMOV)
  6500          p.domain = DomainGeneric
  6501          p.add(0, func(m *_Encoding, v []interface{}) {
  6502              m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
  6503              m.emit(0x0f)
  6504              m.emit(0x40)
  6505              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  6506          })
  6507      }
  6508      // CMOVO m64, r64
  6509      if isM64(v0) && isReg64(v1) {
  6510          self.require(ISA_CMOV)
  6511          p.domain = DomainGeneric
  6512          p.add(0, func(m *_Encoding, v []interface{}) {
  6513              m.rexm(1, hcode(v[1]), addr(v[0]))
  6514              m.emit(0x0f)
  6515              m.emit(0x40)
  6516              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  6517          })
  6518      }
  6519      if p.len == 0 {
  6520          panic("invalid operands for CMOVO")
  6521      }
  6522      return p
  6523  }
  6524  
  6525  // CMOVP performs "Move if parity (PF == 1)".
  6526  //
  6527  // Mnemonic        : CMOVP
  6528  // Supported forms : (6 forms)
  6529  //
  6530  //    * CMOVP r16, r16    [CMOV]
  6531  //    * CMOVP m16, r16    [CMOV]
  6532  //    * CMOVP r32, r32    [CMOV]
  6533  //    * CMOVP m32, r32    [CMOV]
  6534  //    * CMOVP r64, r64    [CMOV]
  6535  //    * CMOVP m64, r64    [CMOV]
  6536  //
  6537  func (self *Program) CMOVP(v0 interface{}, v1 interface{}) *Instruction {
  6538      p := self.alloc("CMOVP", 2, Operands { v0, v1 })
  6539      // CMOVP r16, r16
  6540      if isReg16(v0) && isReg16(v1) {
  6541          self.require(ISA_CMOV)
  6542          p.domain = DomainGeneric
  6543          p.add(0, func(m *_Encoding, v []interface{}) {
  6544              m.emit(0x66)
  6545              m.rexo(hcode(v[1]), v[0], false)
  6546              m.emit(0x0f)
  6547              m.emit(0x4a)
  6548              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  6549          })
  6550      }
  6551      // CMOVP m16, r16
  6552      if isM16(v0) && isReg16(v1) {
  6553          self.require(ISA_CMOV)
  6554          p.domain = DomainGeneric
  6555          p.add(0, func(m *_Encoding, v []interface{}) {
  6556              m.emit(0x66)
  6557              m.rexo(hcode(v[1]), addr(v[0]), false)
  6558              m.emit(0x0f)
  6559              m.emit(0x4a)
  6560              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  6561          })
  6562      }
  6563      // CMOVP r32, r32
  6564      if isReg32(v0) && isReg32(v1) {
  6565          self.require(ISA_CMOV)
  6566          p.domain = DomainGeneric
  6567          p.add(0, func(m *_Encoding, v []interface{}) {
  6568              m.rexo(hcode(v[1]), v[0], false)
  6569              m.emit(0x0f)
  6570              m.emit(0x4a)
  6571              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  6572          })
  6573      }
  6574      // CMOVP m32, r32
  6575      if isM32(v0) && isReg32(v1) {
  6576          self.require(ISA_CMOV)
  6577          p.domain = DomainGeneric
  6578          p.add(0, func(m *_Encoding, v []interface{}) {
  6579              m.rexo(hcode(v[1]), addr(v[0]), false)
  6580              m.emit(0x0f)
  6581              m.emit(0x4a)
  6582              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  6583          })
  6584      }
  6585      // CMOVP r64, r64
  6586      if isReg64(v0) && isReg64(v1) {
  6587          self.require(ISA_CMOV)
  6588          p.domain = DomainGeneric
  6589          p.add(0, func(m *_Encoding, v []interface{}) {
  6590              m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
  6591              m.emit(0x0f)
  6592              m.emit(0x4a)
  6593              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  6594          })
  6595      }
  6596      // CMOVP m64, r64
  6597      if isM64(v0) && isReg64(v1) {
  6598          self.require(ISA_CMOV)
  6599          p.domain = DomainGeneric
  6600          p.add(0, func(m *_Encoding, v []interface{}) {
  6601              m.rexm(1, hcode(v[1]), addr(v[0]))
  6602              m.emit(0x0f)
  6603              m.emit(0x4a)
  6604              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  6605          })
  6606      }
  6607      if p.len == 0 {
  6608          panic("invalid operands for CMOVP")
  6609      }
  6610      return p
  6611  }
  6612  
  6613  // CMOVPE performs "Move if parity even (PF == 1)".
  6614  //
  6615  // Mnemonic        : CMOVPE
  6616  // Supported forms : (6 forms)
  6617  //
  6618  //    * CMOVPE r16, r16    [CMOV]
  6619  //    * CMOVPE m16, r16    [CMOV]
  6620  //    * CMOVPE r32, r32    [CMOV]
  6621  //    * CMOVPE m32, r32    [CMOV]
  6622  //    * CMOVPE r64, r64    [CMOV]
  6623  //    * CMOVPE m64, r64    [CMOV]
  6624  //
  6625  func (self *Program) CMOVPE(v0 interface{}, v1 interface{}) *Instruction {
  6626      p := self.alloc("CMOVPE", 2, Operands { v0, v1 })
  6627      // CMOVPE r16, r16
  6628      if isReg16(v0) && isReg16(v1) {
  6629          self.require(ISA_CMOV)
  6630          p.domain = DomainGeneric
  6631          p.add(0, func(m *_Encoding, v []interface{}) {
  6632              m.emit(0x66)
  6633              m.rexo(hcode(v[1]), v[0], false)
  6634              m.emit(0x0f)
  6635              m.emit(0x4a)
  6636              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  6637          })
  6638      }
  6639      // CMOVPE m16, r16
  6640      if isM16(v0) && isReg16(v1) {
  6641          self.require(ISA_CMOV)
  6642          p.domain = DomainGeneric
  6643          p.add(0, func(m *_Encoding, v []interface{}) {
  6644              m.emit(0x66)
  6645              m.rexo(hcode(v[1]), addr(v[0]), false)
  6646              m.emit(0x0f)
  6647              m.emit(0x4a)
  6648              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  6649          })
  6650      }
  6651      // CMOVPE r32, r32
  6652      if isReg32(v0) && isReg32(v1) {
  6653          self.require(ISA_CMOV)
  6654          p.domain = DomainGeneric
  6655          p.add(0, func(m *_Encoding, v []interface{}) {
  6656              m.rexo(hcode(v[1]), v[0], false)
  6657              m.emit(0x0f)
  6658              m.emit(0x4a)
  6659              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  6660          })
  6661      }
  6662      // CMOVPE m32, r32
  6663      if isM32(v0) && isReg32(v1) {
  6664          self.require(ISA_CMOV)
  6665          p.domain = DomainGeneric
  6666          p.add(0, func(m *_Encoding, v []interface{}) {
  6667              m.rexo(hcode(v[1]), addr(v[0]), false)
  6668              m.emit(0x0f)
  6669              m.emit(0x4a)
  6670              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  6671          })
  6672      }
  6673      // CMOVPE r64, r64
  6674      if isReg64(v0) && isReg64(v1) {
  6675          self.require(ISA_CMOV)
  6676          p.domain = DomainGeneric
  6677          p.add(0, func(m *_Encoding, v []interface{}) {
  6678              m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
  6679              m.emit(0x0f)
  6680              m.emit(0x4a)
  6681              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  6682          })
  6683      }
  6684      // CMOVPE m64, r64
  6685      if isM64(v0) && isReg64(v1) {
  6686          self.require(ISA_CMOV)
  6687          p.domain = DomainGeneric
  6688          p.add(0, func(m *_Encoding, v []interface{}) {
  6689              m.rexm(1, hcode(v[1]), addr(v[0]))
  6690              m.emit(0x0f)
  6691              m.emit(0x4a)
  6692              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  6693          })
  6694      }
  6695      if p.len == 0 {
  6696          panic("invalid operands for CMOVPE")
  6697      }
  6698      return p
  6699  }
  6700  
  6701  // CMOVPO performs "Move if parity odd (PF == 0)".
  6702  //
  6703  // Mnemonic        : CMOVPO
  6704  // Supported forms : (6 forms)
  6705  //
  6706  //    * CMOVPO r16, r16    [CMOV]
  6707  //    * CMOVPO m16, r16    [CMOV]
  6708  //    * CMOVPO r32, r32    [CMOV]
  6709  //    * CMOVPO m32, r32    [CMOV]
  6710  //    * CMOVPO r64, r64    [CMOV]
  6711  //    * CMOVPO m64, r64    [CMOV]
  6712  //
  6713  func (self *Program) CMOVPO(v0 interface{}, v1 interface{}) *Instruction {
  6714      p := self.alloc("CMOVPO", 2, Operands { v0, v1 })
  6715      // CMOVPO r16, r16
  6716      if isReg16(v0) && isReg16(v1) {
  6717          self.require(ISA_CMOV)
  6718          p.domain = DomainGeneric
  6719          p.add(0, func(m *_Encoding, v []interface{}) {
  6720              m.emit(0x66)
  6721              m.rexo(hcode(v[1]), v[0], false)
  6722              m.emit(0x0f)
  6723              m.emit(0x4b)
  6724              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  6725          })
  6726      }
  6727      // CMOVPO m16, r16
  6728      if isM16(v0) && isReg16(v1) {
  6729          self.require(ISA_CMOV)
  6730          p.domain = DomainGeneric
  6731          p.add(0, func(m *_Encoding, v []interface{}) {
  6732              m.emit(0x66)
  6733              m.rexo(hcode(v[1]), addr(v[0]), false)
  6734              m.emit(0x0f)
  6735              m.emit(0x4b)
  6736              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  6737          })
  6738      }
  6739      // CMOVPO r32, r32
  6740      if isReg32(v0) && isReg32(v1) {
  6741          self.require(ISA_CMOV)
  6742          p.domain = DomainGeneric
  6743          p.add(0, func(m *_Encoding, v []interface{}) {
  6744              m.rexo(hcode(v[1]), v[0], false)
  6745              m.emit(0x0f)
  6746              m.emit(0x4b)
  6747              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  6748          })
  6749      }
  6750      // CMOVPO m32, r32
  6751      if isM32(v0) && isReg32(v1) {
  6752          self.require(ISA_CMOV)
  6753          p.domain = DomainGeneric
  6754          p.add(0, func(m *_Encoding, v []interface{}) {
  6755              m.rexo(hcode(v[1]), addr(v[0]), false)
  6756              m.emit(0x0f)
  6757              m.emit(0x4b)
  6758              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  6759          })
  6760      }
  6761      // CMOVPO r64, r64
  6762      if isReg64(v0) && isReg64(v1) {
  6763          self.require(ISA_CMOV)
  6764          p.domain = DomainGeneric
  6765          p.add(0, func(m *_Encoding, v []interface{}) {
  6766              m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
  6767              m.emit(0x0f)
  6768              m.emit(0x4b)
  6769              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  6770          })
  6771      }
  6772      // CMOVPO m64, r64
  6773      if isM64(v0) && isReg64(v1) {
  6774          self.require(ISA_CMOV)
  6775          p.domain = DomainGeneric
  6776          p.add(0, func(m *_Encoding, v []interface{}) {
  6777              m.rexm(1, hcode(v[1]), addr(v[0]))
  6778              m.emit(0x0f)
  6779              m.emit(0x4b)
  6780              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  6781          })
  6782      }
  6783      if p.len == 0 {
  6784          panic("invalid operands for CMOVPO")
  6785      }
  6786      return p
  6787  }
  6788  
  6789  // CMOVS performs "Move if sign (SF == 1)".
  6790  //
  6791  // Mnemonic        : CMOVS
  6792  // Supported forms : (6 forms)
  6793  //
  6794  //    * CMOVS r16, r16    [CMOV]
  6795  //    * CMOVS m16, r16    [CMOV]
  6796  //    * CMOVS r32, r32    [CMOV]
  6797  //    * CMOVS m32, r32    [CMOV]
  6798  //    * CMOVS r64, r64    [CMOV]
  6799  //    * CMOVS m64, r64    [CMOV]
  6800  //
  6801  func (self *Program) CMOVS(v0 interface{}, v1 interface{}) *Instruction {
  6802      p := self.alloc("CMOVS", 2, Operands { v0, v1 })
  6803      // CMOVS r16, r16
  6804      if isReg16(v0) && isReg16(v1) {
  6805          self.require(ISA_CMOV)
  6806          p.domain = DomainGeneric
  6807          p.add(0, func(m *_Encoding, v []interface{}) {
  6808              m.emit(0x66)
  6809              m.rexo(hcode(v[1]), v[0], false)
  6810              m.emit(0x0f)
  6811              m.emit(0x48)
  6812              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  6813          })
  6814      }
  6815      // CMOVS m16, r16
  6816      if isM16(v0) && isReg16(v1) {
  6817          self.require(ISA_CMOV)
  6818          p.domain = DomainGeneric
  6819          p.add(0, func(m *_Encoding, v []interface{}) {
  6820              m.emit(0x66)
  6821              m.rexo(hcode(v[1]), addr(v[0]), false)
  6822              m.emit(0x0f)
  6823              m.emit(0x48)
  6824              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  6825          })
  6826      }
  6827      // CMOVS r32, r32
  6828      if isReg32(v0) && isReg32(v1) {
  6829          self.require(ISA_CMOV)
  6830          p.domain = DomainGeneric
  6831          p.add(0, func(m *_Encoding, v []interface{}) {
  6832              m.rexo(hcode(v[1]), v[0], false)
  6833              m.emit(0x0f)
  6834              m.emit(0x48)
  6835              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  6836          })
  6837      }
  6838      // CMOVS m32, r32
  6839      if isM32(v0) && isReg32(v1) {
  6840          self.require(ISA_CMOV)
  6841          p.domain = DomainGeneric
  6842          p.add(0, func(m *_Encoding, v []interface{}) {
  6843              m.rexo(hcode(v[1]), addr(v[0]), false)
  6844              m.emit(0x0f)
  6845              m.emit(0x48)
  6846              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  6847          })
  6848      }
  6849      // CMOVS r64, r64
  6850      if isReg64(v0) && isReg64(v1) {
  6851          self.require(ISA_CMOV)
  6852          p.domain = DomainGeneric
  6853          p.add(0, func(m *_Encoding, v []interface{}) {
  6854              m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
  6855              m.emit(0x0f)
  6856              m.emit(0x48)
  6857              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  6858          })
  6859      }
  6860      // CMOVS m64, r64
  6861      if isM64(v0) && isReg64(v1) {
  6862          self.require(ISA_CMOV)
  6863          p.domain = DomainGeneric
  6864          p.add(0, func(m *_Encoding, v []interface{}) {
  6865              m.rexm(1, hcode(v[1]), addr(v[0]))
  6866              m.emit(0x0f)
  6867              m.emit(0x48)
  6868              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  6869          })
  6870      }
  6871      if p.len == 0 {
  6872          panic("invalid operands for CMOVS")
  6873      }
  6874      return p
  6875  }
  6876  
  6877  // CMOVZ performs "Move if zero (ZF == 1)".
  6878  //
  6879  // Mnemonic        : CMOVZ
  6880  // Supported forms : (6 forms)
  6881  //
  6882  //    * CMOVZ r16, r16    [CMOV]
  6883  //    * CMOVZ m16, r16    [CMOV]
  6884  //    * CMOVZ r32, r32    [CMOV]
  6885  //    * CMOVZ m32, r32    [CMOV]
  6886  //    * CMOVZ r64, r64    [CMOV]
  6887  //    * CMOVZ m64, r64    [CMOV]
  6888  //
  6889  func (self *Program) CMOVZ(v0 interface{}, v1 interface{}) *Instruction {
  6890      p := self.alloc("CMOVZ", 2, Operands { v0, v1 })
  6891      // CMOVZ r16, r16
  6892      if isReg16(v0) && isReg16(v1) {
  6893          self.require(ISA_CMOV)
  6894          p.domain = DomainGeneric
  6895          p.add(0, func(m *_Encoding, v []interface{}) {
  6896              m.emit(0x66)
  6897              m.rexo(hcode(v[1]), v[0], false)
  6898              m.emit(0x0f)
  6899              m.emit(0x44)
  6900              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  6901          })
  6902      }
  6903      // CMOVZ m16, r16
  6904      if isM16(v0) && isReg16(v1) {
  6905          self.require(ISA_CMOV)
  6906          p.domain = DomainGeneric
  6907          p.add(0, func(m *_Encoding, v []interface{}) {
  6908              m.emit(0x66)
  6909              m.rexo(hcode(v[1]), addr(v[0]), false)
  6910              m.emit(0x0f)
  6911              m.emit(0x44)
  6912              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  6913          })
  6914      }
  6915      // CMOVZ r32, r32
  6916      if isReg32(v0) && isReg32(v1) {
  6917          self.require(ISA_CMOV)
  6918          p.domain = DomainGeneric
  6919          p.add(0, func(m *_Encoding, v []interface{}) {
  6920              m.rexo(hcode(v[1]), v[0], false)
  6921              m.emit(0x0f)
  6922              m.emit(0x44)
  6923              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  6924          })
  6925      }
  6926      // CMOVZ m32, r32
  6927      if isM32(v0) && isReg32(v1) {
  6928          self.require(ISA_CMOV)
  6929          p.domain = DomainGeneric
  6930          p.add(0, func(m *_Encoding, v []interface{}) {
  6931              m.rexo(hcode(v[1]), addr(v[0]), false)
  6932              m.emit(0x0f)
  6933              m.emit(0x44)
  6934              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  6935          })
  6936      }
  6937      // CMOVZ r64, r64
  6938      if isReg64(v0) && isReg64(v1) {
  6939          self.require(ISA_CMOV)
  6940          p.domain = DomainGeneric
  6941          p.add(0, func(m *_Encoding, v []interface{}) {
  6942              m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
  6943              m.emit(0x0f)
  6944              m.emit(0x44)
  6945              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  6946          })
  6947      }
  6948      // CMOVZ m64, r64
  6949      if isM64(v0) && isReg64(v1) {
  6950          self.require(ISA_CMOV)
  6951          p.domain = DomainGeneric
  6952          p.add(0, func(m *_Encoding, v []interface{}) {
  6953              m.rexm(1, hcode(v[1]), addr(v[0]))
  6954              m.emit(0x0f)
  6955              m.emit(0x44)
  6956              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  6957          })
  6958      }
  6959      if p.len == 0 {
  6960          panic("invalid operands for CMOVZ")
  6961      }
  6962      return p
  6963  }
  6964  
  6965  // CMPB performs "Compare Two Operands".
  6966  //
  6967  // Mnemonic        : CMP
  6968  // Supported forms : (6 forms)
  6969  //
  6970  //    * CMPB imm8, al
  6971  //    * CMPB imm8, r8
  6972  //    * CMPB r8, r8
  6973  //    * CMPB m8, r8
  6974  //    * CMPB imm8, m8
  6975  //    * CMPB r8, m8
  6976  //
  6977  func (self *Program) CMPB(v0 interface{}, v1 interface{}) *Instruction {
  6978      p := self.alloc("CMPB", 2, Operands { v0, v1 })
  6979      // CMPB imm8, al
  6980      if isImm8(v0) && v1 == AL {
  6981          p.domain = DomainGeneric
  6982          p.add(0, func(m *_Encoding, v []interface{}) {
  6983              m.emit(0x3c)
  6984              m.imm1(toImmAny(v[0]))
  6985          })
  6986      }
  6987      // CMPB imm8, r8
  6988      if isImm8(v0) && isReg8(v1) {
  6989          p.domain = DomainGeneric
  6990          p.add(0, func(m *_Encoding, v []interface{}) {
  6991              m.rexo(0, v[1], isReg8REX(v[1]))
  6992              m.emit(0x80)
  6993              m.emit(0xf8 | lcode(v[1]))
  6994              m.imm1(toImmAny(v[0]))
  6995          })
  6996      }
  6997      // CMPB r8, r8
  6998      if isReg8(v0) && isReg8(v1) {
  6999          p.domain = DomainGeneric
  7000          p.add(0, func(m *_Encoding, v []interface{}) {
  7001              m.rexo(hcode(v[0]), v[1], isReg8REX(v[0]) || isReg8REX(v[1]))
  7002              m.emit(0x38)
  7003              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
  7004          })
  7005          p.add(0, func(m *_Encoding, v []interface{}) {
  7006              m.rexo(hcode(v[1]), v[0], isReg8REX(v[0]) || isReg8REX(v[1]))
  7007              m.emit(0x3a)
  7008              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  7009          })
  7010      }
  7011      // CMPB m8, r8
  7012      if isM8(v0) && isReg8(v1) {
  7013          p.domain = DomainGeneric
  7014          p.add(0, func(m *_Encoding, v []interface{}) {
  7015              m.rexo(hcode(v[1]), addr(v[0]), isReg8REX(v[1]))
  7016              m.emit(0x3a)
  7017              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  7018          })
  7019      }
  7020      // CMPB imm8, m8
  7021      if isImm8(v0) && isM8(v1) {
  7022          p.domain = DomainGeneric
  7023          p.add(0, func(m *_Encoding, v []interface{}) {
  7024              m.rexo(0, addr(v[1]), false)
  7025              m.emit(0x80)
  7026              m.mrsd(7, addr(v[1]), 1)
  7027              m.imm1(toImmAny(v[0]))
  7028          })
  7029      }
  7030      // CMPB r8, m8
  7031      if isReg8(v0) && isM8(v1) {
  7032          p.domain = DomainGeneric
  7033          p.add(0, func(m *_Encoding, v []interface{}) {
  7034              m.rexo(hcode(v[0]), addr(v[1]), isReg8REX(v[0]))
  7035              m.emit(0x38)
  7036              m.mrsd(lcode(v[0]), addr(v[1]), 1)
  7037          })
  7038      }
  7039      if p.len == 0 {
  7040          panic("invalid operands for CMPB")
  7041      }
  7042      return p
  7043  }
  7044  
  7045  // CMPL performs "Compare Two Operands".
  7046  //
  7047  // Mnemonic        : CMP
  7048  // Supported forms : (8 forms)
  7049  //
  7050  //    * CMPL imm32, eax
  7051  //    * CMPL imm8, r32
  7052  //    * CMPL imm32, r32
  7053  //    * CMPL r32, r32
  7054  //    * CMPL m32, r32
  7055  //    * CMPL imm8, m32
  7056  //    * CMPL imm32, m32
  7057  //    * CMPL r32, m32
  7058  //
  7059  func (self *Program) CMPL(v0 interface{}, v1 interface{}) *Instruction {
  7060      p := self.alloc("CMPL", 2, Operands { v0, v1 })
  7061      // CMPL imm32, eax
  7062      if isImm32(v0) && v1 == EAX {
  7063          p.domain = DomainGeneric
  7064          p.add(0, func(m *_Encoding, v []interface{}) {
  7065              m.emit(0x3d)
  7066              m.imm4(toImmAny(v[0]))
  7067          })
  7068      }
  7069      // CMPL imm8, r32
  7070      if isImm8Ext(v0, 4) && isReg32(v1) {
  7071          p.domain = DomainGeneric
  7072          p.add(0, func(m *_Encoding, v []interface{}) {
  7073              m.rexo(0, v[1], false)
  7074              m.emit(0x83)
  7075              m.emit(0xf8 | lcode(v[1]))
  7076              m.imm1(toImmAny(v[0]))
  7077          })
  7078      }
  7079      // CMPL imm32, r32
  7080      if isImm32(v0) && isReg32(v1) {
  7081          p.domain = DomainGeneric
  7082          p.add(0, func(m *_Encoding, v []interface{}) {
  7083              m.rexo(0, v[1], false)
  7084              m.emit(0x81)
  7085              m.emit(0xf8 | lcode(v[1]))
  7086              m.imm4(toImmAny(v[0]))
  7087          })
  7088      }
  7089      // CMPL r32, r32
  7090      if isReg32(v0) && isReg32(v1) {
  7091          p.domain = DomainGeneric
  7092          p.add(0, func(m *_Encoding, v []interface{}) {
  7093              m.rexo(hcode(v[0]), v[1], false)
  7094              m.emit(0x39)
  7095              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
  7096          })
  7097          p.add(0, func(m *_Encoding, v []interface{}) {
  7098              m.rexo(hcode(v[1]), v[0], false)
  7099              m.emit(0x3b)
  7100              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  7101          })
  7102      }
  7103      // CMPL m32, r32
  7104      if isM32(v0) && isReg32(v1) {
  7105          p.domain = DomainGeneric
  7106          p.add(0, func(m *_Encoding, v []interface{}) {
  7107              m.rexo(hcode(v[1]), addr(v[0]), false)
  7108              m.emit(0x3b)
  7109              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  7110          })
  7111      }
  7112      // CMPL imm8, m32
  7113      if isImm8Ext(v0, 4) && isM32(v1) {
  7114          p.domain = DomainGeneric
  7115          p.add(0, func(m *_Encoding, v []interface{}) {
  7116              m.rexo(0, addr(v[1]), false)
  7117              m.emit(0x83)
  7118              m.mrsd(7, addr(v[1]), 1)
  7119              m.imm1(toImmAny(v[0]))
  7120          })
  7121      }
  7122      // CMPL imm32, m32
  7123      if isImm32(v0) && isM32(v1) {
  7124          p.domain = DomainGeneric
  7125          p.add(0, func(m *_Encoding, v []interface{}) {
  7126              m.rexo(0, addr(v[1]), false)
  7127              m.emit(0x81)
  7128              m.mrsd(7, addr(v[1]), 1)
  7129              m.imm4(toImmAny(v[0]))
  7130          })
  7131      }
  7132      // CMPL r32, m32
  7133      if isReg32(v0) && isM32(v1) {
  7134          p.domain = DomainGeneric
  7135          p.add(0, func(m *_Encoding, v []interface{}) {
  7136              m.rexo(hcode(v[0]), addr(v[1]), false)
  7137              m.emit(0x39)
  7138              m.mrsd(lcode(v[0]), addr(v[1]), 1)
  7139          })
  7140      }
  7141      if p.len == 0 {
  7142          panic("invalid operands for CMPL")
  7143      }
  7144      return p
  7145  }
  7146  
  7147  // CMPPD performs "Compare Packed Double-Precision Floating-Point Values".
  7148  //
  7149  // Mnemonic        : CMPPD
  7150  // Supported forms : (2 forms)
  7151  //
  7152  //    * CMPPD imm8, xmm, xmm     [SSE2]
  7153  //    * CMPPD imm8, m128, xmm    [SSE2]
  7154  //
  7155  func (self *Program) CMPPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
  7156      p := self.alloc("CMPPD", 3, Operands { v0, v1, v2 })
  7157      // CMPPD imm8, xmm, xmm
  7158      if isImm8(v0) && isXMM(v1) && isXMM(v2) {
  7159          self.require(ISA_SSE2)
  7160          p.domain = DomainMMXSSE
  7161          p.add(0, func(m *_Encoding, v []interface{}) {
  7162              m.emit(0x66)
  7163              m.rexo(hcode(v[2]), v[1], false)
  7164              m.emit(0x0f)
  7165              m.emit(0xc2)
  7166              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
  7167              m.imm1(toImmAny(v[0]))
  7168          })
  7169      }
  7170      // CMPPD imm8, m128, xmm
  7171      if isImm8(v0) && isM128(v1) && isXMM(v2) {
  7172          self.require(ISA_SSE2)
  7173          p.domain = DomainMMXSSE
  7174          p.add(0, func(m *_Encoding, v []interface{}) {
  7175              m.emit(0x66)
  7176              m.rexo(hcode(v[2]), addr(v[1]), false)
  7177              m.emit(0x0f)
  7178              m.emit(0xc2)
  7179              m.mrsd(lcode(v[2]), addr(v[1]), 1)
  7180              m.imm1(toImmAny(v[0]))
  7181          })
  7182      }
  7183      if p.len == 0 {
  7184          panic("invalid operands for CMPPD")
  7185      }
  7186      return p
  7187  }
  7188  
  7189  // CMPPS performs "Compare Packed Single-Precision Floating-Point Values".
  7190  //
  7191  // Mnemonic        : CMPPS
  7192  // Supported forms : (2 forms)
  7193  //
  7194  //    * CMPPS imm8, xmm, xmm     [SSE]
  7195  //    * CMPPS imm8, m128, xmm    [SSE]
  7196  //
  7197  func (self *Program) CMPPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
  7198      p := self.alloc("CMPPS", 3, Operands { v0, v1, v2 })
  7199      // CMPPS imm8, xmm, xmm
  7200      if isImm8(v0) && isXMM(v1) && isXMM(v2) {
  7201          self.require(ISA_SSE)
  7202          p.domain = DomainMMXSSE
  7203          p.add(0, func(m *_Encoding, v []interface{}) {
  7204              m.rexo(hcode(v[2]), v[1], false)
  7205              m.emit(0x0f)
  7206              m.emit(0xc2)
  7207              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
  7208              m.imm1(toImmAny(v[0]))
  7209          })
  7210      }
  7211      // CMPPS imm8, m128, xmm
  7212      if isImm8(v0) && isM128(v1) && isXMM(v2) {
  7213          self.require(ISA_SSE)
  7214          p.domain = DomainMMXSSE
  7215          p.add(0, func(m *_Encoding, v []interface{}) {
  7216              m.rexo(hcode(v[2]), addr(v[1]), false)
  7217              m.emit(0x0f)
  7218              m.emit(0xc2)
  7219              m.mrsd(lcode(v[2]), addr(v[1]), 1)
  7220              m.imm1(toImmAny(v[0]))
  7221          })
  7222      }
  7223      if p.len == 0 {
  7224          panic("invalid operands for CMPPS")
  7225      }
  7226      return p
  7227  }
  7228  
  7229  // CMPQ performs "Compare Two Operands".
  7230  //
  7231  // Mnemonic        : CMP
  7232  // Supported forms : (8 forms)
  7233  //
  7234  //    * CMPQ imm32, rax
  7235  //    * CMPQ imm8, r64
  7236  //    * CMPQ imm32, r64
  7237  //    * CMPQ r64, r64
  7238  //    * CMPQ m64, r64
  7239  //    * CMPQ imm8, m64
  7240  //    * CMPQ imm32, m64
  7241  //    * CMPQ r64, m64
  7242  //
  7243  func (self *Program) CMPQ(v0 interface{}, v1 interface{}) *Instruction {
  7244      p := self.alloc("CMPQ", 2, Operands { v0, v1 })
  7245      // CMPQ imm32, rax
  7246      if isImm32(v0) && v1 == RAX {
  7247          p.domain = DomainGeneric
  7248          p.add(0, func(m *_Encoding, v []interface{}) {
  7249              m.emit(0x48)
  7250              m.emit(0x3d)
  7251              m.imm4(toImmAny(v[0]))
  7252          })
  7253      }
  7254      // CMPQ imm8, r64
  7255      if isImm8Ext(v0, 8) && isReg64(v1) {
  7256          p.domain = DomainGeneric
  7257          p.add(0, func(m *_Encoding, v []interface{}) {
  7258              m.emit(0x48 | hcode(v[1]))
  7259              m.emit(0x83)
  7260              m.emit(0xf8 | lcode(v[1]))
  7261              m.imm1(toImmAny(v[0]))
  7262          })
  7263      }
  7264      // CMPQ imm32, r64
  7265      if isImm32Ext(v0, 8) && isReg64(v1) {
  7266          p.domain = DomainGeneric
  7267          p.add(0, func(m *_Encoding, v []interface{}) {
  7268              m.emit(0x48 | hcode(v[1]))
  7269              m.emit(0x81)
  7270              m.emit(0xf8 | lcode(v[1]))
  7271              m.imm4(toImmAny(v[0]))
  7272          })
  7273      }
  7274      // CMPQ r64, r64
  7275      if isReg64(v0) && isReg64(v1) {
  7276          p.domain = DomainGeneric
  7277          p.add(0, func(m *_Encoding, v []interface{}) {
  7278              m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1]))
  7279              m.emit(0x39)
  7280              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
  7281          })
  7282          p.add(0, func(m *_Encoding, v []interface{}) {
  7283              m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
  7284              m.emit(0x3b)
  7285              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  7286          })
  7287      }
  7288      // CMPQ m64, r64
  7289      if isM64(v0) && isReg64(v1) {
  7290          p.domain = DomainGeneric
  7291          p.add(0, func(m *_Encoding, v []interface{}) {
  7292              m.rexm(1, hcode(v[1]), addr(v[0]))
  7293              m.emit(0x3b)
  7294              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  7295          })
  7296      }
  7297      // CMPQ imm8, m64
  7298      if isImm8Ext(v0, 8) && isM64(v1) {
  7299          p.domain = DomainGeneric
  7300          p.add(0, func(m *_Encoding, v []interface{}) {
  7301              m.rexm(1, 0, addr(v[1]))
  7302              m.emit(0x83)
  7303              m.mrsd(7, addr(v[1]), 1)
  7304              m.imm1(toImmAny(v[0]))
  7305          })
  7306      }
  7307      // CMPQ imm32, m64
  7308      if isImm32Ext(v0, 8) && isM64(v1) {
  7309          p.domain = DomainGeneric
  7310          p.add(0, func(m *_Encoding, v []interface{}) {
  7311              m.rexm(1, 0, addr(v[1]))
  7312              m.emit(0x81)
  7313              m.mrsd(7, addr(v[1]), 1)
  7314              m.imm4(toImmAny(v[0]))
  7315          })
  7316      }
  7317      // CMPQ r64, m64
  7318      if isReg64(v0) && isM64(v1) {
  7319          p.domain = DomainGeneric
  7320          p.add(0, func(m *_Encoding, v []interface{}) {
  7321              m.rexm(1, hcode(v[0]), addr(v[1]))
  7322              m.emit(0x39)
  7323              m.mrsd(lcode(v[0]), addr(v[1]), 1)
  7324          })
  7325      }
  7326      if p.len == 0 {
  7327          panic("invalid operands for CMPQ")
  7328      }
  7329      return p
  7330  }
  7331  
  7332  // CMPSD performs "Compare Scalar Double-Precision Floating-Point Values".
  7333  //
  7334  // Mnemonic        : CMPSD
  7335  // Supported forms : (2 forms)
  7336  //
  7337  //    * CMPSD imm8, xmm, xmm    [SSE2]
  7338  //    * CMPSD imm8, m64, xmm    [SSE2]
  7339  //
  7340  func (self *Program) CMPSD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
  7341      p := self.alloc("CMPSD", 3, Operands { v0, v1, v2 })
  7342      // CMPSD imm8, xmm, xmm
  7343      if isImm8(v0) && isXMM(v1) && isXMM(v2) {
  7344          self.require(ISA_SSE2)
  7345          p.domain = DomainMMXSSE
  7346          p.add(0, func(m *_Encoding, v []interface{}) {
  7347              m.emit(0xf2)
  7348              m.rexo(hcode(v[2]), v[1], false)
  7349              m.emit(0x0f)
  7350              m.emit(0xc2)
  7351              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
  7352              m.imm1(toImmAny(v[0]))
  7353          })
  7354      }
  7355      // CMPSD imm8, m64, xmm
  7356      if isImm8(v0) && isM64(v1) && isXMM(v2) {
  7357          self.require(ISA_SSE2)
  7358          p.domain = DomainMMXSSE
  7359          p.add(0, func(m *_Encoding, v []interface{}) {
  7360              m.emit(0xf2)
  7361              m.rexo(hcode(v[2]), addr(v[1]), false)
  7362              m.emit(0x0f)
  7363              m.emit(0xc2)
  7364              m.mrsd(lcode(v[2]), addr(v[1]), 1)
  7365              m.imm1(toImmAny(v[0]))
  7366          })
  7367      }
  7368      if p.len == 0 {
  7369          panic("invalid operands for CMPSD")
  7370      }
  7371      return p
  7372  }
  7373  
  7374  // CMPSS performs "Compare Scalar Single-Precision Floating-Point Values".
  7375  //
  7376  // Mnemonic        : CMPSS
  7377  // Supported forms : (2 forms)
  7378  //
  7379  //    * CMPSS imm8, xmm, xmm    [SSE]
  7380  //    * CMPSS imm8, m32, xmm    [SSE]
  7381  //
  7382  func (self *Program) CMPSS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
  7383      p := self.alloc("CMPSS", 3, Operands { v0, v1, v2 })
  7384      // CMPSS imm8, xmm, xmm
  7385      if isImm8(v0) && isXMM(v1) && isXMM(v2) {
  7386          self.require(ISA_SSE)
  7387          p.domain = DomainMMXSSE
  7388          p.add(0, func(m *_Encoding, v []interface{}) {
  7389              m.emit(0xf3)
  7390              m.rexo(hcode(v[2]), v[1], false)
  7391              m.emit(0x0f)
  7392              m.emit(0xc2)
  7393              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
  7394              m.imm1(toImmAny(v[0]))
  7395          })
  7396      }
  7397      // CMPSS imm8, m32, xmm
  7398      if isImm8(v0) && isM32(v1) && isXMM(v2) {
  7399          self.require(ISA_SSE)
  7400          p.domain = DomainMMXSSE
  7401          p.add(0, func(m *_Encoding, v []interface{}) {
  7402              m.emit(0xf3)
  7403              m.rexo(hcode(v[2]), addr(v[1]), false)
  7404              m.emit(0x0f)
  7405              m.emit(0xc2)
  7406              m.mrsd(lcode(v[2]), addr(v[1]), 1)
  7407              m.imm1(toImmAny(v[0]))
  7408          })
  7409      }
  7410      if p.len == 0 {
  7411          panic("invalid operands for CMPSS")
  7412      }
  7413      return p
  7414  }
  7415  
  7416  // CMPW performs "Compare Two Operands".
  7417  //
  7418  // Mnemonic        : CMP
  7419  // Supported forms : (8 forms)
  7420  //
  7421  //    * CMPW imm16, ax
  7422  //    * CMPW imm8, r16
  7423  //    * CMPW imm16, r16
  7424  //    * CMPW r16, r16
  7425  //    * CMPW m16, r16
  7426  //    * CMPW imm8, m16
  7427  //    * CMPW imm16, m16
  7428  //    * CMPW r16, m16
  7429  //
  7430  func (self *Program) CMPW(v0 interface{}, v1 interface{}) *Instruction {
  7431      p := self.alloc("CMPW", 2, Operands { v0, v1 })
  7432      // CMPW imm16, ax
  7433      if isImm16(v0) && v1 == AX {
  7434          p.domain = DomainGeneric
  7435          p.add(0, func(m *_Encoding, v []interface{}) {
  7436              m.emit(0x66)
  7437              m.emit(0x3d)
  7438              m.imm2(toImmAny(v[0]))
  7439          })
  7440      }
  7441      // CMPW imm8, r16
  7442      if isImm8Ext(v0, 2) && isReg16(v1) {
  7443          p.domain = DomainGeneric
  7444          p.add(0, func(m *_Encoding, v []interface{}) {
  7445              m.emit(0x66)
  7446              m.rexo(0, v[1], false)
  7447              m.emit(0x83)
  7448              m.emit(0xf8 | lcode(v[1]))
  7449              m.imm1(toImmAny(v[0]))
  7450          })
  7451      }
  7452      // CMPW imm16, r16
  7453      if isImm16(v0) && isReg16(v1) {
  7454          p.domain = DomainGeneric
  7455          p.add(0, func(m *_Encoding, v []interface{}) {
  7456              m.emit(0x66)
  7457              m.rexo(0, v[1], false)
  7458              m.emit(0x81)
  7459              m.emit(0xf8 | lcode(v[1]))
  7460              m.imm2(toImmAny(v[0]))
  7461          })
  7462      }
  7463      // CMPW r16, r16
  7464      if isReg16(v0) && isReg16(v1) {
  7465          p.domain = DomainGeneric
  7466          p.add(0, func(m *_Encoding, v []interface{}) {
  7467              m.emit(0x66)
  7468              m.rexo(hcode(v[0]), v[1], false)
  7469              m.emit(0x39)
  7470              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
  7471          })
  7472          p.add(0, func(m *_Encoding, v []interface{}) {
  7473              m.emit(0x66)
  7474              m.rexo(hcode(v[1]), v[0], false)
  7475              m.emit(0x3b)
  7476              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
  7477          })
  7478      }
  7479      // CMPW m16, r16
  7480      if isM16(v0) && isReg16(v1) {
  7481          p.domain = DomainGeneric
  7482          p.add(0, func(m *_Encoding, v []interface{}) {
  7483              m.emit(0x66)
  7484              m.rexo(hcode(v[1]), addr(v[0]), false)
  7485              m.emit(0x3b)
  7486              m.mrsd(lcode(v[1]), addr(v[0]), 1)
  7487          })
  7488      }
  7489      // CMPW imm8, m16
  7490      if isImm8Ext(v0, 2) && isM16(v1) {
  7491          p.domain = DomainGeneric
  7492          p.add(0, func(m *_Encoding, v []interface{}) {
  7493              m.emit(0x66)
  7494              m.rexo(0, addr(v[1]), false)
  7495              m.emit(0x83)
  7496              m.mrsd(7, addr(v[1]), 1)
  7497              m.imm1(toImmAny(v[0]))
  7498          })
  7499      }
  7500      // CMPW imm16, m16
  7501      if isImm16(v0) && isM16(v1) {
  7502          p.domain = DomainGeneric
  7503          p.add(0, func(m *_Encoding, v []interface{}) {
  7504              m.emit(0x66)
  7505              m.rexo(0, addr(v[1]), false)
  7506              m.emit(0x81)
  7507              m.mrsd(7, addr(v[1]), 1)
  7508              m.imm2(toImmAny(v[0]))
  7509          })
  7510      }
  7511      // CMPW r16, m16
  7512      if isReg16(v0) && isM16(v1) {
  7513          p.domain = DomainGeneric
  7514          p.add(0, func(m *_Encoding, v []interface{}) {
  7515              m.emit(0x66)
  7516              m.rexo(hcode(v[0]), addr(v[1]), false)
  7517              m.emit(0x39)
  7518              m.mrsd(lcode(v[0]), addr(v[1]), 1)
  7519          })
  7520      }
  7521      if p.len == 0 {
  7522          panic("invalid operands for CMPW")
  7523      }
  7524      return p
  7525  }
  7526  
  7527  // CMPXCHG16B performs "Compare and Exchange 16 Bytes".
  7528  //
  7529  // Mnemonic        : CMPXCHG16B
  7530  // Supported forms : (1 form)
  7531  //
  7532  //    * CMPXCHG16B m128
  7533  //
  7534  func (self *Program) CMPXCHG16B(v0 interface{}) *Instruction {
  7535      p := self.alloc("CMPXCHG16B", 1, Operands { v0 })
  7536      // CMPXCHG16B m128
  7537      if isM128(v0) {
  7538          p.domain = DomainGeneric
  7539          p.add(0, func(m *_Encoding, v []interface{}) {
  7540              m.rexm(1, 0, addr(v[0]))
  7541              m.emit(0x0f)
  7542              m.emit(0xc7)
  7543              m.mrsd(1, addr(v[0]), 1)
  7544          })
  7545      }
  7546      if p.len == 0 {
  7547          panic("invalid operands for CMPXCHG16B")
  7548      }
  7549      return p
  7550  }
  7551  
  7552  // CMPXCHG8B performs "Compare and Exchange 8 Bytes".
  7553  //
  7554  // Mnemonic        : CMPXCHG8B
  7555  // Supported forms : (1 form)
  7556  //
  7557  //    * CMPXCHG8B m64
  7558  //
  7559  func (self *Program) CMPXCHG8B(v0 interface{}) *Instruction {
  7560      p := self.alloc("CMPXCHG8B", 1, Operands { v0 })
  7561      // CMPXCHG8B m64
  7562      if isM64(v0) {
  7563          p.domain = DomainGeneric
  7564          p.add(0, func(m *_Encoding, v []interface{}) {
  7565              m.rexo(0, addr(v[0]), false)
  7566              m.emit(0x0f)
  7567              m.emit(0xc7)
  7568              m.mrsd(1, addr(v[0]), 1)
  7569          })
  7570      }
  7571      if p.len == 0 {
  7572          panic("invalid operands for CMPXCHG8B")
  7573      }
  7574      return p
  7575  }
  7576  
  7577  // CMPXCHGB performs "Compare and Exchange".
  7578  //
  7579  // Mnemonic        : CMPXCHG
  7580  // Supported forms : (2 forms)
  7581  //
  7582  //    * CMPXCHGB r8, r8
  7583  //    * CMPXCHGB r8, m8
  7584  //
  7585  func (self *Program) CMPXCHGB(v0 interface{}, v1 interface{}) *Instruction {
  7586      p := self.alloc("CMPXCHGB", 2, Operands { v0, v1 })
  7587      // CMPXCHGB r8, r8
  7588      if isReg8(v0) && isReg8(v1) {
  7589          p.domain = DomainGeneric
  7590          p.add(0, func(m *_Encoding, v []interface{}) {
  7591              m.rexo(hcode(v[0]), v[1], isReg8REX(v[0]) || isReg8REX(v[1]))
  7592              m.emit(0x0f)
  7593              m.emit(0xb0)
  7594              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
  7595          })
  7596      }
  7597      // CMPXCHGB r8, m8
  7598      if isReg8(v0) && isM8(v1) {
  7599          p.domain = DomainGeneric
  7600          p.add(0, func(m *_Encoding, v []interface{}) {
  7601              m.rexo(hcode(v[0]), addr(v[1]), isReg8REX(v[0]))
  7602              m.emit(0x0f)
  7603              m.emit(0xb0)
  7604              m.mrsd(lcode(v[0]), addr(v[1]), 1)
  7605          })
  7606      }
  7607      if p.len == 0 {
  7608          panic("invalid operands for CMPXCHGB")
  7609      }
  7610      return p
  7611  }
  7612  
  7613  // CMPXCHGL performs "Compare and Exchange".
  7614  //
  7615  // Mnemonic        : CMPXCHG
  7616  // Supported forms : (2 forms)
  7617  //
  7618  //    * CMPXCHGL r32, r32
  7619  //    * CMPXCHGL r32, m32
  7620  //
  7621  func (self *Program) CMPXCHGL(v0 interface{}, v1 interface{}) *Instruction {
  7622      p := self.alloc("CMPXCHGL", 2, Operands { v0, v1 })
  7623      // CMPXCHGL r32, r32
  7624      if isReg32(v0) && isReg32(v1) {
  7625          p.domain = DomainGeneric
  7626          p.add(0, func(m *_Encoding, v []interface{}) {
  7627              m.rexo(hcode(v[0]), v[1], false)
  7628              m.emit(0x0f)
  7629              m.emit(0xb1)
  7630              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
  7631          })
  7632      }
  7633      // CMPXCHGL r32, m32
  7634      if isReg32(v0) && isM32(v1) {
  7635          p.domain = DomainGeneric
  7636          p.add(0, func(m *_Encoding, v []interface{}) {
  7637              m.rexo(hcode(v[0]), addr(v[1]), false)
  7638              m.emit(0x0f)
  7639              m.emit(0xb1)
  7640              m.mrsd(lcode(v[0]), addr(v[1]), 1)
  7641          })
  7642      }
  7643      if p.len == 0 {
  7644          panic("invalid operands for CMPXCHGL")
  7645      }
  7646      return p
  7647  }
  7648  
  7649  // CMPXCHGQ performs "Compare and Exchange".
  7650  //
  7651  // Mnemonic        : CMPXCHG
  7652  // Supported forms : (2 forms)
  7653  //
  7654  //    * CMPXCHGQ r64, r64
  7655  //    * CMPXCHGQ r64, m64
  7656  //
  7657  func (self *Program) CMPXCHGQ(v0 interface{}, v1 interface{}) *Instruction {
  7658      p := self.alloc("CMPXCHGQ", 2, Operands { v0, v1 })
  7659      // CMPXCHGQ r64, r64
  7660      if isReg64(v0) && isReg64(v1) {
  7661          p.domain = DomainGeneric
  7662          p.add(0, func(m *_Encoding, v []interface{}) {
  7663              m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1]))
  7664              m.emit(0x0f)
  7665              m.emit(0xb1)
  7666              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
  7667          })
  7668      }
  7669      // CMPXCHGQ r64, m64
  7670      if isReg64(v0) && isM64(v1) {
  7671          p.domain = DomainGeneric
  7672          p.add(0, func(m *_Encoding, v []interface{}) {
  7673              m.rexm(1, hcode(v[0]), addr(v[1]))
  7674              m.emit(0x0f)
  7675              m.emit(0xb1)
  7676              m.mrsd(lcode(v[0]), addr(v[1]), 1)
  7677          })
  7678      }
  7679      if p.len == 0 {
  7680          panic("invalid operands for CMPXCHGQ")
  7681      }
  7682      return p
  7683  }
  7684  
  7685  // CMPXCHGW performs "Compare and Exchange".
  7686  //
  7687  // Mnemonic        : CMPXCHG
  7688  // Supported forms : (2 forms)
  7689  //
  7690  //    * CMPXCHGW r16, r16
  7691  //    * CMPXCHGW r16, m16
  7692  //
  7693  func (self *Program) CMPXCHGW(v0 interface{}, v1 interface{}) *Instruction {
  7694      p := self.alloc("CMPXCHGW", 2, Operands { v0, v1 })
  7695      // CMPXCHGW r16, r16
  7696      if isReg16(v0) && isReg16(v1) {
  7697          p.domain = DomainGeneric
  7698          p.add(0, func(m *_Encoding, v []interface{}) {
  7699              m.emit(0x66)
  7700              m.rexo(hcode(v[0]), v[1], false)
  7701              m.emit(0x0f)
  7702              m.emit(0xb1)
  7703              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
  7704          })
  7705      }
  7706      // CMPXCHGW r16, m16
  7707      if isReg16(v0) && isM16(v1) {
  7708          p.domain = DomainGeneric
  7709          p.add(0, func(m *_Encoding, v []interface{}) {
  7710              m.emit(0x66)
  7711              m.rexo(hcode(v[0]), addr(v[1]), false)
  7712              m.emit(0x0f)
  7713              m.emit(0xb1)
  7714              m.mrsd(lcode(v[0]), addr(v[1]), 1)
  7715          })
  7716      }
  7717      if p.len == 0 {
  7718          panic("invalid operands for CMPXCHGW")
  7719      }
  7720      return p
  7721  }
  7722  
// COMISD performs "Compare Scalar Ordered Double-Precision Floating-Point Values and Set EFLAGS".
//
// Mnemonic        : COMISD
// Supported forms : (2 forms)
//
//    * COMISD xmm, xmm    [SSE2]
//    * COMISD m64, xmm    [SSE2]
//
func (self *Program) COMISD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("COMISD", 2, Operands { v0, v1 })
    // COMISD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                     // 0x66 mandatory prefix: selects the double-precision form
            m.rexo(hcode(v[1]), v[0], false)                 // optional REX for extended registers
            m.emit(0x0f)                                     // two-byte opcode escape
            m.emit(0x2f)                                     // COMISD xmm, xmm/m64
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: register-direct, reg = v1 (dest), rm = v0 (src)
        })
    }
    // COMISD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x2f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM/SIB/displacement for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for COMISD")
    }
    return p
}
  7762  
// COMISS performs "Compare Scalar Ordered Single-Precision Floating-Point Values and Set EFLAGS".
//
// Mnemonic        : COMISS
// Supported forms : (2 forms)
//
//    * COMISS xmm, xmm    [SSE]
//    * COMISS m32, xmm    [SSE]
//
func (self *Program) COMISS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("COMISS", 2, Operands { v0, v1 })
    // COMISS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                 // no mandatory prefix: single-precision form
            m.emit(0x0f)                                     // two-byte opcode escape
            m.emit(0x2f)                                     // COMISS xmm, xmm/m32
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: register-direct, reg = v1 (dest), rm = v0 (src)
        })
    }
    // COMISS m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x2f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM/SIB/displacement for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for COMISS")
    }
    return p
}
  7800  
// CPUID performs "CPU Identification".
//
// Mnemonic        : CPUID
// Supported forms : (1 form)
//
//    * CPUID    [CPUID]
//
func (self *Program) CPUID() *Instruction {
    p := self.alloc("CPUID", 0, Operands {  })
    // CPUID
    self.require(ISA_CPUID)
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0x0f)    // two-byte opcode escape
        m.emit(0xa2)    // CPUID: fixed two-byte encoding, no operands
    })
    return p
}
  7819  
// CQTO performs "Convert Quadword to Octaword".
//
// Mnemonic        : CQO
// Supported forms : (1 form)
//
//    * CQTO
//
func (self *Program) CQTO() *Instruction {
    p := self.alloc("CQTO", 0, Operands {  })
    // CQTO
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0x48)    // REX.W prefix: 64-bit operand size (distinguishes CQO from CDQ)
        m.emit(0x99)    // CQO: sign-extend RAX into RDX:RAX
    })
    return p
}
  7837  
// CRC32B performs "Accumulate CRC32 Value".
//
// Mnemonic        : CRC32
// Supported forms : (4 forms)
//
//    * CRC32B r8, r32    [SSE4.2]
//    * CRC32B m8, r32    [SSE4.2]
//    * CRC32B r8, r64    [SSE4.2]
//    * CRC32B m8, r64    [SSE4.2]
//
func (self *Program) CRC32B(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CRC32B", 2, Operands { v0, v1 })
    // CRC32B r8, r32
    if isReg8(v0) && isReg32(v1) {
        self.require(ISA_SSE4_2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)                                     // 0xF2 mandatory prefix for CRC32
            m.rexo(hcode(v[1]), v[0], isReg8REX(v[0]))       // REX required for SPL/BPL/SIL/DIL byte registers
            m.emit(0x0f)                                     // three-byte opcode escape: 0F 38
            m.emit(0x38)
            m.emit(0xf0)                                     // CRC32 r32, r/m8
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: register-direct, reg = v1 (accumulator), rm = v0
        })
    }
    // CRC32B m8, r32
    if isM8(v0) && isReg32(v1) {
        self.require(ISA_SSE4_2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xf0)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM/SIB/displacement for the memory operand
        })
    }
    // CRC32B r8, r64
    if isReg8(v0) && isReg64(v1) {
        self.require(ISA_SSE4_2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))    // fixed REX.W + R/B bits: 64-bit destination
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xf0)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CRC32B m8, r64
    if isM8(v0) && isReg64(v1) {
        self.require(ISA_SSE4_2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexm(1, hcode(v[1]), addr(v[0]))               // REX.W + memory-operand extension bits
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xf0)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for CRC32B")
    }
    return p
}
  7907  
// CRC32L performs "Accumulate CRC32 Value".
//
// Mnemonic        : CRC32
// Supported forms : (2 forms)
//
//    * CRC32L r32, r32    [SSE4.2]
//    * CRC32L m32, r32    [SSE4.2]
//
func (self *Program) CRC32L(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CRC32L", 2, Operands { v0, v1 })
    // CRC32L r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_SSE4_2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)                                     // 0xF2 mandatory prefix for CRC32
            m.rexo(hcode(v[1]), v[0], false)                 // optional REX for extended registers
            m.emit(0x0f)                                     // three-byte opcode escape: 0F 38
            m.emit(0x38)
            m.emit(0xf1)                                     // CRC32 r32, r/m32
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: register-direct, reg = v1 (accumulator), rm = v0
        })
    }
    // CRC32L m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_SSE4_2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xf1)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM/SIB/displacement for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for CRC32L")
    }
    return p
}
  7949  
// CRC32Q performs "Accumulate CRC32 Value".
//
// Mnemonic        : CRC32
// Supported forms : (2 forms)
//
//    * CRC32Q r64, r64    [SSE4.2]
//    * CRC32Q m64, r64    [SSE4.2]
//
func (self *Program) CRC32Q(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CRC32Q", 2, Operands { v0, v1 })
    // CRC32Q r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_SSE4_2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)                                     // 0xF2 mandatory prefix for CRC32
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))    // fixed REX.W + R/B bits: 64-bit operands
            m.emit(0x0f)                                     // three-byte opcode escape: 0F 38
            m.emit(0x38)
            m.emit(0xf1)                                     // CRC32 r64, r/m64
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: register-direct, reg = v1 (accumulator), rm = v0
        })
    }
    // CRC32Q m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_SSE4_2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexm(1, hcode(v[1]), addr(v[0]))               // REX.W + memory-operand extension bits
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xf1)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM/SIB/displacement for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for CRC32Q")
    }
    return p
}
  7991  
// CRC32W performs "Accumulate CRC32 Value".
//
// Mnemonic        : CRC32
// Supported forms : (2 forms)
//
//    * CRC32W r16, r32    [SSE4.2]
//    * CRC32W m16, r32    [SSE4.2]
//
func (self *Program) CRC32W(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CRC32W", 2, Operands { v0, v1 })
    // CRC32W r16, r32
    if isReg16(v0) && isReg32(v1) {
        self.require(ISA_SSE4_2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                     // operand-size override: 16-bit source operand
            m.emit(0xf2)                                     // 0xF2 mandatory prefix for CRC32
            m.rexo(hcode(v[1]), v[0], false)                 // optional REX for extended registers
            m.emit(0x0f)                                     // three-byte opcode escape: 0F 38
            m.emit(0x38)
            m.emit(0xf1)                                     // CRC32 r32, r/m16 (with 66 prefix)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: register-direct, reg = v1 (accumulator), rm = v0
        })
    }
    // CRC32W m16, r32
    if isM16(v0) && isReg32(v1) {
        self.require(ISA_SSE4_2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xf1)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM/SIB/displacement for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for CRC32W")
    }
    return p
}
  8035  
// CVTDQ2PD performs "Convert Packed Dword Integers to Packed Double-Precision FP Values".
//
// Mnemonic        : CVTDQ2PD
// Supported forms : (2 forms)
//
//    * CVTDQ2PD xmm, xmm    [SSE2]
//    * CVTDQ2PD m64, xmm    [SSE2]
//
func (self *Program) CVTDQ2PD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTDQ2PD", 2, Operands { v0, v1 })
    // CVTDQ2PD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)                                     // 0xF3 mandatory prefix: selects the DQ2PD form of 0F E6
            m.rexo(hcode(v[1]), v[0], false)                 // optional REX for extended registers
            m.emit(0x0f)                                     // two-byte opcode escape
            m.emit(0xe6)                                     // CVTDQ2PD xmm, xmm/m64
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: register-direct, reg = v1 (dest), rm = v0 (src)
        })
    }
    // CVTDQ2PD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM/SIB/displacement for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for CVTDQ2PD")
    }
    return p
}
  8075  
// CVTDQ2PS performs "Convert Packed Dword Integers to Packed Single-Precision FP Values".
//
// Mnemonic        : CVTDQ2PS
// Supported forms : (2 forms)
//
//    * CVTDQ2PS xmm, xmm     [SSE2]
//    * CVTDQ2PS m128, xmm    [SSE2]
//
func (self *Program) CVTDQ2PS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTDQ2PS", 2, Operands { v0, v1 })
    // CVTDQ2PS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                 // no mandatory prefix: packed-single form of 0F 5B
            m.emit(0x0f)                                     // two-byte opcode escape
            m.emit(0x5b)                                     // CVTDQ2PS xmm, xmm/m128
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: register-direct, reg = v1 (dest), rm = v0 (src)
        })
    }
    // CVTDQ2PS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM/SIB/displacement for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for CVTDQ2PS")
    }
    return p
}
  8113  
// CVTPD2DQ performs "Convert Packed Double-Precision FP Values to Packed Dword Integers".
//
// Mnemonic        : CVTPD2DQ
// Supported forms : (2 forms)
//
//    * CVTPD2DQ xmm, xmm     [SSE2]
//    * CVTPD2DQ m128, xmm    [SSE2]
//
func (self *Program) CVTPD2DQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTPD2DQ", 2, Operands { v0, v1 })
    // CVTPD2DQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)                                     // 0xF2 mandatory prefix: selects the PD2DQ form of 0F E6
            m.rexo(hcode(v[1]), v[0], false)                 // optional REX for extended registers
            m.emit(0x0f)                                     // two-byte opcode escape
            m.emit(0xe6)                                     // CVTPD2DQ xmm, xmm/m128
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: register-direct, reg = v1 (dest), rm = v0 (src)
        })
    }
    // CVTPD2DQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM/SIB/displacement for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for CVTPD2DQ")
    }
    return p
}
  8153  
// CVTPD2PI performs "Convert Packed Double-Precision FP Values to Packed Dword Integers".
//
// Mnemonic        : CVTPD2PI
// Supported forms : (2 forms)
//
//    * CVTPD2PI xmm, mm     [SSE]
//    * CVTPD2PI m128, mm    [SSE]
//
// NOTE(review): this instruction reads xmm/m128 operands and the Intel SDM lists
// CVTPD2PI under SSE2, yet the generated table requires ISA_SSE here — verify
// against the mkasm_amd64.py opcode database before relying on the ISA gate.
func (self *Program) CVTPD2PI(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTPD2PI", 2, Operands { v0, v1 })
    // CVTPD2PI xmm, mm
    if isXMM(v0) && isMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                     // 0x66 mandatory prefix: selects the packed-double form of 0F 2D
            m.rexo(hcode(v[1]), v[0], false)                 // optional REX for extended registers
            m.emit(0x0f)                                     // two-byte opcode escape
            m.emit(0x2d)                                     // CVTPD2PI mm, xmm/m128
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: register-direct, reg = v1 (mm dest), rm = v0 (xmm src)
        })
    }
    // CVTPD2PI m128, mm
    if isM128(v0) && isMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x2d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM/SIB/displacement for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for CVTPD2PI")
    }
    return p
}
  8193  
// CVTPD2PS performs "Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values".
//
// Mnemonic        : CVTPD2PS
// Supported forms : (2 forms)
//
//    * CVTPD2PS xmm, xmm     [SSE2]
//    * CVTPD2PS m128, xmm    [SSE2]
//
func (self *Program) CVTPD2PS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTPD2PS", 2, Operands { v0, v1 })
    // CVTPD2PS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                     // 0x66 mandatory prefix: selects the packed-double form of 0F 5A
            m.rexo(hcode(v[1]), v[0], false)                 // optional REX for extended registers
            m.emit(0x0f)                                     // two-byte opcode escape
            m.emit(0x5a)                                     // CVTPD2PS xmm, xmm/m128
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: register-direct, reg = v1 (dest), rm = v0 (src)
        })
    }
    // CVTPD2PS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM/SIB/displacement for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for CVTPD2PS")
    }
    return p
}
  8233  
// CVTPI2PD performs "Convert Packed Dword Integers to Packed Double-Precision FP Values".
//
// Mnemonic        : CVTPI2PD
// Supported forms : (2 forms)
//
//    * CVTPI2PD mm, xmm     [SSE2]
//    * CVTPI2PD m64, xmm    [SSE2]
//
func (self *Program) CVTPI2PD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTPI2PD", 2, Operands { v0, v1 })
    // CVTPI2PD mm, xmm
    if isMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                     // 0x66 mandatory prefix: selects the packed-double form of 0F 2A
            m.rexo(hcode(v[1]), v[0], false)                 // optional REX for extended registers
            m.emit(0x0f)                                     // two-byte opcode escape
            m.emit(0x2a)                                     // CVTPI2PD xmm, mm/m64
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: register-direct, reg = v1 (xmm dest), rm = v0 (mm src)
        })
    }
    // CVTPI2PD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x2a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM/SIB/displacement for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for CVTPI2PD")
    }
    return p
}
  8273  
// CVTPI2PS performs "Convert Packed Dword Integers to Packed Single-Precision FP Values".
//
// Mnemonic        : CVTPI2PS
// Supported forms : (2 forms)
//
//    * CVTPI2PS mm, xmm     [SSE]
//    * CVTPI2PS m64, xmm    [SSE]
//
func (self *Program) CVTPI2PS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTPI2PS", 2, Operands { v0, v1 })
    // CVTPI2PS mm, xmm
    if isMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                 // no mandatory prefix: packed-single form of 0F 2A
            m.emit(0x0f)                                     // two-byte opcode escape
            m.emit(0x2a)                                     // CVTPI2PS xmm, mm/m64
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: register-direct, reg = v1 (xmm dest), rm = v0 (mm src)
        })
    }
    // CVTPI2PS m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x2a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM/SIB/displacement for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for CVTPI2PS")
    }
    return p
}
  8311  
// CVTPS2DQ performs "Convert Packed Single-Precision FP Values to Packed Dword Integers".
//
// Mnemonic        : CVTPS2DQ
// Supported forms : (2 forms)
//
//    * CVTPS2DQ xmm, xmm     [SSE2]
//    * CVTPS2DQ m128, xmm    [SSE2]
//
func (self *Program) CVTPS2DQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTPS2DQ", 2, Operands { v0, v1 })
    // CVTPS2DQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                     // 0x66 mandatory prefix: selects the PS2DQ form of 0F 5B
            m.rexo(hcode(v[1]), v[0], false)                 // optional REX for extended registers
            m.emit(0x0f)                                     // two-byte opcode escape
            m.emit(0x5b)                                     // CVTPS2DQ xmm, xmm/m128
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: register-direct, reg = v1 (dest), rm = v0 (src)
        })
    }
    // CVTPS2DQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM/SIB/displacement for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for CVTPS2DQ")
    }
    return p
}
  8351  
// CVTPS2PD performs "Convert Packed Single-Precision FP Values to Packed Double-Precision FP Values".
//
// Mnemonic        : CVTPS2PD
// Supported forms : (2 forms)
//
//    * CVTPS2PD xmm, xmm    [SSE2]
//    * CVTPS2PD m64, xmm    [SSE2]
//
func (self *Program) CVTPS2PD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTPS2PD", 2, Operands { v0, v1 })
    // CVTPS2PD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                 // no mandatory prefix: packed-single source form of 0F 5A
            m.emit(0x0f)                                     // two-byte opcode escape
            m.emit(0x5a)                                     // CVTPS2PD xmm, xmm/m64
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: register-direct, reg = v1 (dest), rm = v0 (src)
        })
    }
    // CVTPS2PD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM/SIB/displacement for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for CVTPS2PD")
    }
    return p
}
  8389  
// CVTPS2PI performs "Convert Packed Single-Precision FP Values to Packed Dword Integers".
//
// Mnemonic        : CVTPS2PI
// Supported forms : (2 forms)
//
//    * CVTPS2PI xmm, mm    [SSE]
//    * CVTPS2PI m64, mm    [SSE]
//
func (self *Program) CVTPS2PI(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTPS2PI", 2, Operands { v0, v1 })
    // CVTPS2PI xmm, mm
    if isXMM(v0) && isMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                 // no mandatory prefix: packed-single form of 0F 2D
            m.emit(0x0f)                                     // two-byte opcode escape
            m.emit(0x2d)                                     // CVTPS2PI mm, xmm/m64
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: register-direct, reg = v1 (mm dest), rm = v0 (xmm src)
        })
    }
    // CVTPS2PI m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x2d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM/SIB/displacement for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for CVTPS2PI")
    }
    return p
}
  8427  
// CVTSD2SI performs "Convert Scalar Double-Precision FP Value to Integer".
//
// Mnemonic        : CVTSD2SI
// Supported forms : (4 forms)
//
//    * CVTSD2SI xmm, r32    [SSE2]
//    * CVTSD2SI m64, r32    [SSE2]
//    * CVTSD2SI xmm, r64    [SSE2]
//    * CVTSD2SI m64, r64    [SSE2]
//
func (self *Program) CVTSD2SI(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTSD2SI", 2, Operands { v0, v1 })
    // CVTSD2SI xmm, r32
    if isXMM(v0) && isReg32(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)                                     // 0xF2 mandatory prefix: scalar-double form of 0F 2D
            m.rexo(hcode(v[1]), v[0], false)                 // optional REX for extended registers
            m.emit(0x0f)                                     // two-byte opcode escape
            m.emit(0x2d)                                     // CVTSD2SI r32, xmm/m64
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: register-direct, reg = v1 (dest), rm = v0 (src)
        })
    }
    // CVTSD2SI m64, r32
    if isM64(v0) && isReg32(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x2d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM/SIB/displacement for the memory operand
        })
    }
    // CVTSD2SI xmm, r64
    if isXMM(v0) && isReg64(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))    // fixed REX.W + R/B bits: 64-bit destination
            m.emit(0x0f)
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CVTSD2SI m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexm(1, hcode(v[1]), addr(v[0]))               // REX.W + memory-operand extension bits
            m.emit(0x0f)
            m.emit(0x2d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for CVTSD2SI")
    }
    return p
}
  8493  
// CVTSD2SS performs "Convert Scalar Double-Precision FP Value to Scalar Single-Precision FP Value".
//
// Mnemonic        : CVTSD2SS
// Supported forms : (2 forms)
//
//    * CVTSD2SS xmm, xmm    [SSE2]
//    * CVTSD2SS m64, xmm    [SSE2]
//
func (self *Program) CVTSD2SS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTSD2SS", 2, Operands { v0, v1 })
    // CVTSD2SS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)                                     // 0xF2 mandatory prefix: scalar-double form of 0F 5A
            m.rexo(hcode(v[1]), v[0], false)                 // optional REX for extended registers
            m.emit(0x0f)                                     // two-byte opcode escape
            m.emit(0x5a)                                     // CVTSD2SS xmm, xmm/m64
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: register-direct, reg = v1 (dest), rm = v0 (src)
        })
    }
    // CVTSD2SS m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM/SIB/displacement for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for CVTSD2SS")
    }
    return p
}
  8533  
// CVTSI2SD performs "Convert Dword Integer to Scalar Double-Precision FP Value".
//
// Mnemonic        : CVTSI2SD
// Supported forms : (4 forms)
//
//    * CVTSI2SD r32, xmm    [SSE2]
//    * CVTSI2SD r64, xmm    [SSE2]
//    * CVTSI2SD m32, xmm    [SSE2]
//    * CVTSI2SD m64, xmm    [SSE2]
//
func (self *Program) CVTSI2SD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTSI2SD", 2, Operands { v0, v1 })
    // CVTSI2SD r32, xmm
    if isReg32(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)                                     // 0xF2 mandatory prefix: scalar-double form of 0F 2A
            m.rexo(hcode(v[1]), v[0], false)                 // optional REX for extended registers
            m.emit(0x0f)                                     // two-byte opcode escape
            m.emit(0x2a)                                     // CVTSI2SD xmm, r/m32
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: register-direct, reg = v1 (xmm dest), rm = v0 (src)
        })
    }
    // CVTSI2SD r64, xmm
    if isReg64(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))    // fixed REX.W + R/B bits: 64-bit integer source
            m.emit(0x0f)
            m.emit(0x2a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CVTSI2SD m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x2a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM/SIB/displacement for the memory operand
        })
    }
    // CVTSI2SD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexm(1, hcode(v[1]), addr(v[0]))               // REX.W + memory-operand extension bits
            m.emit(0x0f)
            m.emit(0x2a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for CVTSI2SD")
    }
    return p
}
  8599  
// CVTSI2SS performs "Convert Dword Integer to Scalar Single-Precision FP Value".
//
// Mnemonic        : CVTSI2SS
// Supported forms : (4 forms)
//
//    * CVTSI2SS r32, xmm    [SSE]
//    * CVTSI2SS r64, xmm    [SSE]
//    * CVTSI2SS m32, xmm    [SSE]
//    * CVTSI2SS m64, xmm    [SSE]
//
func (self *Program) CVTSI2SS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTSI2SS", 2, Operands { v0, v1 })
    // CVTSI2SS r32, xmm
    if isReg32(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // F3 [REX] 0F 2A /r -- ModRM.reg = destination XMM, ModRM.rm = source r32
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x2a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CVTSI2SS r64, xmm
    if isReg64(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // F3 REX.W 0F 2A /r -- the literal 0x48 base carries REX.W for the 64-bit source
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
            m.emit(0x0f)
            m.emit(0x2a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CVTSI2SS m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // F3 [REX] 0F 2A /r with a memory source (ModRM/SIB/displacement via mrsd)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x2a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CVTSI2SS m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // F3 REX.W 0F 2A /r -- rexm(1, ...) emits a REX prefix with W set for the m64 form
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexm(1, hcode(v[1]), addr(v[0]))
            m.emit(0x0f)
            m.emit(0x2a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no encoder was added: the operand combination is not encodable
    if p.len == 0 {
        panic("invalid operands for CVTSI2SS")
    }
    return p
}
  8665  
// CVTSS2SD performs "Convert Scalar Single-Precision FP Value to Scalar Double-Precision FP Value".
//
// Mnemonic        : CVTSS2SD
// Supported forms : (2 forms)
//
//    * CVTSS2SD xmm, xmm    [SSE2]
//    * CVTSS2SD m32, xmm    [SSE2]
//
func (self *Program) CVTSS2SD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTSS2SD", 2, Operands { v0, v1 })
    // CVTSS2SD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // F3 [REX] 0F 5A /r -- ModRM.reg = destination XMM, ModRM.rm = source XMM
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x5a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CVTSS2SD m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // F3 [REX] 0F 5A /r with a memory source (ModRM/SIB/displacement via mrsd)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no encoder was added: the operand combination is not encodable
    if p.len == 0 {
        panic("invalid operands for CVTSS2SD")
    }
    return p
}
  8705  
// CVTSS2SI performs "Convert Scalar Single-Precision FP Value to Dword Integer".
//
// Mnemonic        : CVTSS2SI
// Supported forms : (4 forms)
//
//    * CVTSS2SI xmm, r32    [SSE]
//    * CVTSS2SI m32, r32    [SSE]
//    * CVTSS2SI xmm, r64    [SSE]
//    * CVTSS2SI m32, r64    [SSE]
//
func (self *Program) CVTSS2SI(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTSS2SI", 2, Operands { v0, v1 })
    // CVTSS2SI xmm, r32
    if isXMM(v0) && isReg32(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // F3 [REX] 0F 2D /r -- ModRM.reg = destination r32, ModRM.rm = source XMM
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CVTSS2SI m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // F3 [REX] 0F 2D /r with a memory source (ModRM/SIB/displacement via mrsd)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x2d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CVTSS2SI xmm, r64
    if isXMM(v0) && isReg64(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // F3 REX.W 0F 2D /r -- the literal 0x48 base carries REX.W for the 64-bit destination
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
            m.emit(0x0f)
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CVTSS2SI m32, r64
    if isM32(v0) && isReg64(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // F3 REX.W 0F 2D /r -- rexm(1, ...) emits a REX prefix with W set
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexm(1, hcode(v[1]), addr(v[0]))
            m.emit(0x0f)
            m.emit(0x2d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no encoder was added: the operand combination is not encodable
    if p.len == 0 {
        panic("invalid operands for CVTSS2SI")
    }
    return p
}
  8771  
// CVTTPD2DQ performs "Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers".
//
// Mnemonic        : CVTTPD2DQ
// Supported forms : (2 forms)
//
//    * CVTTPD2DQ xmm, xmm     [SSE2]
//    * CVTTPD2DQ m128, xmm    [SSE2]
//
func (self *Program) CVTTPD2DQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTTPD2DQ", 2, Operands { v0, v1 })
    // CVTTPD2DQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // 66 [REX] 0F E6 /r -- ModRM.reg = destination XMM, ModRM.rm = source XMM
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CVTTPD2DQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // 66 [REX] 0F E6 /r with a memory source (ModRM/SIB/displacement via mrsd)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no encoder was added: the operand combination is not encodable
    if p.len == 0 {
        panic("invalid operands for CVTTPD2DQ")
    }
    return p
}
  8811  
// CVTTPD2PI performs "Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers".
//
// Mnemonic        : CVTTPD2PI
// Supported forms : (2 forms)
//
//    * CVTTPD2PI xmm, mm     [SSE2]
//    * CVTTPD2PI m128, mm    [SSE2]
//
func (self *Program) CVTTPD2PI(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTTPD2PI", 2, Operands { v0, v1 })
    // CVTTPD2PI xmm, mm
    if isXMM(v0) && isMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // 66 [REX] 0F 2C /r -- ModRM.reg = destination MMX register, ModRM.rm = source XMM
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CVTTPD2PI m128, mm
    if isM128(v0) && isMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // 66 [REX] 0F 2C /r with a memory source (ModRM/SIB/displacement via mrsd)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x2c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no encoder was added: the operand combination is not encodable
    if p.len == 0 {
        panic("invalid operands for CVTTPD2PI")
    }
    return p
}
  8851  
// CVTTPS2DQ performs "Convert with Truncation Packed Single-Precision FP Values to Packed Dword Integers".
//
// Mnemonic        : CVTTPS2DQ
// Supported forms : (2 forms)
//
//    * CVTTPS2DQ xmm, xmm     [SSE2]
//    * CVTTPS2DQ m128, xmm    [SSE2]
//
func (self *Program) CVTTPS2DQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTTPS2DQ", 2, Operands { v0, v1 })
    // CVTTPS2DQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // F3 [REX] 0F 5B /r -- ModRM.reg = destination XMM, ModRM.rm = source XMM
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CVTTPS2DQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // F3 [REX] 0F 5B /r with a memory source (ModRM/SIB/displacement via mrsd)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no encoder was added: the operand combination is not encodable
    if p.len == 0 {
        panic("invalid operands for CVTTPS2DQ")
    }
    return p
}
  8891  
// CVTTPS2PI performs "Convert with Truncation Packed Single-Precision FP Values to Packed Dword Integers".
//
// Mnemonic        : CVTTPS2PI
// Supported forms : (2 forms)
//
//    * CVTTPS2PI xmm, mm    [SSE]
//    * CVTTPS2PI m64, mm    [SSE]
//
func (self *Program) CVTTPS2PI(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTTPS2PI", 2, Operands { v0, v1 })
    // CVTTPS2PI xmm, mm
    if isXMM(v0) && isMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // [REX] 0F 2C /r -- no mandatory prefix; ModRM.reg = destination MMX register
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CVTTPS2PI m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // [REX] 0F 2C /r with a memory source (ModRM/SIB/displacement via mrsd)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x2c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no encoder was added: the operand combination is not encodable
    if p.len == 0 {
        panic("invalid operands for CVTTPS2PI")
    }
    return p
}
  8929  
// CVTTSD2SI performs "Convert with Truncation Scalar Double-Precision FP Value to Signed Integer".
//
// Mnemonic        : CVTTSD2SI
// Supported forms : (4 forms)
//
//    * CVTTSD2SI xmm, r32    [SSE2]
//    * CVTTSD2SI m64, r32    [SSE2]
//    * CVTTSD2SI xmm, r64    [SSE2]
//    * CVTTSD2SI m64, r64    [SSE2]
//
func (self *Program) CVTTSD2SI(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTTSD2SI", 2, Operands { v0, v1 })
    // CVTTSD2SI xmm, r32
    if isXMM(v0) && isReg32(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // F2 [REX] 0F 2C /r -- ModRM.reg = destination r32, ModRM.rm = source XMM
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CVTTSD2SI m64, r32
    if isM64(v0) && isReg32(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // F2 [REX] 0F 2C /r with a memory source (ModRM/SIB/displacement via mrsd)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x2c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CVTTSD2SI xmm, r64
    if isXMM(v0) && isReg64(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // F2 REX.W 0F 2C /r -- the literal 0x48 base carries REX.W for the 64-bit destination
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
            m.emit(0x0f)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CVTTSD2SI m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // F2 REX.W 0F 2C /r -- rexm(1, ...) emits a REX prefix with W set
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexm(1, hcode(v[1]), addr(v[0]))
            m.emit(0x0f)
            m.emit(0x2c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no encoder was added: the operand combination is not encodable
    if p.len == 0 {
        panic("invalid operands for CVTTSD2SI")
    }
    return p
}
  8995  
// CVTTSS2SI performs "Convert with Truncation Scalar Single-Precision FP Value to Dword Integer".
//
// Mnemonic        : CVTTSS2SI
// Supported forms : (4 forms)
//
//    * CVTTSS2SI xmm, r32    [SSE]
//    * CVTTSS2SI m32, r32    [SSE]
//    * CVTTSS2SI xmm, r64    [SSE]
//    * CVTTSS2SI m32, r64    [SSE]
//
func (self *Program) CVTTSS2SI(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTTSS2SI", 2, Operands { v0, v1 })
    // CVTTSS2SI xmm, r32
    if isXMM(v0) && isReg32(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // F3 [REX] 0F 2C /r -- ModRM.reg = destination r32, ModRM.rm = source XMM
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CVTTSS2SI m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // F3 [REX] 0F 2C /r with a memory source (ModRM/SIB/displacement via mrsd)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x2c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CVTTSS2SI xmm, r64
    if isXMM(v0) && isReg64(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // F3 REX.W 0F 2C /r -- the literal 0x48 base carries REX.W for the 64-bit destination
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
            m.emit(0x0f)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CVTTSS2SI m32, r64
    if isM32(v0) && isReg64(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // F3 REX.W 0F 2C /r -- rexm(1, ...) emits a REX prefix with W set
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexm(1, hcode(v[1]), addr(v[0]))
            m.emit(0x0f)
            m.emit(0x2c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no encoder was added: the operand combination is not encodable
    if p.len == 0 {
        panic("invalid operands for CVTTSS2SI")
    }
    return p
}
  9061  
// CWTD performs "Convert Word to Doubleword".
//
// Mnemonic        : CWD
// Supported forms : (1 form)
//
//    * CWTD
//
func (self *Program) CWTD() *Instruction {
    p := self.alloc("CWTD", 0, Operands {  })
    // CWTD
    p.domain = DomainGeneric
    // 66 99 -- the operand-size override prefix selects the 16-bit CWD form of opcode 99
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0x66)
        m.emit(0x99)
    })
    return p
}
  9079  
// CWTL performs "Convert Word to Doubleword".
//
// Mnemonic        : CWDE
// Supported forms : (1 form)
//
//    * CWTL
//
func (self *Program) CWTL() *Instruction {
    p := self.alloc("CWTL", 0, Operands {  })
    // CWTL
    p.domain = DomainGeneric
    // 98 -- single-byte CWDE opcode, no prefixes or operands
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0x98)
    })
    return p
}
  9096  
// DECB performs "Decrement by 1".
//
// Mnemonic        : DEC
// Supported forms : (2 forms)
//
//    * DECB r8
//    * DECB m8
//
func (self *Program) DECB(v0 interface{}) *Instruction {
    p := self.alloc("DECB", 1, Operands { v0 })
    // DECB r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        // FE /1 -- 0xc8 | lcode sets ModRM.reg = 1; isReg8REX forces a REX prefix for SPL/BPL/SIL/DIL
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))
            m.emit(0xfe)
            m.emit(0xc8 | lcode(v[0]))
        })
    }
    // DECB m8
    if isM8(v0) {
        p.domain = DomainGeneric
        // FE /1 with a memory operand -- mrsd's first argument 1 is the /1 opcode extension
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)
            m.emit(0xfe)
            m.mrsd(1, addr(v[0]), 1)
        })
    }
    // no encoder was added: the operand is not encodable
    if p.len == 0 {
        panic("invalid operands for DECB")
    }
    return p
}
  9130  
// DECL performs "Decrement by 1".
//
// Mnemonic        : DEC
// Supported forms : (2 forms)
//
//    * DECL r32
//    * DECL m32
//
func (self *Program) DECL(v0 interface{}) *Instruction {
    p := self.alloc("DECL", 1, Operands { v0 })
    // DECL r32
    if isReg32(v0) {
        p.domain = DomainGeneric
        // FF /1 -- 0xc8 | lcode sets ModRM.reg = 1 (the DEC opcode extension)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], false)
            m.emit(0xff)
            m.emit(0xc8 | lcode(v[0]))
        })
    }
    // DECL m32
    if isM32(v0) {
        p.domain = DomainGeneric
        // FF /1 with a memory operand -- mrsd's first argument 1 is the /1 opcode extension
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)
            m.emit(0xff)
            m.mrsd(1, addr(v[0]), 1)
        })
    }
    // no encoder was added: the operand is not encodable
    if p.len == 0 {
        panic("invalid operands for DECL")
    }
    return p
}
  9164  
// DECQ performs "Decrement by 1".
//
// Mnemonic        : DEC
// Supported forms : (2 forms)
//
//    * DECQ r64
//    * DECQ m64
//
func (self *Program) DECQ(v0 interface{}) *Instruction {
    p := self.alloc("DECQ", 1, Operands { v0 })
    // DECQ r64
    if isReg64(v0) {
        p.domain = DomainGeneric
        // REX.W FF /1 -- the literal 0x48 base carries REX.W for the 64-bit operand
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0]))
            m.emit(0xff)
            m.emit(0xc8 | lcode(v[0]))
        })
    }
    // DECQ m64
    if isM64(v0) {
        p.domain = DomainGeneric
        // REX.W FF /1 with a memory operand -- rexm(1, ...) emits a REX prefix with W set
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[0]))
            m.emit(0xff)
            m.mrsd(1, addr(v[0]), 1)
        })
    }
    // no encoder was added: the operand is not encodable
    if p.len == 0 {
        panic("invalid operands for DECQ")
    }
    return p
}
  9198  
// DECW performs "Decrement by 1".
//
// Mnemonic        : DEC
// Supported forms : (2 forms)
//
//    * DECW r16
//    * DECW m16
//
func (self *Program) DECW(v0 interface{}) *Instruction {
    p := self.alloc("DECW", 1, Operands { v0 })
    // DECW r16
    if isReg16(v0) {
        p.domain = DomainGeneric
        // 66 FF /1 -- operand-size override prefix selects the 16-bit form
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[0], false)
            m.emit(0xff)
            m.emit(0xc8 | lcode(v[0]))
        })
    }
    // DECW m16
    if isM16(v0) {
        p.domain = DomainGeneric
        // 66 FF /1 with a memory operand -- mrsd's first argument 1 is the /1 opcode extension
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[0]), false)
            m.emit(0xff)
            m.mrsd(1, addr(v[0]), 1)
        })
    }
    // no encoder was added: the operand is not encodable
    if p.len == 0 {
        panic("invalid operands for DECW")
    }
    return p
}
  9234  
// DIVB performs "Unsigned Divide".
//
// Mnemonic        : DIV
// Supported forms : (2 forms)
//
//    * DIVB r8
//    * DIVB m8
//
func (self *Program) DIVB(v0 interface{}) *Instruction {
    p := self.alloc("DIVB", 1, Operands { v0 })
    // DIVB r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        // F6 /6 -- 0xf0 | lcode sets ModRM.reg = 6; isReg8REX forces a REX prefix for SPL/BPL/SIL/DIL
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))
            m.emit(0xf6)
            m.emit(0xf0 | lcode(v[0]))
        })
    }
    // DIVB m8
    if isM8(v0) {
        p.domain = DomainGeneric
        // F6 /6 with a memory operand -- mrsd's first argument 6 is the /6 opcode extension
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)
            m.emit(0xf6)
            m.mrsd(6, addr(v[0]), 1)
        })
    }
    // no encoder was added: the operand is not encodable
    if p.len == 0 {
        panic("invalid operands for DIVB")
    }
    return p
}
  9268  
// DIVL performs "Unsigned Divide".
//
// Mnemonic        : DIV
// Supported forms : (2 forms)
//
//    * DIVL r32
//    * DIVL m32
//
func (self *Program) DIVL(v0 interface{}) *Instruction {
    p := self.alloc("DIVL", 1, Operands { v0 })
    // DIVL r32
    if isReg32(v0) {
        p.domain = DomainGeneric
        // F7 /6 -- 0xf0 | lcode sets ModRM.reg = 6 (the DIV opcode extension)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], false)
            m.emit(0xf7)
            m.emit(0xf0 | lcode(v[0]))
        })
    }
    // DIVL m32
    if isM32(v0) {
        p.domain = DomainGeneric
        // F7 /6 with a memory operand -- mrsd's first argument 6 is the /6 opcode extension
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)
            m.emit(0xf7)
            m.mrsd(6, addr(v[0]), 1)
        })
    }
    // no encoder was added: the operand is not encodable
    if p.len == 0 {
        panic("invalid operands for DIVL")
    }
    return p
}
  9302  
// DIVPD performs "Divide Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : DIVPD
// Supported forms : (2 forms)
//
//    * DIVPD xmm, xmm     [SSE2]
//    * DIVPD m128, xmm    [SSE2]
//
func (self *Program) DIVPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("DIVPD", 2, Operands { v0, v1 })
    // DIVPD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // 66 [REX] 0F 5E /r -- ModRM.reg = destination XMM, ModRM.rm = source XMM
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // DIVPD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // 66 [REX] 0F 5E /r with a memory source (ModRM/SIB/displacement via mrsd)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no encoder was added: the operand combination is not encodable
    if p.len == 0 {
        panic("invalid operands for DIVPD")
    }
    return p
}
  9342  
// DIVPS performs "Divide Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : DIVPS
// Supported forms : (2 forms)
//
//    * DIVPS xmm, xmm     [SSE]
//    * DIVPS m128, xmm    [SSE]
//
func (self *Program) DIVPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("DIVPS", 2, Operands { v0, v1 })
    // DIVPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // [REX] 0F 5E /r -- no mandatory prefix (packed single form)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // DIVPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // [REX] 0F 5E /r with a memory source (ModRM/SIB/displacement via mrsd)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no encoder was added: the operand combination is not encodable
    if p.len == 0 {
        panic("invalid operands for DIVPS")
    }
    return p
}
  9380  
// DIVQ performs "Unsigned Divide".
//
// Mnemonic        : DIV
// Supported forms : (2 forms)
//
//    * DIVQ r64
//    * DIVQ m64
//
func (self *Program) DIVQ(v0 interface{}) *Instruction {
    p := self.alloc("DIVQ", 1, Operands { v0 })
    // DIVQ r64
    if isReg64(v0) {
        p.domain = DomainGeneric
        // REX.W F7 /6 -- the literal 0x48 base carries REX.W for the 64-bit operand
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0]))
            m.emit(0xf7)
            m.emit(0xf0 | lcode(v[0]))
        })
    }
    // DIVQ m64
    if isM64(v0) {
        p.domain = DomainGeneric
        // REX.W F7 /6 with a memory operand -- rexm(1, ...) emits a REX prefix with W set
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[0]))
            m.emit(0xf7)
            m.mrsd(6, addr(v[0]), 1)
        })
    }
    // no encoder was added: the operand is not encodable
    if p.len == 0 {
        panic("invalid operands for DIVQ")
    }
    return p
}
  9414  
// DIVSD performs "Divide Scalar Double-Precision Floating-Point Values".
//
// Mnemonic        : DIVSD
// Supported forms : (2 forms)
//
//    * DIVSD xmm, xmm    [SSE2]
//    * DIVSD m64, xmm    [SSE2]
//
func (self *Program) DIVSD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("DIVSD", 2, Operands { v0, v1 })
    // DIVSD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // F2 [REX] 0F 5E /r -- ModRM.reg = destination XMM, ModRM.rm = source XMM
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // DIVSD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // F2 [REX] 0F 5E /r with a memory source (ModRM/SIB/displacement via mrsd)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no encoder was added: the operand combination is not encodable
    if p.len == 0 {
        panic("invalid operands for DIVSD")
    }
    return p
}
  9454  
// DIVSS performs "Divide Scalar Single-Precision Floating-Point Values".
//
// Mnemonic        : DIVSS
// Supported forms : (2 forms)
//
//    * DIVSS xmm, xmm    [SSE]
//    * DIVSS m32, xmm    [SSE]
//
func (self *Program) DIVSS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("DIVSS", 2, Operands { v0, v1 })
    // DIVSS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // F3 [REX] 0F 5E /r -- ModRM.reg = destination XMM, ModRM.rm = source XMM
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // DIVSS m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // F3 [REX] 0F 5E /r with a memory source (ModRM/SIB/displacement via mrsd)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no encoder was added: the operand combination is not encodable
    if p.len == 0 {
        panic("invalid operands for DIVSS")
    }
    return p
}
  9494  
// DIVW performs "Unsigned Divide".
//
// Mnemonic        : DIV
// Supported forms : (2 forms)
//
//    * DIVW r16
//    * DIVW m16
//
func (self *Program) DIVW(v0 interface{}) *Instruction {
    p := self.alloc("DIVW", 1, Operands { v0 })
    // DIVW r16
    if isReg16(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                    // 66: operand-size override (16-bit)
            m.rexo(0, v[0], false)          // optional REX for R8W-R15W
            m.emit(0xf7)                    // group-3 opcode
            m.emit(0xf0 | lcode(v[0]))      // ModRM: mod=11, /6 selects DIV
        })
    }
    // DIVW m16
    if isM16(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[0]), false)
            m.emit(0xf7)
            m.mrsd(6, addr(v[0]), 1)        // ModRM /6 (DIV) + SIB/disp for the memory form
        })
    }
    // no operand form matched: reject at build time rather than emit garbage
    if p.len == 0 {
        panic("invalid operands for DIVW")
    }
    return p
}
  9530  
// DPPD performs "Dot Product of Packed Double Precision Floating-Point Values".
//
// Mnemonic        : DPPD
// Supported forms : (2 forms)
//
//    * DPPD imm8, xmm, xmm     [SSE4.1]
//    * DPPD imm8, m128, xmm    [SSE4.1]
//
func (self *Program) DPPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("DPPD", 3, Operands { v0, v1, v2 })
    // DPPD imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // 66: mandatory prefix
            m.rexo(hcode(v[2]), v[1], false)                // optional REX for XMM8-XMM15
            m.emit(0x0f)                                    // three-byte opcode escape: 0F 3A
            m.emit(0x3a)
            m.emit(0x41)                                    // DPPD opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))   // ModRM: mod=11, reg=dst, rm=src
            m.imm1(toImmAny(v[0]))                          // imm8 selection/broadcast mask
        })
    }
    // DPPD imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x41)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)              // ModRM + SIB/disp for the memory form
            m.imm1(toImmAny(v[0]))
        })
    }
    // no operand form matched: reject at build time rather than emit garbage
    if p.len == 0 {
        panic("invalid operands for DPPD")
    }
    return p
}
  9574  
// DPPS performs "Dot Product of Packed Single Precision Floating-Point Values".
//
// Mnemonic        : DPPS
// Supported forms : (2 forms)
//
//    * DPPS imm8, xmm, xmm     [SSE4.1]
//    * DPPS imm8, m128, xmm    [SSE4.1]
//
func (self *Program) DPPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("DPPS", 3, Operands { v0, v1, v2 })
    // DPPS imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // 66: mandatory prefix
            m.rexo(hcode(v[2]), v[1], false)                // optional REX for XMM8-XMM15
            m.emit(0x0f)                                    // three-byte opcode escape: 0F 3A
            m.emit(0x3a)
            m.emit(0x40)                                    // DPPS opcode (0x41 is DPPD)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))   // ModRM: mod=11, reg=dst, rm=src
            m.imm1(toImmAny(v[0]))                          // imm8 selection/broadcast mask
        })
    }
    // DPPS imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x40)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)              // ModRM + SIB/disp for the memory form
            m.imm1(toImmAny(v[0]))
        })
    }
    // no operand form matched: reject at build time rather than emit garbage
    if p.len == 0 {
        panic("invalid operands for DPPS")
    }
    return p
}
  9618  
// EMMS performs "Exit MMX State".
//
// Mnemonic        : EMMS
// Supported forms : (1 form)
//
//    * EMMS    [MMX]
//
func (self *Program) EMMS() *Instruction {
    p := self.alloc("EMMS", 0, Operands {  })
    // EMMS takes no operands, so the single encoding is added unconditionally.
    self.require(ISA_MMX)
    p.domain = DomainMMXSSE
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0x0f)    // two-byte opcode escape
        m.emit(0x77)    // EMMS opcode
    })
    return p
}
  9637  
// EXTRACTPS performs "Extract Packed Single Precision Floating-Point Value".
//
// Mnemonic        : EXTRACTPS
// Supported forms : (2 forms)
//
//    * EXTRACTPS imm8, xmm, r32    [SSE4.1]
//    * EXTRACTPS imm8, xmm, m32    [SSE4.1]
//
func (self *Program) EXTRACTPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("EXTRACTPS", 3, Operands { v0, v1, v2 })
    // EXTRACTPS imm8, xmm, r32
    if isImm8(v0) && isXMM(v1) && isReg32(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // 66: mandatory prefix
            m.rexo(hcode(v[1]), v[2], false)                // optional REX; note source XMM is the reg field here
            m.emit(0x0f)                                    // three-byte opcode escape: 0F 3A
            m.emit(0x3a)
            m.emit(0x17)                                    // EXTRACTPS opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))   // ModRM: mod=11, reg=src xmm, rm=dst r32
            m.imm1(toImmAny(v[0]))                          // imm8 lane selector
        })
    }
    // EXTRACTPS imm8, xmm, m32
    if isImm8(v0) && isXMM(v1) && isM32(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[2]), false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x17)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)              // ModRM + SIB/disp; memory is the destination
            m.imm1(toImmAny(v[0]))
        })
    }
    // no operand form matched: reject at build time rather than emit garbage
    if p.len == 0 {
        panic("invalid operands for EXTRACTPS")
    }
    return p
}
  9681  
// EXTRQ performs "Extract Field".
//
// Mnemonic        : EXTRQ
// Supported forms : (2 forms)
//
//    * EXTRQ xmm, xmm           [SSE4A]
//    * EXTRQ imm8, imm8, xmm    [SSE4A]
//
func (self *Program) EXTRQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // EXTRQ is variadic because its two forms take different operand counts.
    switch len(vv) {
        case 0  : p = self.alloc("EXTRQ", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("EXTRQ", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction EXTRQ takes 2 or 3 operands")
    }
    // EXTRQ xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4A)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // 66: mandatory prefix
            m.rexo(hcode(v[1]), v[0], false)                // optional REX for XMM8-XMM15
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0x79)                                    // EXTRQ (register form) opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // EXTRQ imm8, imm8, xmm
    if len(vv) == 1 && isImm8(v0) && isImm8(v1) && isXMM(vv[0]) {
        self.require(ISA_SSE4A)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[2], false)
            m.emit(0x0f)
            m.emit(0x78)                    // EXTRQ (immediate form) opcode, /0
            m.emit(0xc0 | lcode(v[2]))      // ModRM: mod=11, reg=0, rm=xmm
            m.imm1(toImmAny(v[1]))          // immediates emitted in reverse operand order:
            m.imm1(toImmAny(v[0]))          // v1 first, then v0 (AT&T-style operand ordering)
        })
    }
    // no operand form matched: reject at build time rather than emit garbage
    if p.len == 0 {
        panic("invalid operands for EXTRQ")
    }
    return p
}
  9728  
// FEMMS performs "Fast Exit Multimedia State".
//
// Mnemonic        : FEMMS
// Supported forms : (1 form)
//
//    * FEMMS    [FEMMS]
//
func (self *Program) FEMMS() *Instruction {
    p := self.alloc("FEMMS", 0, Operands {  })
    // FEMMS takes no operands, so the single encoding is added unconditionally.
    self.require(ISA_FEMMS)
    p.domain = DomainAMDSpecific
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0x0f)    // two-byte opcode escape
        m.emit(0x0e)    // FEMMS opcode (AMD-specific)
    })
    return p
}
  9747  
// HADDPD performs "Packed Double-FP Horizontal Add".
//
// Mnemonic        : HADDPD
// Supported forms : (2 forms)
//
//    * HADDPD xmm, xmm     [SSE3]
//    * HADDPD m128, xmm    [SSE3]
//
func (self *Program) HADDPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("HADDPD", 2, Operands { v0, v1 })
    // HADDPD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // 66: mandatory prefix (packed double)
            m.rexo(hcode(v[1]), v[0], false)                // optional REX for XMM8-XMM15
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0x7c)                                    // HADDPD opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // HADDPD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x7c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM + SIB/disp for the memory form
        })
    }
    // no operand form matched: reject at build time rather than emit garbage
    if p.len == 0 {
        panic("invalid operands for HADDPD")
    }
    return p
}
  9787  
// HADDPS performs "Packed Single-FP Horizontal Add".
//
// Mnemonic        : HADDPS
// Supported forms : (2 forms)
//
//    * HADDPS xmm, xmm     [SSE3]
//    * HADDPS m128, xmm    [SSE3]
//
func (self *Program) HADDPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("HADDPS", 2, Operands { v0, v1 })
    // HADDPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)                                    // F2: mandatory prefix (single-FP variant)
            m.rexo(hcode(v[1]), v[0], false)                // optional REX for XMM8-XMM15
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0x7c)                                    // HADDPS opcode (shared with HADDPD; prefix selects)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // HADDPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x7c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM + SIB/disp for the memory form
        })
    }
    // no operand form matched: reject at build time rather than emit garbage
    if p.len == 0 {
        panic("invalid operands for HADDPS")
    }
    return p
}
  9827  
// HSUBPD performs "Packed Double-FP Horizontal Subtract".
//
// Mnemonic        : HSUBPD
// Supported forms : (2 forms)
//
//    * HSUBPD xmm, xmm     [SSE3]
//    * HSUBPD m128, xmm    [SSE3]
//
func (self *Program) HSUBPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("HSUBPD", 2, Operands { v0, v1 })
    // HSUBPD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // 66: mandatory prefix (packed double)
            m.rexo(hcode(v[1]), v[0], false)                // optional REX for XMM8-XMM15
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0x7d)                                    // HSUBPD opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // HSUBPD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x7d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM + SIB/disp for the memory form
        })
    }
    // no operand form matched: reject at build time rather than emit garbage
    if p.len == 0 {
        panic("invalid operands for HSUBPD")
    }
    return p
}
  9867  
// HSUBPS performs "Packed Single-FP Horizontal Subtract".
//
// Mnemonic        : HSUBPS
// Supported forms : (2 forms)
//
//    * HSUBPS xmm, xmm     [SSE3]
//    * HSUBPS m128, xmm    [SSE3]
//
func (self *Program) HSUBPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("HSUBPS", 2, Operands { v0, v1 })
    // HSUBPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)                                    // F2: mandatory prefix (single-FP variant)
            m.rexo(hcode(v[1]), v[0], false)                // optional REX for XMM8-XMM15
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0x7d)                                    // HSUBPS opcode (shared with HSUBPD; prefix selects)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // HSUBPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x7d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM + SIB/disp for the memory form
        })
    }
    // no operand form matched: reject at build time rather than emit garbage
    if p.len == 0 {
        panic("invalid operands for HSUBPS")
    }
    return p
}
  9907  
// IDIVB performs "Signed Divide".
//
// Mnemonic        : IDIV
// Supported forms : (2 forms)
//
//    * IDIVB r8
//    * IDIVB m8
//
func (self *Program) IDIVB(v0 interface{}) *Instruction {
    p := self.alloc("IDIVB", 1, Operands { v0 })
    // IDIVB r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // REX needed for SPL/BPL/SIL/DIL and R8B-R15B
            m.emit(0xf6)                        // group-3 opcode (8-bit)
            m.emit(0xf8 | lcode(v[0]))          // ModRM: mod=11, /7 selects IDIV
        })
    }
    // IDIVB m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)
            m.emit(0xf6)
            m.mrsd(7, addr(v[0]), 1)            // ModRM /7 (IDIV) + SIB/disp for the memory form
        })
    }
    // no operand form matched: reject at build time rather than emit garbage
    if p.len == 0 {
        panic("invalid operands for IDIVB")
    }
    return p
}
  9941  
// IDIVL performs "Signed Divide".
//
// Mnemonic        : IDIV
// Supported forms : (2 forms)
//
//    * IDIVL r32
//    * IDIVL m32
//
func (self *Program) IDIVL(v0 interface{}) *Instruction {
    p := self.alloc("IDIVL", 1, Operands { v0 })
    // IDIVL r32
    if isReg32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], false)          // optional REX for R8D-R15D
            m.emit(0xf7)                    // group-3 opcode
            m.emit(0xf8 | lcode(v[0]))      // ModRM: mod=11, /7 selects IDIV
        })
    }
    // IDIVL m32
    if isM32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)
            m.emit(0xf7)
            m.mrsd(7, addr(v[0]), 1)        // ModRM /7 (IDIV) + SIB/disp for the memory form
        })
    }
    // no operand form matched: reject at build time rather than emit garbage
    if p.len == 0 {
        panic("invalid operands for IDIVL")
    }
    return p
}
  9975  
// IDIVQ performs "Signed Divide".
//
// Mnemonic        : IDIV
// Supported forms : (2 forms)
//
//    * IDIVQ r64
//    * IDIVQ m64
//
func (self *Program) IDIVQ(v0 interface{}) *Instruction {
    p := self.alloc("IDIVQ", 1, Operands { v0 })
    // IDIVQ r64
    if isReg64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0]))      // REX.W (+ REX.B for R8-R15): mandatory for 64-bit operand
            m.emit(0xf7)                    // group-3 opcode
            m.emit(0xf8 | lcode(v[0]))      // ModRM: mod=11, /7 selects IDIV
        })
    }
    // IDIVQ m64
    if isM64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[0]))        // REX.W plus any base/index extension bits
            m.emit(0xf7)
            m.mrsd(7, addr(v[0]), 1)        // ModRM /7 (IDIV) + SIB/disp for the memory form
        })
    }
    // no operand form matched: reject at build time rather than emit garbage
    if p.len == 0 {
        panic("invalid operands for IDIVQ")
    }
    return p
}
 10009  
// IDIVW performs "Signed Divide".
//
// Mnemonic        : IDIV
// Supported forms : (2 forms)
//
//    * IDIVW r16
//    * IDIVW m16
//
func (self *Program) IDIVW(v0 interface{}) *Instruction {
    p := self.alloc("IDIVW", 1, Operands { v0 })
    // IDIVW r16
    if isReg16(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                    // 66: operand-size override (16-bit)
            m.rexo(0, v[0], false)          // optional REX for R8W-R15W
            m.emit(0xf7)                    // group-3 opcode
            m.emit(0xf8 | lcode(v[0]))      // ModRM: mod=11, /7 selects IDIV
        })
    }
    // IDIVW m16
    if isM16(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[0]), false)
            m.emit(0xf7)
            m.mrsd(7, addr(v[0]), 1)        // ModRM /7 (IDIV) + SIB/disp for the memory form
        })
    }
    // no operand form matched: reject at build time rather than emit garbage
    if p.len == 0 {
        panic("invalid operands for IDIVW")
    }
    return p
}
 10045  
// IMULB performs "Signed Multiply".
//
// Mnemonic        : IMUL
// Supported forms : (2 forms)
//
//    * IMULB r8
//    * IMULB m8
//
func (self *Program) IMULB(v0 interface{}) *Instruction {
    p := self.alloc("IMULB", 1, Operands { v0 })
    // IMULB r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // REX needed for SPL/BPL/SIL/DIL and R8B-R15B
            m.emit(0xf6)                        // group-3 opcode (8-bit)
            m.emit(0xe8 | lcode(v[0]))          // ModRM: mod=11, /5 selects IMUL
        })
    }
    // IMULB m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)
            m.emit(0xf6)
            m.mrsd(5, addr(v[0]), 1)            // ModRM /5 (IMUL) + SIB/disp for the memory form
        })
    }
    // no operand form matched: reject at build time rather than emit garbage
    if p.len == 0 {
        panic("invalid operands for IMULB")
    }
    return p
}
 10079  
// IMULL performs "Signed Multiply".
//
// Mnemonic        : IMUL
// Supported forms : (8 forms)
//
//    * IMULL r32
//    * IMULL m32
//    * IMULL r32, r32
//    * IMULL m32, r32
//    * IMULL imm8, r32, r32
//    * IMULL imm32, r32, r32
//    * IMULL imm8, m32, r32
//    * IMULL imm32, m32, r32
//
func (self *Program) IMULL(v0 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // IMUL is variadic: one-operand (F7 /5), two-operand (0F AF) and
    // three-operand immediate (6B / 69) forms take different arities.
    switch len(vv) {
        case 0  : p = self.alloc("IMULL", 1, Operands { v0 })
        case 1  : p = self.alloc("IMULL", 2, Operands { v0, vv[0] })
        case 2  : p = self.alloc("IMULL", 3, Operands { v0, vv[0], vv[1] })
        default : panic("instruction IMULL takes 1 or 2 or 3 operands")
    }
    // IMULL r32
    if len(vv) == 0 && isReg32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], false)          // optional REX for R8D-R15D
            m.emit(0xf7)                    // group-3 opcode
            m.emit(0xe8 | lcode(v[0]))      // ModRM: mod=11, /5 selects IMUL
        })
    }
    // IMULL m32
    if len(vv) == 0 && isM32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)
            m.emit(0xf7)
            m.mrsd(5, addr(v[0]), 1)        // ModRM /5 (IMUL) + SIB/disp for the memory form
        })
    }
    // IMULL r32, r32
    if len(vv) == 1 && isReg32(v0) && isReg32(vv[0]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0xaf)                                    // two-operand IMUL opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // IMULL m32, r32
    if len(vv) == 1 && isM32(v0) && isReg32(vv[0]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xaf)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // IMULL imm8, r32, r32
    if len(vv) == 2 && isImm8(v0) && isReg32(vv[0]) && isReg32(vv[1]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x6b)                                    // three-operand IMUL, sign-extended imm8
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // IMULL imm32, r32, r32
    if len(vv) == 2 && isImm32(v0) && isReg32(vv[0]) && isReg32(vv[1]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x69)                                    // three-operand IMUL, full imm32
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm4(toImmAny(v[0]))
        })
    }
    // IMULL imm8, m32, r32
    if len(vv) == 2 && isImm8(v0) && isM32(vv[0]) && isReg32(vv[1]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x6b)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // IMULL imm32, m32, r32
    if len(vv) == 2 && isImm32(v0) && isM32(vv[0]) && isReg32(vv[1]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x69)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm4(toImmAny(v[0]))
        })
    }
    // no operand form matched: reject at build time rather than emit garbage
    if p.len == 0 {
        panic("invalid operands for IMULL")
    }
    return p
}
 10185  
// IMULQ performs "Signed Multiply".
//
// Mnemonic        : IMUL
// Supported forms : (8 forms)
//
//    * IMULQ r64
//    * IMULQ m64
//    * IMULQ r64, r64
//    * IMULQ m64, r64
//    * IMULQ imm8, r64, r64
//    * IMULQ imm32, r64, r64
//    * IMULQ imm8, m64, r64
//    * IMULQ imm32, m64, r64
//
func (self *Program) IMULQ(v0 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // IMUL is variadic: one-operand (F7 /5), two-operand (0F AF) and
    // three-operand immediate (6B / 69) forms take different arities.
    switch len(vv) {
        case 0  : p = self.alloc("IMULQ", 1, Operands { v0 })
        case 1  : p = self.alloc("IMULQ", 2, Operands { v0, vv[0] })
        case 2  : p = self.alloc("IMULQ", 3, Operands { v0, vv[0], vv[1] })
        default : panic("instruction IMULQ takes 1 or 2 or 3 operands")
    }
    // IMULQ r64
    if len(vv) == 0 && isReg64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0]))      // REX.W (+ REX.B for R8-R15): mandatory for 64-bit operand
            m.emit(0xf7)                    // group-3 opcode
            m.emit(0xe8 | lcode(v[0]))      // ModRM: mod=11, /5 selects IMUL
        })
    }
    // IMULQ m64
    if len(vv) == 0 && isM64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[0]))        // REX.W plus any base/index extension bits
            m.emit(0xf7)
            m.mrsd(5, addr(v[0]), 1)        // ModRM /5 (IMUL) + SIB/disp for the memory form
        })
    }
    // IMULQ r64, r64
    if len(vv) == 1 && isReg64(v0) && isReg64(vv[0]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))   // REX.W + REX.R(dst) + REX.B(src)
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0xaf)                                    // two-operand IMUL opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // IMULQ m64, r64
    if len(vv) == 1 && isM64(v0) && isReg64(vv[0]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))
            m.emit(0x0f)
            m.emit(0xaf)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // IMULQ imm8, r64, r64
    if len(vv) == 2 && isImm8(v0) && isReg64(vv[0]) && isReg64(vv[1]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[2]) << 2 | hcode(v[1]))
            m.emit(0x6b)                                    // three-operand IMUL, sign-extended imm8
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // IMULQ imm32, r64, r64
    if len(vv) == 2 && isImm32(v0) && isReg64(vv[0]) && isReg64(vv[1]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[2]) << 2 | hcode(v[1]))
            m.emit(0x69)                                    // three-operand IMUL, sign-extended imm32
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm4(toImmAny(v[0]))
        })
    }
    // IMULQ imm8, m64, r64
    if len(vv) == 2 && isImm8(v0) && isM64(vv[0]) && isReg64(vv[1]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[2]), addr(v[1]))
            m.emit(0x6b)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // IMULQ imm32, m64, r64
    if len(vv) == 2 && isImm32(v0) && isM64(vv[0]) && isReg64(vv[1]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[2]), addr(v[1]))
            m.emit(0x69)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm4(toImmAny(v[0]))
        })
    }
    // no operand form matched: reject at build time rather than emit garbage
    if p.len == 0 {
        panic("invalid operands for IMULQ")
    }
    return p
}
 10291  
// IMULW performs "Signed Multiply".
//
// Mnemonic        : IMUL
// Supported forms : (8 forms)
//
//    * IMULW r16
//    * IMULW m16
//    * IMULW r16, r16
//    * IMULW m16, r16
//    * IMULW imm8, r16, r16
//    * IMULW imm16, r16, r16
//    * IMULW imm8, m16, r16
//    * IMULW imm16, m16, r16
//
// The variadic tail selects the arity: one operand (AX:DX accumulator form),
// two operands (two-operand IMUL), or three operands (immediate form).
// All forms carry the 0x66 operand-size prefix to select 16-bit operands.
func (self *Program) IMULW(v0 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("IMULW", 1, Operands { v0 })
        case 1  : p = self.alloc("IMULW", 2, Operands { v0, vv[0] })
        case 2  : p = self.alloc("IMULW", 3, Operands { v0, vv[0], vv[1] })
        default : panic("instruction IMULW takes 1 or 2 or 3 operands")
    }
    // IMULW r16
    if len(vv) == 0 && isReg16(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[0], false)
            m.emit(0xf7)
            // ModRM reg field /5 selects IMUL within the 0xF7 group
            m.emit(0xe8 | lcode(v[0]))
        })
    }
    // IMULW m16
    if len(vv) == 0 && isM16(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[0]), false)
            m.emit(0xf7)
            m.mrsd(5, addr(v[0]), 1)
        })
    }
    // IMULW r16, r16
    if len(vv) == 1 && isReg16(v0) && isReg16(vv[0]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            // two-operand form: 0F AF /r
            m.emit(0x0f)
            m.emit(0xaf)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // IMULW m16, r16
    if len(vv) == 1 && isM16(v0) && isReg16(vv[0]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xaf)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // IMULW imm8, r16, r16
    if len(vv) == 2 && isImm8(v0) && isReg16(vv[0]) && isReg16(vv[1]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), v[1], false)
            // 6B /r ib: sign-extended 8-bit immediate
            m.emit(0x6b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // IMULW imm16, r16, r16
    if len(vv) == 2 && isImm16(v0) && isReg16(vv[0]) && isReg16(vv[1]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), v[1], false)
            // 69 /r iw: full 16-bit immediate
            m.emit(0x69)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm2(toImmAny(v[0]))
        })
    }
    // IMULW imm8, m16, r16
    if len(vv) == 2 && isImm8(v0) && isM16(vv[0]) && isReg16(vv[1]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x6b)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // IMULW imm16, m16, r16
    if len(vv) == 2 && isImm16(v0) && isM16(vv[0]) && isReg16(vv[1]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x69)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm2(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for IMULW")
    }
    return p
}
 10405  
// INCB performs "Increment by 1".
//
// Mnemonic        : INC
// Supported forms : (2 forms)
//
//    * INCB r8
//    * INCB m8
//
func (self *Program) INCB(v0 interface{}) *Instruction {
    p := self.alloc("INCB", 1, Operands { v0 })
    // INCB r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // isReg8REX forces a REX prefix for the REX-only byte registers
            m.rexo(0, v[0], isReg8REX(v[0]))
            m.emit(0xfe)
            // ModRM reg field /0 selects INC within the 0xFE group
            m.emit(0xc0 | lcode(v[0]))
        })
    }
    // INCB m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)
            m.emit(0xfe)
            m.mrsd(0, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for INCB")
    }
    return p
}
 10439  
// INCL performs "Increment by 1".
//
// Mnemonic        : INC
// Supported forms : (2 forms)
//
//    * INCL r32
//    * INCL m32
//
func (self *Program) INCL(v0 interface{}) *Instruction {
    p := self.alloc("INCL", 1, Operands { v0 })
    // INCL r32
    if isReg32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], false)
            m.emit(0xff)
            // ModRM reg field /0 selects INC within the 0xFF group
            m.emit(0xc0 | lcode(v[0]))
        })
    }
    // INCL m32
    if isM32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)
            m.emit(0xff)
            m.mrsd(0, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for INCL")
    }
    return p
}
 10473  
// INCQ performs "Increment by 1".
//
// Mnemonic        : INC
// Supported forms : (2 forms)
//
//    * INCQ r64
//    * INCQ m64
//
func (self *Program) INCQ(v0 interface{}) *Instruction {
    p := self.alloc("INCQ", 1, Operands { v0 })
    // INCQ r64
    if isReg64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0x48 is REX.W (64-bit operand size); hcode supplies REX.B
            m.emit(0x48 | hcode(v[0]))
            m.emit(0xff)
            // ModRM reg field /0 selects INC within the 0xFF group
            m.emit(0xc0 | lcode(v[0]))
        })
    }
    // INCQ m64
    if isM64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // rexm with W=1 emits the REX.W prefix for the memory operand
            m.rexm(1, 0, addr(v[0]))
            m.emit(0xff)
            m.mrsd(0, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for INCQ")
    }
    return p
}
 10507  
// INCW performs "Increment by 1".
//
// Mnemonic        : INC
// Supported forms : (2 forms)
//
//    * INCW r16
//    * INCW m16
//
func (self *Program) INCW(v0 interface{}) *Instruction {
    p := self.alloc("INCW", 1, Operands { v0 })
    // INCW r16
    if isReg16(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0x66 operand-size prefix selects 16-bit operands
            m.emit(0x66)
            m.rexo(0, v[0], false)
            m.emit(0xff)
            // ModRM reg field /0 selects INC within the 0xFF group
            m.emit(0xc0 | lcode(v[0]))
        })
    }
    // INCW m16
    if isM16(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[0]), false)
            m.emit(0xff)
            m.mrsd(0, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for INCW")
    }
    return p
}
 10543  
// INSERTPS performs "Insert Packed Single Precision Floating-Point Value".
//
// Mnemonic        : INSERTPS
// Supported forms : (2 forms)
//
//    * INSERTPS imm8, xmm, xmm    [SSE4.1]
//    * INSERTPS imm8, m32, xmm    [SSE4.1]
//
// Encoding: 66 0F 3A 21 /r ib (SSE4.1 required).
func (self *Program) INSERTPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("INSERTPS", 3, Operands { v0, v1, v2 })
    // INSERTPS imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x21)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // INSERTPS imm8, m32, xmm
    if isImm8(v0) && isM32(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x21)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for INSERTPS")
    }
    return p
}
 10587  
// INSERTQ performs "Insert Field".
//
// Mnemonic        : INSERTQ
// Supported forms : (2 forms)
//
//    * INSERTQ xmm, xmm                [SSE4A]
//    * INSERTQ imm8, imm8, xmm, xmm    [SSE4A]
//
// The variadic tail selects the arity: two operands (F2 0F 79 /r) or
// four operands (F2 0F 78 /r ib ib).
func (self *Program) INSERTQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("INSERTQ", 2, Operands { v0, v1 })
        case 2  : p = self.alloc("INSERTQ", 4, Operands { v0, v1, vv[0], vv[1] })
        default : panic("instruction INSERTQ takes 2 or 4 operands")
    }
    // INSERTQ xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4A)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // INSERTQ imm8, imm8, xmm, xmm
    if len(vv) == 2 && isImm8(v0) && isImm8(v1) && isXMM(vv[0]) && isXMM(vv[1]) {
        self.require(ISA_SSE4A)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[3]), v[2], false)
            m.emit(0x0f)
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[2]))
            // the two immediates are emitted in reverse operand order
            m.imm1(toImmAny(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for INSERTQ")
    }
    return p
}
 10634  
// INT performs "Call to Interrupt Procedure".
//
// Mnemonic        : INT
// Supported forms : (2 forms)
//
//    * INT 3
//    * INT imm8
//
func (self *Program) INT(v0 interface{}) *Instruction {
    p := self.alloc("INT", 1, Operands { v0 })
    // INT 3
    if isConst3(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0xCC is the dedicated single-byte INT 3 (breakpoint) encoding
            m.emit(0xcc)
        })
    }
    // INT imm8
    if isImm8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // CD ib: interrupt vector taken from the immediate
            m.emit(0xcd)
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for INT")
    }
    return p
}
 10665  
// JA performs "Jump if above (CF == 0 and ZF == 0)".
//
// Mnemonic        : JA
// Supported forms : (2 forms)
//
//    * JA rel8
//    * JA rel32
//
// Label operands are also accepted: both displacement widths are registered
// and the encoder selects one once the label offset is resolved.
func (self *Program) JA(v0 interface{}) *Instruction {
    p := self.alloc("JA", 1, Operands { v0 })
    p.branch = _B_conditional
    // JA rel8
    if isRel8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x77)
            m.imm1(relv(v[0]))
        })
    }
    // JA rel32
    if isRel32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)
            m.emit(0x87)
            m.imm4(relv(v[0]))
        })
    }
    // JA label
    if isLabel(v0) {
        // _F_rel1/_F_rel4 mark the short and near candidates for selection
        p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
            m.emit(0x77)
            m.imm1(relv(v[0]))
        })
        p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)
            m.emit(0x87)
            m.imm4(relv(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for JA")
    }
    return p
}
 10711  
// JAE performs "Jump if above or equal (CF == 0)".
//
// Mnemonic        : JAE
// Supported forms : (2 forms)
//
//    * JAE rel8
//    * JAE rel32
//
// Label operands are also accepted: both displacement widths are registered
// and the encoder selects one once the label offset is resolved.
func (self *Program) JAE(v0 interface{}) *Instruction {
    p := self.alloc("JAE", 1, Operands { v0 })
    p.branch = _B_conditional
    // JAE rel8
    if isRel8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x73)
            m.imm1(relv(v[0]))
        })
    }
    // JAE rel32
    if isRel32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)
            m.emit(0x83)
            m.imm4(relv(v[0]))
        })
    }
    // JAE label
    if isLabel(v0) {
        // _F_rel1/_F_rel4 mark the short and near candidates for selection
        p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
            m.emit(0x73)
            m.imm1(relv(v[0]))
        })
        p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)
            m.emit(0x83)
            m.imm4(relv(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for JAE")
    }
    return p
}
 10757  
// JB performs "Jump if below (CF == 1)".
//
// Mnemonic        : JB
// Supported forms : (2 forms)
//
//    * JB rel8
//    * JB rel32
//
// Label operands are also accepted: both displacement widths are registered
// and the encoder selects one once the label offset is resolved.
func (self *Program) JB(v0 interface{}) *Instruction {
    p := self.alloc("JB", 1, Operands { v0 })
    p.branch = _B_conditional
    // JB rel8
    if isRel8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x72)
            m.imm1(relv(v[0]))
        })
    }
    // JB rel32
    if isRel32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)
            m.emit(0x82)
            m.imm4(relv(v[0]))
        })
    }
    // JB label
    if isLabel(v0) {
        // _F_rel1/_F_rel4 mark the short and near candidates for selection
        p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
            m.emit(0x72)
            m.imm1(relv(v[0]))
        })
        p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)
            m.emit(0x82)
            m.imm4(relv(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for JB")
    }
    return p
}
 10803  
// JBE performs "Jump if below or equal (CF == 1 or ZF == 1)".
//
// Mnemonic        : JBE
// Supported forms : (2 forms)
//
//    * JBE rel8
//    * JBE rel32
//
// Label operands are also accepted: both displacement widths are registered
// and the encoder selects one once the label offset is resolved.
func (self *Program) JBE(v0 interface{}) *Instruction {
    p := self.alloc("JBE", 1, Operands { v0 })
    p.branch = _B_conditional
    // JBE rel8
    if isRel8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x76)
            m.imm1(relv(v[0]))
        })
    }
    // JBE rel32
    if isRel32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)
            m.emit(0x86)
            m.imm4(relv(v[0]))
        })
    }
    // JBE label
    if isLabel(v0) {
        // _F_rel1/_F_rel4 mark the short and near candidates for selection
        p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
            m.emit(0x76)
            m.imm1(relv(v[0]))
        })
        p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)
            m.emit(0x86)
            m.imm4(relv(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for JBE")
    }
    return p
}
 10849  
// JC performs "Jump if carry (CF == 1)".
//
// Mnemonic        : JC
// Supported forms : (2 forms)
//
//    * JC rel8
//    * JC rel32
//
// JC is an alias condition: it shares the 0x72 / 0F 82 opcodes with JB.
// Label operands are also accepted: both displacement widths are registered
// and the encoder selects one once the label offset is resolved.
func (self *Program) JC(v0 interface{}) *Instruction {
    p := self.alloc("JC", 1, Operands { v0 })
    p.branch = _B_conditional
    // JC rel8
    if isRel8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x72)
            m.imm1(relv(v[0]))
        })
    }
    // JC rel32
    if isRel32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)
            m.emit(0x82)
            m.imm4(relv(v[0]))
        })
    }
    // JC label
    if isLabel(v0) {
        // _F_rel1/_F_rel4 mark the short and near candidates for selection
        p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
            m.emit(0x72)
            m.imm1(relv(v[0]))
        })
        p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)
            m.emit(0x82)
            m.imm4(relv(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for JC")
    }
    return p
}
 10895  
// JE performs "Jump if equal (ZF == 1)".
//
// Mnemonic        : JE
// Supported forms : (2 forms)
//
//    * JE rel8
//    * JE rel32
//
// Label operands are also accepted: both displacement widths are registered
// and the encoder selects one once the label offset is resolved.
func (self *Program) JE(v0 interface{}) *Instruction {
    p := self.alloc("JE", 1, Operands { v0 })
    p.branch = _B_conditional
    // JE rel8
    if isRel8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x74)
            m.imm1(relv(v[0]))
        })
    }
    // JE rel32
    if isRel32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)
            m.emit(0x84)
            m.imm4(relv(v[0]))
        })
    }
    // JE label
    if isLabel(v0) {
        // _F_rel1/_F_rel4 mark the short and near candidates for selection
        p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
            m.emit(0x74)
            m.imm1(relv(v[0]))
        })
        p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)
            m.emit(0x84)
            m.imm4(relv(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for JE")
    }
    return p
}
 10941  
 10942  // JECXZ performs "Jump if ECX register is 0".
 10943  //
 10944  // Mnemonic        : JECXZ
 10945  // Supported forms : (1 form)
 10946  //
 10947  //    * JECXZ rel8
 10948  //
 10949  func (self *Program) JECXZ(v0 interface{}) *Instruction {
 10950      p := self.alloc("JECXZ", 1, Operands { v0 })
 10951      p.branch = _B_conditional
 10952      // JECXZ rel8
 10953      if isRel8(v0) {
 10954          p.domain = DomainGeneric
 10955          p.add(0, func(m *_Encoding, v []interface{}) {
 10956              m.emit(0xe3)
 10957              m.imm1(relv(v[0]))
 10958          })
 10959      }
 10960      // JECXZ label
 10961      if isLabel(v0) {
 10962          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 10963              m.emit(0xe3)
 10964              m.imm1(relv(v[0]))
 10965          })
 10966      }
 10967      if p.len == 0 {
 10968          panic("invalid operands for JECXZ")
 10969      }
 10970      return p
 10971  }
 10972  
// JG performs "Jump if greater (ZF == 0 and SF == OF)".
//
// Mnemonic        : JG
// Supported forms : (2 forms)
//
//    * JG rel8
//    * JG rel32
//
// Label operands are also accepted: both displacement widths are registered
// and the encoder selects one once the label offset is resolved.
func (self *Program) JG(v0 interface{}) *Instruction {
    p := self.alloc("JG", 1, Operands { v0 })
    p.branch = _B_conditional
    // JG rel8
    if isRel8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x7f)
            m.imm1(relv(v[0]))
        })
    }
    // JG rel32
    if isRel32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)
            m.emit(0x8f)
            m.imm4(relv(v[0]))
        })
    }
    // JG label
    if isLabel(v0) {
        // _F_rel1/_F_rel4 mark the short and near candidates for selection
        p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
            m.emit(0x7f)
            m.imm1(relv(v[0]))
        })
        p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)
            m.emit(0x8f)
            m.imm4(relv(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for JG")
    }
    return p
}
 11018  
// JGE performs "Jump if greater or equal (SF == OF)".
//
// Mnemonic        : JGE
// Supported forms : (2 forms)
//
//    * JGE rel8
//    * JGE rel32
//
// Label operands are also accepted: both displacement widths are registered
// and the encoder selects one once the label offset is resolved.
func (self *Program) JGE(v0 interface{}) *Instruction {
    p := self.alloc("JGE", 1, Operands { v0 })
    p.branch = _B_conditional
    // JGE rel8
    if isRel8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x7d)
            m.imm1(relv(v[0]))
        })
    }
    // JGE rel32
    if isRel32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)
            m.emit(0x8d)
            m.imm4(relv(v[0]))
        })
    }
    // JGE label
    if isLabel(v0) {
        // _F_rel1/_F_rel4 mark the short and near candidates for selection
        p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
            m.emit(0x7d)
            m.imm1(relv(v[0]))
        })
        p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)
            m.emit(0x8d)
            m.imm4(relv(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for JGE")
    }
    return p
}
 11064  
// JL performs "Jump if less (SF != OF)".
//
// Mnemonic        : JL
// Supported forms : (2 forms)
//
//    * JL rel8
//    * JL rel32
//
// Label operands are also accepted: both displacement widths are registered
// and the encoder selects one once the label offset is resolved.
func (self *Program) JL(v0 interface{}) *Instruction {
    p := self.alloc("JL", 1, Operands { v0 })
    p.branch = _B_conditional
    // JL rel8
    if isRel8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x7c)
            m.imm1(relv(v[0]))
        })
    }
    // JL rel32
    if isRel32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)
            m.emit(0x8c)
            m.imm4(relv(v[0]))
        })
    }
    // JL label
    if isLabel(v0) {
        // _F_rel1/_F_rel4 mark the short and near candidates for selection
        p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
            m.emit(0x7c)
            m.imm1(relv(v[0]))
        })
        p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)
            m.emit(0x8c)
            m.imm4(relv(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for JL")
    }
    return p
}
 11110  
// JLE performs "Jump if less or equal (ZF == 1 or SF != OF)".
//
// Mnemonic        : JLE
// Supported forms : (2 forms)
//
//    * JLE rel8
//    * JLE rel32
//
// Label operands are also accepted: both displacement widths are registered
// and the encoder selects one once the label offset is resolved.
func (self *Program) JLE(v0 interface{}) *Instruction {
    p := self.alloc("JLE", 1, Operands { v0 })
    p.branch = _B_conditional
    // JLE rel8
    if isRel8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x7e)
            m.imm1(relv(v[0]))
        })
    }
    // JLE rel32
    if isRel32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)
            m.emit(0x8e)
            m.imm4(relv(v[0]))
        })
    }
    // JLE label
    if isLabel(v0) {
        // _F_rel1/_F_rel4 mark the short and near candidates for selection
        p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
            m.emit(0x7e)
            m.imm1(relv(v[0]))
        })
        p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)
            m.emit(0x8e)
            m.imm4(relv(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for JLE")
    }
    return p
}
 11156  
// JMP performs "Jump Unconditionally".
//
// Mnemonic        : JMP
// Supported forms : (2 forms)
//
//    * JMP rel8
//    * JMP rel32
//
// Label operands are also accepted: both displacement widths are registered
// and the encoder selects one once the label offset is resolved.
func (self *Program) JMP(v0 interface{}) *Instruction {
    p := self.alloc("JMP", 1, Operands { v0 })
    p.branch = _B_unconditional
    // JMP rel8
    if isRel8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EB cb: short jump
            m.emit(0xeb)
            m.imm1(relv(v[0]))
        })
    }
    // JMP rel32
    if isRel32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // E9 cd: near jump
            m.emit(0xe9)
            m.imm4(relv(v[0]))
        })
    }
    // JMP label
    if isLabel(v0) {
        // _F_rel1/_F_rel4 mark the short and near candidates for selection
        p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
            m.emit(0xeb)
            m.imm1(relv(v[0]))
        })
        p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
            m.emit(0xe9)
            m.imm4(relv(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for JMP")
    }
    return p
}
 11200  
// JMPQ performs "Jump Unconditionally".
//
// Mnemonic        : JMP
// Supported forms : (2 forms)
//
//    * JMPQ r64
//    * JMPQ m64
//
// Indirect jump through a 64-bit register or memory operand (FF /4).
// No REX.W is needed: indirect jumps default to 64-bit in long mode.
func (self *Program) JMPQ(v0 interface{}) *Instruction {
    p := self.alloc("JMPQ", 1, Operands { v0 })
    // JMPQ r64
    if isReg64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], false)
            m.emit(0xff)
            // ModRM reg field /4 selects JMP within the 0xFF group
            m.emit(0xe0 | lcode(v[0]))
        })
    }
    // JMPQ m64
    if isM64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)
            m.emit(0xff)
            m.mrsd(4, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for JMPQ")
    }
    return p
}
 11234  
// JNA performs "Jump if not above (CF == 1 or ZF == 1)".
//
// Mnemonic        : JNA
// Supported forms : (2 forms)
//
//    * JNA rel8
//    * JNA rel32
//
// JNA is an alias condition: it shares the 0x76 / 0F 86 opcodes with JBE.
// Label operands are also accepted: both displacement widths are registered
// and the encoder selects one once the label offset is resolved.
func (self *Program) JNA(v0 interface{}) *Instruction {
    p := self.alloc("JNA", 1, Operands { v0 })
    p.branch = _B_conditional
    // JNA rel8
    if isRel8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x76)
            m.imm1(relv(v[0]))
        })
    }
    // JNA rel32
    if isRel32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)
            m.emit(0x86)
            m.imm4(relv(v[0]))
        })
    }
    // JNA label
    if isLabel(v0) {
        // _F_rel1/_F_rel4 mark the short and near candidates for selection
        p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
            m.emit(0x76)
            m.imm1(relv(v[0]))
        })
        p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)
            m.emit(0x86)
            m.imm4(relv(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for JNA")
    }
    return p
}
 11280  
// JNAE performs "Jump if not above or equal (CF == 1)".
//
// Mnemonic        : JNAE
// Supported forms : (2 forms)
//
//    * JNAE rel8
//    * JNAE rel32
//
// JNAE is an alias condition: it shares the 0x72 / 0F 82 opcodes with JB.
// Label operands are also accepted: both displacement widths are registered
// and the encoder selects one once the label offset is resolved.
func (self *Program) JNAE(v0 interface{}) *Instruction {
    p := self.alloc("JNAE", 1, Operands { v0 })
    p.branch = _B_conditional
    // JNAE rel8
    if isRel8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x72)
            m.imm1(relv(v[0]))
        })
    }
    // JNAE rel32
    if isRel32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)
            m.emit(0x82)
            m.imm4(relv(v[0]))
        })
    }
    // JNAE label
    if isLabel(v0) {
        // _F_rel1/_F_rel4 mark the short and near candidates for selection
        p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
            m.emit(0x72)
            m.imm1(relv(v[0]))
        })
        p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)
            m.emit(0x82)
            m.imm4(relv(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for JNAE")
    }
    return p
}
 11326  
 11327  // JNB performs "Jump if not below (CF == 0)".
 11328  //
 11329  // Mnemonic        : JNB
 11330  // Supported forms : (2 forms)
 11331  //
 11332  //    * JNB rel8
 11333  //    * JNB rel32
 11334  //
 11335  func (self *Program) JNB(v0 interface{}) *Instruction {
 11336      p := self.alloc("JNB", 1, Operands { v0 })
 11337      p.branch = _B_conditional
 11338      // JNB rel8
 11339      if isRel8(v0) {
 11340          p.domain = DomainGeneric
 11341          p.add(0, func(m *_Encoding, v []interface{}) {
 11342              m.emit(0x73)
 11343              m.imm1(relv(v[0]))
 11344          })
 11345      }
 11346      // JNB rel32
 11347      if isRel32(v0) {
 11348          p.domain = DomainGeneric
 11349          p.add(0, func(m *_Encoding, v []interface{}) {
 11350              m.emit(0x0f)
 11351              m.emit(0x83)
 11352              m.imm4(relv(v[0]))
 11353          })
 11354      }
 11355      // JNB label
 11356      if isLabel(v0) {
 11357          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 11358              m.emit(0x73)
 11359              m.imm1(relv(v[0]))
 11360          })
 11361          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 11362              m.emit(0x0f)
 11363              m.emit(0x83)
 11364              m.imm4(relv(v[0]))
 11365          })
 11366      }
 11367      if p.len == 0 {
 11368          panic("invalid operands for JNB")
 11369      }
 11370      return p
 11371  }
 11372  
 11373  // JNBE performs "Jump if not below or equal (CF == 0 and ZF == 0)".
 11374  //
 11375  // Mnemonic        : JNBE
 11376  // Supported forms : (2 forms)
 11377  //
 11378  //    * JNBE rel8
 11379  //    * JNBE rel32
 11380  //
 11381  func (self *Program) JNBE(v0 interface{}) *Instruction {
 11382      p := self.alloc("JNBE", 1, Operands { v0 })
 11383      p.branch = _B_conditional
 11384      // JNBE rel8
 11385      if isRel8(v0) {
 11386          p.domain = DomainGeneric
 11387          p.add(0, func(m *_Encoding, v []interface{}) {
 11388              m.emit(0x77)
 11389              m.imm1(relv(v[0]))
 11390          })
 11391      }
 11392      // JNBE rel32
 11393      if isRel32(v0) {
 11394          p.domain = DomainGeneric
 11395          p.add(0, func(m *_Encoding, v []interface{}) {
 11396              m.emit(0x0f)
 11397              m.emit(0x87)
 11398              m.imm4(relv(v[0]))
 11399          })
 11400      }
 11401      // JNBE label
 11402      if isLabel(v0) {
 11403          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 11404              m.emit(0x77)
 11405              m.imm1(relv(v[0]))
 11406          })
 11407          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 11408              m.emit(0x0f)
 11409              m.emit(0x87)
 11410              m.imm4(relv(v[0]))
 11411          })
 11412      }
 11413      if p.len == 0 {
 11414          panic("invalid operands for JNBE")
 11415      }
 11416      return p
 11417  }
 11418  
 11419  // JNC performs "Jump if not carry (CF == 0)".
 11420  //
 11421  // Mnemonic        : JNC
 11422  // Supported forms : (2 forms)
 11423  //
 11424  //    * JNC rel8
 11425  //    * JNC rel32
 11426  //
 11427  func (self *Program) JNC(v0 interface{}) *Instruction {
 11428      p := self.alloc("JNC", 1, Operands { v0 })
 11429      p.branch = _B_conditional
 11430      // JNC rel8
 11431      if isRel8(v0) {
 11432          p.domain = DomainGeneric
 11433          p.add(0, func(m *_Encoding, v []interface{}) {
 11434              m.emit(0x73)
 11435              m.imm1(relv(v[0]))
 11436          })
 11437      }
 11438      // JNC rel32
 11439      if isRel32(v0) {
 11440          p.domain = DomainGeneric
 11441          p.add(0, func(m *_Encoding, v []interface{}) {
 11442              m.emit(0x0f)
 11443              m.emit(0x83)
 11444              m.imm4(relv(v[0]))
 11445          })
 11446      }
 11447      // JNC label
 11448      if isLabel(v0) {
 11449          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 11450              m.emit(0x73)
 11451              m.imm1(relv(v[0]))
 11452          })
 11453          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 11454              m.emit(0x0f)
 11455              m.emit(0x83)
 11456              m.imm4(relv(v[0]))
 11457          })
 11458      }
 11459      if p.len == 0 {
 11460          panic("invalid operands for JNC")
 11461      }
 11462      return p
 11463  }
 11464  
 11465  // JNE performs "Jump if not equal (ZF == 0)".
 11466  //
 11467  // Mnemonic        : JNE
 11468  // Supported forms : (2 forms)
 11469  //
 11470  //    * JNE rel8
 11471  //    * JNE rel32
 11472  //
 11473  func (self *Program) JNE(v0 interface{}) *Instruction {
 11474      p := self.alloc("JNE", 1, Operands { v0 })
 11475      p.branch = _B_conditional
 11476      // JNE rel8
 11477      if isRel8(v0) {
 11478          p.domain = DomainGeneric
 11479          p.add(0, func(m *_Encoding, v []interface{}) {
 11480              m.emit(0x75)
 11481              m.imm1(relv(v[0]))
 11482          })
 11483      }
 11484      // JNE rel32
 11485      if isRel32(v0) {
 11486          p.domain = DomainGeneric
 11487          p.add(0, func(m *_Encoding, v []interface{}) {
 11488              m.emit(0x0f)
 11489              m.emit(0x85)
 11490              m.imm4(relv(v[0]))
 11491          })
 11492      }
 11493      // JNE label
 11494      if isLabel(v0) {
 11495          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 11496              m.emit(0x75)
 11497              m.imm1(relv(v[0]))
 11498          })
 11499          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 11500              m.emit(0x0f)
 11501              m.emit(0x85)
 11502              m.imm4(relv(v[0]))
 11503          })
 11504      }
 11505      if p.len == 0 {
 11506          panic("invalid operands for JNE")
 11507      }
 11508      return p
 11509  }
 11510  
 11511  // JNG performs "Jump if not greater (ZF == 1 or SF != OF)".
 11512  //
 11513  // Mnemonic        : JNG
 11514  // Supported forms : (2 forms)
 11515  //
 11516  //    * JNG rel8
 11517  //    * JNG rel32
 11518  //
 11519  func (self *Program) JNG(v0 interface{}) *Instruction {
 11520      p := self.alloc("JNG", 1, Operands { v0 })
 11521      p.branch = _B_conditional
 11522      // JNG rel8
 11523      if isRel8(v0) {
 11524          p.domain = DomainGeneric
 11525          p.add(0, func(m *_Encoding, v []interface{}) {
 11526              m.emit(0x7e)
 11527              m.imm1(relv(v[0]))
 11528          })
 11529      }
 11530      // JNG rel32
 11531      if isRel32(v0) {
 11532          p.domain = DomainGeneric
 11533          p.add(0, func(m *_Encoding, v []interface{}) {
 11534              m.emit(0x0f)
 11535              m.emit(0x8e)
 11536              m.imm4(relv(v[0]))
 11537          })
 11538      }
 11539      // JNG label
 11540      if isLabel(v0) {
 11541          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 11542              m.emit(0x7e)
 11543              m.imm1(relv(v[0]))
 11544          })
 11545          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 11546              m.emit(0x0f)
 11547              m.emit(0x8e)
 11548              m.imm4(relv(v[0]))
 11549          })
 11550      }
 11551      if p.len == 0 {
 11552          panic("invalid operands for JNG")
 11553      }
 11554      return p
 11555  }
 11556  
 11557  // JNGE performs "Jump if not greater or equal (SF != OF)".
 11558  //
 11559  // Mnemonic        : JNGE
 11560  // Supported forms : (2 forms)
 11561  //
 11562  //    * JNGE rel8
 11563  //    * JNGE rel32
 11564  //
 11565  func (self *Program) JNGE(v0 interface{}) *Instruction {
 11566      p := self.alloc("JNGE", 1, Operands { v0 })
 11567      p.branch = _B_conditional
 11568      // JNGE rel8
 11569      if isRel8(v0) {
 11570          p.domain = DomainGeneric
 11571          p.add(0, func(m *_Encoding, v []interface{}) {
 11572              m.emit(0x7c)
 11573              m.imm1(relv(v[0]))
 11574          })
 11575      }
 11576      // JNGE rel32
 11577      if isRel32(v0) {
 11578          p.domain = DomainGeneric
 11579          p.add(0, func(m *_Encoding, v []interface{}) {
 11580              m.emit(0x0f)
 11581              m.emit(0x8c)
 11582              m.imm4(relv(v[0]))
 11583          })
 11584      }
 11585      // JNGE label
 11586      if isLabel(v0) {
 11587          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 11588              m.emit(0x7c)
 11589              m.imm1(relv(v[0]))
 11590          })
 11591          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 11592              m.emit(0x0f)
 11593              m.emit(0x8c)
 11594              m.imm4(relv(v[0]))
 11595          })
 11596      }
 11597      if p.len == 0 {
 11598          panic("invalid operands for JNGE")
 11599      }
 11600      return p
 11601  }
 11602  
 11603  // JNL performs "Jump if not less (SF == OF)".
 11604  //
 11605  // Mnemonic        : JNL
 11606  // Supported forms : (2 forms)
 11607  //
 11608  //    * JNL rel8
 11609  //    * JNL rel32
 11610  //
 11611  func (self *Program) JNL(v0 interface{}) *Instruction {
 11612      p := self.alloc("JNL", 1, Operands { v0 })
 11613      p.branch = _B_conditional
 11614      // JNL rel8
 11615      if isRel8(v0) {
 11616          p.domain = DomainGeneric
 11617          p.add(0, func(m *_Encoding, v []interface{}) {
 11618              m.emit(0x7d)
 11619              m.imm1(relv(v[0]))
 11620          })
 11621      }
 11622      // JNL rel32
 11623      if isRel32(v0) {
 11624          p.domain = DomainGeneric
 11625          p.add(0, func(m *_Encoding, v []interface{}) {
 11626              m.emit(0x0f)
 11627              m.emit(0x8d)
 11628              m.imm4(relv(v[0]))
 11629          })
 11630      }
 11631      // JNL label
 11632      if isLabel(v0) {
 11633          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 11634              m.emit(0x7d)
 11635              m.imm1(relv(v[0]))
 11636          })
 11637          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 11638              m.emit(0x0f)
 11639              m.emit(0x8d)
 11640              m.imm4(relv(v[0]))
 11641          })
 11642      }
 11643      if p.len == 0 {
 11644          panic("invalid operands for JNL")
 11645      }
 11646      return p
 11647  }
 11648  
 11649  // JNLE performs "Jump if not less or equal (ZF == 0 and SF == OF)".
 11650  //
 11651  // Mnemonic        : JNLE
 11652  // Supported forms : (2 forms)
 11653  //
 11654  //    * JNLE rel8
 11655  //    * JNLE rel32
 11656  //
 11657  func (self *Program) JNLE(v0 interface{}) *Instruction {
 11658      p := self.alloc("JNLE", 1, Operands { v0 })
 11659      p.branch = _B_conditional
 11660      // JNLE rel8
 11661      if isRel8(v0) {
 11662          p.domain = DomainGeneric
 11663          p.add(0, func(m *_Encoding, v []interface{}) {
 11664              m.emit(0x7f)
 11665              m.imm1(relv(v[0]))
 11666          })
 11667      }
 11668      // JNLE rel32
 11669      if isRel32(v0) {
 11670          p.domain = DomainGeneric
 11671          p.add(0, func(m *_Encoding, v []interface{}) {
 11672              m.emit(0x0f)
 11673              m.emit(0x8f)
 11674              m.imm4(relv(v[0]))
 11675          })
 11676      }
 11677      // JNLE label
 11678      if isLabel(v0) {
 11679          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 11680              m.emit(0x7f)
 11681              m.imm1(relv(v[0]))
 11682          })
 11683          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 11684              m.emit(0x0f)
 11685              m.emit(0x8f)
 11686              m.imm4(relv(v[0]))
 11687          })
 11688      }
 11689      if p.len == 0 {
 11690          panic("invalid operands for JNLE")
 11691      }
 11692      return p
 11693  }
 11694  
 11695  // JNO performs "Jump if not overflow (OF == 0)".
 11696  //
 11697  // Mnemonic        : JNO
 11698  // Supported forms : (2 forms)
 11699  //
 11700  //    * JNO rel8
 11701  //    * JNO rel32
 11702  //
 11703  func (self *Program) JNO(v0 interface{}) *Instruction {
 11704      p := self.alloc("JNO", 1, Operands { v0 })
 11705      p.branch = _B_conditional
 11706      // JNO rel8
 11707      if isRel8(v0) {
 11708          p.domain = DomainGeneric
 11709          p.add(0, func(m *_Encoding, v []interface{}) {
 11710              m.emit(0x71)
 11711              m.imm1(relv(v[0]))
 11712          })
 11713      }
 11714      // JNO rel32
 11715      if isRel32(v0) {
 11716          p.domain = DomainGeneric
 11717          p.add(0, func(m *_Encoding, v []interface{}) {
 11718              m.emit(0x0f)
 11719              m.emit(0x81)
 11720              m.imm4(relv(v[0]))
 11721          })
 11722      }
 11723      // JNO label
 11724      if isLabel(v0) {
 11725          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 11726              m.emit(0x71)
 11727              m.imm1(relv(v[0]))
 11728          })
 11729          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 11730              m.emit(0x0f)
 11731              m.emit(0x81)
 11732              m.imm4(relv(v[0]))
 11733          })
 11734      }
 11735      if p.len == 0 {
 11736          panic("invalid operands for JNO")
 11737      }
 11738      return p
 11739  }
 11740  
 11741  // JNP performs "Jump if not parity (PF == 0)".
 11742  //
 11743  // Mnemonic        : JNP
 11744  // Supported forms : (2 forms)
 11745  //
 11746  //    * JNP rel8
 11747  //    * JNP rel32
 11748  //
 11749  func (self *Program) JNP(v0 interface{}) *Instruction {
 11750      p := self.alloc("JNP", 1, Operands { v0 })
 11751      p.branch = _B_conditional
 11752      // JNP rel8
 11753      if isRel8(v0) {
 11754          p.domain = DomainGeneric
 11755          p.add(0, func(m *_Encoding, v []interface{}) {
 11756              m.emit(0x7b)
 11757              m.imm1(relv(v[0]))
 11758          })
 11759      }
 11760      // JNP rel32
 11761      if isRel32(v0) {
 11762          p.domain = DomainGeneric
 11763          p.add(0, func(m *_Encoding, v []interface{}) {
 11764              m.emit(0x0f)
 11765              m.emit(0x8b)
 11766              m.imm4(relv(v[0]))
 11767          })
 11768      }
 11769      // JNP label
 11770      if isLabel(v0) {
 11771          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 11772              m.emit(0x7b)
 11773              m.imm1(relv(v[0]))
 11774          })
 11775          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 11776              m.emit(0x0f)
 11777              m.emit(0x8b)
 11778              m.imm4(relv(v[0]))
 11779          })
 11780      }
 11781      if p.len == 0 {
 11782          panic("invalid operands for JNP")
 11783      }
 11784      return p
 11785  }
 11786  
 11787  // JNS performs "Jump if not sign (SF == 0)".
 11788  //
 11789  // Mnemonic        : JNS
 11790  // Supported forms : (2 forms)
 11791  //
 11792  //    * JNS rel8
 11793  //    * JNS rel32
 11794  //
 11795  func (self *Program) JNS(v0 interface{}) *Instruction {
 11796      p := self.alloc("JNS", 1, Operands { v0 })
 11797      p.branch = _B_conditional
 11798      // JNS rel8
 11799      if isRel8(v0) {
 11800          p.domain = DomainGeneric
 11801          p.add(0, func(m *_Encoding, v []interface{}) {
 11802              m.emit(0x79)
 11803              m.imm1(relv(v[0]))
 11804          })
 11805      }
 11806      // JNS rel32
 11807      if isRel32(v0) {
 11808          p.domain = DomainGeneric
 11809          p.add(0, func(m *_Encoding, v []interface{}) {
 11810              m.emit(0x0f)
 11811              m.emit(0x89)
 11812              m.imm4(relv(v[0]))
 11813          })
 11814      }
 11815      // JNS label
 11816      if isLabel(v0) {
 11817          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 11818              m.emit(0x79)
 11819              m.imm1(relv(v[0]))
 11820          })
 11821          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 11822              m.emit(0x0f)
 11823              m.emit(0x89)
 11824              m.imm4(relv(v[0]))
 11825          })
 11826      }
 11827      if p.len == 0 {
 11828          panic("invalid operands for JNS")
 11829      }
 11830      return p
 11831  }
 11832  
 11833  // JNZ performs "Jump if not zero (ZF == 0)".
 11834  //
 11835  // Mnemonic        : JNZ
 11836  // Supported forms : (2 forms)
 11837  //
 11838  //    * JNZ rel8
 11839  //    * JNZ rel32
 11840  //
 11841  func (self *Program) JNZ(v0 interface{}) *Instruction {
 11842      p := self.alloc("JNZ", 1, Operands { v0 })
 11843      p.branch = _B_conditional
 11844      // JNZ rel8
 11845      if isRel8(v0) {
 11846          p.domain = DomainGeneric
 11847          p.add(0, func(m *_Encoding, v []interface{}) {
 11848              m.emit(0x75)
 11849              m.imm1(relv(v[0]))
 11850          })
 11851      }
 11852      // JNZ rel32
 11853      if isRel32(v0) {
 11854          p.domain = DomainGeneric
 11855          p.add(0, func(m *_Encoding, v []interface{}) {
 11856              m.emit(0x0f)
 11857              m.emit(0x85)
 11858              m.imm4(relv(v[0]))
 11859          })
 11860      }
 11861      // JNZ label
 11862      if isLabel(v0) {
 11863          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 11864              m.emit(0x75)
 11865              m.imm1(relv(v[0]))
 11866          })
 11867          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 11868              m.emit(0x0f)
 11869              m.emit(0x85)
 11870              m.imm4(relv(v[0]))
 11871          })
 11872      }
 11873      if p.len == 0 {
 11874          panic("invalid operands for JNZ")
 11875      }
 11876      return p
 11877  }
 11878  
 11879  // JO performs "Jump if overflow (OF == 1)".
 11880  //
 11881  // Mnemonic        : JO
 11882  // Supported forms : (2 forms)
 11883  //
 11884  //    * JO rel8
 11885  //    * JO rel32
 11886  //
 11887  func (self *Program) JO(v0 interface{}) *Instruction {
 11888      p := self.alloc("JO", 1, Operands { v0 })
 11889      p.branch = _B_conditional
 11890      // JO rel8
 11891      if isRel8(v0) {
 11892          p.domain = DomainGeneric
 11893          p.add(0, func(m *_Encoding, v []interface{}) {
 11894              m.emit(0x70)
 11895              m.imm1(relv(v[0]))
 11896          })
 11897      }
 11898      // JO rel32
 11899      if isRel32(v0) {
 11900          p.domain = DomainGeneric
 11901          p.add(0, func(m *_Encoding, v []interface{}) {
 11902              m.emit(0x0f)
 11903              m.emit(0x80)
 11904              m.imm4(relv(v[0]))
 11905          })
 11906      }
 11907      // JO label
 11908      if isLabel(v0) {
 11909          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 11910              m.emit(0x70)
 11911              m.imm1(relv(v[0]))
 11912          })
 11913          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 11914              m.emit(0x0f)
 11915              m.emit(0x80)
 11916              m.imm4(relv(v[0]))
 11917          })
 11918      }
 11919      if p.len == 0 {
 11920          panic("invalid operands for JO")
 11921      }
 11922      return p
 11923  }
 11924  
 11925  // JP performs "Jump if parity (PF == 1)".
 11926  //
 11927  // Mnemonic        : JP
 11928  // Supported forms : (2 forms)
 11929  //
 11930  //    * JP rel8
 11931  //    * JP rel32
 11932  //
 11933  func (self *Program) JP(v0 interface{}) *Instruction {
 11934      p := self.alloc("JP", 1, Operands { v0 })
 11935      p.branch = _B_conditional
 11936      // JP rel8
 11937      if isRel8(v0) {
 11938          p.domain = DomainGeneric
 11939          p.add(0, func(m *_Encoding, v []interface{}) {
 11940              m.emit(0x7a)
 11941              m.imm1(relv(v[0]))
 11942          })
 11943      }
 11944      // JP rel32
 11945      if isRel32(v0) {
 11946          p.domain = DomainGeneric
 11947          p.add(0, func(m *_Encoding, v []interface{}) {
 11948              m.emit(0x0f)
 11949              m.emit(0x8a)
 11950              m.imm4(relv(v[0]))
 11951          })
 11952      }
 11953      // JP label
 11954      if isLabel(v0) {
 11955          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 11956              m.emit(0x7a)
 11957              m.imm1(relv(v[0]))
 11958          })
 11959          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 11960              m.emit(0x0f)
 11961              m.emit(0x8a)
 11962              m.imm4(relv(v[0]))
 11963          })
 11964      }
 11965      if p.len == 0 {
 11966          panic("invalid operands for JP")
 11967      }
 11968      return p
 11969  }
 11970  
 11971  // JPE performs "Jump if parity even (PF == 1)".
 11972  //
 11973  // Mnemonic        : JPE
 11974  // Supported forms : (2 forms)
 11975  //
 11976  //    * JPE rel8
 11977  //    * JPE rel32
 11978  //
 11979  func (self *Program) JPE(v0 interface{}) *Instruction {
 11980      p := self.alloc("JPE", 1, Operands { v0 })
 11981      p.branch = _B_conditional
 11982      // JPE rel8
 11983      if isRel8(v0) {
 11984          p.domain = DomainGeneric
 11985          p.add(0, func(m *_Encoding, v []interface{}) {
 11986              m.emit(0x7a)
 11987              m.imm1(relv(v[0]))
 11988          })
 11989      }
 11990      // JPE rel32
 11991      if isRel32(v0) {
 11992          p.domain = DomainGeneric
 11993          p.add(0, func(m *_Encoding, v []interface{}) {
 11994              m.emit(0x0f)
 11995              m.emit(0x8a)
 11996              m.imm4(relv(v[0]))
 11997          })
 11998      }
 11999      // JPE label
 12000      if isLabel(v0) {
 12001          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 12002              m.emit(0x7a)
 12003              m.imm1(relv(v[0]))
 12004          })
 12005          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 12006              m.emit(0x0f)
 12007              m.emit(0x8a)
 12008              m.imm4(relv(v[0]))
 12009          })
 12010      }
 12011      if p.len == 0 {
 12012          panic("invalid operands for JPE")
 12013      }
 12014      return p
 12015  }
 12016  
 12017  // JPO performs "Jump if parity odd (PF == 0)".
 12018  //
 12019  // Mnemonic        : JPO
 12020  // Supported forms : (2 forms)
 12021  //
 12022  //    * JPO rel8
 12023  //    * JPO rel32
 12024  //
 12025  func (self *Program) JPO(v0 interface{}) *Instruction {
 12026      p := self.alloc("JPO", 1, Operands { v0 })
 12027      p.branch = _B_conditional
 12028      // JPO rel8
 12029      if isRel8(v0) {
 12030          p.domain = DomainGeneric
 12031          p.add(0, func(m *_Encoding, v []interface{}) {
 12032              m.emit(0x7b)
 12033              m.imm1(relv(v[0]))
 12034          })
 12035      }
 12036      // JPO rel32
 12037      if isRel32(v0) {
 12038          p.domain = DomainGeneric
 12039          p.add(0, func(m *_Encoding, v []interface{}) {
 12040              m.emit(0x0f)
 12041              m.emit(0x8b)
 12042              m.imm4(relv(v[0]))
 12043          })
 12044      }
 12045      // JPO label
 12046      if isLabel(v0) {
 12047          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 12048              m.emit(0x7b)
 12049              m.imm1(relv(v[0]))
 12050          })
 12051          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 12052              m.emit(0x0f)
 12053              m.emit(0x8b)
 12054              m.imm4(relv(v[0]))
 12055          })
 12056      }
 12057      if p.len == 0 {
 12058          panic("invalid operands for JPO")
 12059      }
 12060      return p
 12061  }
 12062  
 12063  // JRCXZ performs "Jump if RCX register is 0".
 12064  //
 12065  // Mnemonic        : JRCXZ
 12066  // Supported forms : (1 form)
 12067  //
 12068  //    * JRCXZ rel8
 12069  //
 12070  func (self *Program) JRCXZ(v0 interface{}) *Instruction {
 12071      p := self.alloc("JRCXZ", 1, Operands { v0 })
 12072      p.branch = _B_conditional
 12073      // JRCXZ rel8
 12074      if isRel8(v0) {
 12075          p.domain = DomainGeneric
 12076          p.add(0, func(m *_Encoding, v []interface{}) {
 12077              m.emit(0xe3)
 12078              m.imm1(relv(v[0]))
 12079          })
 12080      }
 12081      // JRCXZ label
 12082      if isLabel(v0) {
 12083          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 12084              m.emit(0xe3)
 12085              m.imm1(relv(v[0]))
 12086          })
 12087      }
 12088      if p.len == 0 {
 12089          panic("invalid operands for JRCXZ")
 12090      }
 12091      return p
 12092  }
 12093  
 12094  // JS performs "Jump if sign (SF == 1)".
 12095  //
 12096  // Mnemonic        : JS
 12097  // Supported forms : (2 forms)
 12098  //
 12099  //    * JS rel8
 12100  //    * JS rel32
 12101  //
 12102  func (self *Program) JS(v0 interface{}) *Instruction {
 12103      p := self.alloc("JS", 1, Operands { v0 })
 12104      p.branch = _B_conditional
 12105      // JS rel8
 12106      if isRel8(v0) {
 12107          p.domain = DomainGeneric
 12108          p.add(0, func(m *_Encoding, v []interface{}) {
 12109              m.emit(0x78)
 12110              m.imm1(relv(v[0]))
 12111          })
 12112      }
 12113      // JS rel32
 12114      if isRel32(v0) {
 12115          p.domain = DomainGeneric
 12116          p.add(0, func(m *_Encoding, v []interface{}) {
 12117              m.emit(0x0f)
 12118              m.emit(0x88)
 12119              m.imm4(relv(v[0]))
 12120          })
 12121      }
 12122      // JS label
 12123      if isLabel(v0) {
 12124          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 12125              m.emit(0x78)
 12126              m.imm1(relv(v[0]))
 12127          })
 12128          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 12129              m.emit(0x0f)
 12130              m.emit(0x88)
 12131              m.imm4(relv(v[0]))
 12132          })
 12133      }
 12134      if p.len == 0 {
 12135          panic("invalid operands for JS")
 12136      }
 12137      return p
 12138  }
 12139  
 12140  // JZ performs "Jump if zero (ZF == 1)".
 12141  //
 12142  // Mnemonic        : JZ
 12143  // Supported forms : (2 forms)
 12144  //
 12145  //    * JZ rel8
 12146  //    * JZ rel32
 12147  //
 12148  func (self *Program) JZ(v0 interface{}) *Instruction {
 12149      p := self.alloc("JZ", 1, Operands { v0 })
 12150      p.branch = _B_conditional
 12151      // JZ rel8
 12152      if isRel8(v0) {
 12153          p.domain = DomainGeneric
 12154          p.add(0, func(m *_Encoding, v []interface{}) {
 12155              m.emit(0x74)
 12156              m.imm1(relv(v[0]))
 12157          })
 12158      }
 12159      // JZ rel32
 12160      if isRel32(v0) {
 12161          p.domain = DomainGeneric
 12162          p.add(0, func(m *_Encoding, v []interface{}) {
 12163              m.emit(0x0f)
 12164              m.emit(0x84)
 12165              m.imm4(relv(v[0]))
 12166          })
 12167      }
 12168      // JZ label
 12169      if isLabel(v0) {
 12170          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 12171              m.emit(0x74)
 12172              m.imm1(relv(v[0]))
 12173          })
 12174          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 12175              m.emit(0x0f)
 12176              m.emit(0x84)
 12177              m.imm4(relv(v[0]))
 12178          })
 12179      }
 12180      if p.len == 0 {
 12181          panic("invalid operands for JZ")
 12182      }
 12183      return p
 12184  }
 12185  
 12186  // KADDB performs "ADD Two 8-bit Masks".
 12187  //
 12188  // Mnemonic        : KADDB
 12189  // Supported forms : (1 form)
 12190  //
 12191  //    * KADDB k, k, k    [AVX512DQ]
 12192  //
 12193  func (self *Program) KADDB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
 12194      p := self.alloc("KADDB", 3, Operands { v0, v1, v2 })
 12195      // KADDB k, k, k
 12196      if isK(v0) && isK(v1) && isK(v2) {
 12197          self.require(ISA_AVX512DQ)
 12198          p.domain = DomainMask
 12199          p.add(0, func(m *_Encoding, v []interface{}) {
 12200              m.vex2(5, 0, nil, hlcode(v[1]))
 12201              m.emit(0x4a)
 12202              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 12203          })
 12204      }
 12205      if p.len == 0 {
 12206          panic("invalid operands for KADDB")
 12207      }
 12208      return p
 12209  }
 12210  
 12211  // KADDD performs "ADD Two 32-bit Masks".
 12212  //
 12213  // Mnemonic        : KADDD
 12214  // Supported forms : (1 form)
 12215  //
 12216  //    * KADDD k, k, k    [AVX512BW]
 12217  //
 12218  func (self *Program) KADDD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
 12219      p := self.alloc("KADDD", 3, Operands { v0, v1, v2 })
 12220      // KADDD k, k, k
 12221      if isK(v0) && isK(v1) && isK(v2) {
 12222          self.require(ISA_AVX512BW)
 12223          p.domain = DomainMask
 12224          p.add(0, func(m *_Encoding, v []interface{}) {
 12225              m.emit(0xc4)
 12226              m.emit(0xe1)
 12227              m.emit(0xfd ^ (hlcode(v[1]) << 3))
 12228              m.emit(0x4a)
 12229              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 12230          })
 12231      }
 12232      if p.len == 0 {
 12233          panic("invalid operands for KADDD")
 12234      }
 12235      return p
 12236  }
 12237  
 12238  // KADDQ performs "ADD Two 64-bit Masks".
 12239  //
 12240  // Mnemonic        : KADDQ
 12241  // Supported forms : (1 form)
 12242  //
 12243  //    * KADDQ k, k, k    [AVX512BW]
 12244  //
 12245  func (self *Program) KADDQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
 12246      p := self.alloc("KADDQ", 3, Operands { v0, v1, v2 })
 12247      // KADDQ k, k, k
 12248      if isK(v0) && isK(v1) && isK(v2) {
 12249          self.require(ISA_AVX512BW)
 12250          p.domain = DomainMask
 12251          p.add(0, func(m *_Encoding, v []interface{}) {
 12252              m.emit(0xc4)
 12253              m.emit(0xe1)
 12254              m.emit(0xfc ^ (hlcode(v[1]) << 3))
 12255              m.emit(0x4a)
 12256              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 12257          })
 12258      }
 12259      if p.len == 0 {
 12260          panic("invalid operands for KADDQ")
 12261      }
 12262      return p
 12263  }
 12264  
// KADDW performs "ADD Two 16-bit Masks".
//
// Mnemonic        : KADDW
// Supported forms : (1 form)
//
//    * KADDW k, k, k    [AVX512DQ]
//
func (self *Program) KADDW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KADDW", 3, Operands { v0, v1, v2 })
    // KADDW k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, 0, nil, hlcode(v[1]))               // two-byte VEX prefix; vvvv = first source mask v[1]
            m.emit(0x4a)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst v[2], rm=src v[0]
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KADDW")
    }
    return p
}
 12289  
// KANDB performs "Bitwise Logical AND 8-bit Masks".
//
// Mnemonic        : KANDB
// Supported forms : (1 form)
//
//    * KANDB k, k, k    [AVX512DQ]
//
func (self *Program) KANDB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KANDB", 3, Operands { v0, v1, v2 })
    // KANDB k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, 0, nil, hlcode(v[1]))               // two-byte VEX prefix; vvvv = first source mask v[1]
            m.emit(0x41)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst v[2], rm=src v[0]
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KANDB")
    }
    return p
}
 12314  
// KANDD performs "Bitwise Logical AND 32-bit Masks".
//
// Mnemonic        : KANDD
// Supported forms : (1 form)
//
//    * KANDD k, k, k    [AVX512BW]
//
func (self *Program) KANDD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KANDD", 3, Operands { v0, v1, v2 })
    // KANDD k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Three-byte VEX prefix; the XOR folds the inverted VEX.vvvv
            // field (first source mask, v[1]) into the last prefix byte.
            m.emit(0xc4)
            m.emit(0xe1)
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit(0x41)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst v[2], rm=src v[0]
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KANDD")
    }
    return p
}
 12341  
// KANDNB performs "Bitwise Logical AND NOT 8-bit Masks".
//
// Mnemonic        : KANDNB
// Supported forms : (1 form)
//
//    * KANDNB k, k, k    [AVX512DQ]
//
func (self *Program) KANDNB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KANDNB", 3, Operands { v0, v1, v2 })
    // KANDNB k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, 0, nil, hlcode(v[1]))               // two-byte VEX prefix; vvvv = first source mask v[1]
            m.emit(0x42)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst v[2], rm=src v[0]
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KANDNB")
    }
    return p
}
 12366  
// KANDND performs "Bitwise Logical AND NOT 32-bit Masks".
//
// Mnemonic        : KANDND
// Supported forms : (1 form)
//
//    * KANDND k, k, k    [AVX512BW]
//
func (self *Program) KANDND(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KANDND", 3, Operands { v0, v1, v2 })
    // KANDND k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Three-byte VEX prefix; the XOR folds the inverted VEX.vvvv
            // field (first source mask, v[1]) into the last prefix byte.
            m.emit(0xc4)
            m.emit(0xe1)
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit(0x42)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst v[2], rm=src v[0]
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KANDND")
    }
    return p
}
 12393  
// KANDNQ performs "Bitwise Logical AND NOT 64-bit Masks".
//
// Mnemonic        : KANDNQ
// Supported forms : (1 form)
//
//    * KANDNQ k, k, k    [AVX512BW]
//
func (self *Program) KANDNQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KANDNQ", 3, Operands { v0, v1, v2 })
    // KANDNQ k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Three-byte VEX prefix; the XOR folds the inverted VEX.vvvv
            // field (first source mask, v[1]) into the last prefix byte.
            m.emit(0xc4)
            m.emit(0xe1)
            m.emit(0xfc ^ (hlcode(v[1]) << 3))
            m.emit(0x42)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst v[2], rm=src v[0]
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KANDNQ")
    }
    return p
}
 12420  
// KANDNW performs "Bitwise Logical AND NOT 16-bit Masks".
//
// Mnemonic        : KANDNW
// Supported forms : (1 form)
//
//    * KANDNW k, k, k    [AVX512F]
//
func (self *Program) KANDNW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KANDNW", 3, Operands { v0, v1, v2 })
    // KANDNW k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, 0, nil, hlcode(v[1]))               // two-byte VEX prefix; vvvv = first source mask v[1]
            m.emit(0x42)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst v[2], rm=src v[0]
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KANDNW")
    }
    return p
}
 12445  
// KANDQ performs "Bitwise Logical AND 64-bit Masks".
//
// Mnemonic        : KANDQ
// Supported forms : (1 form)
//
//    * KANDQ k, k, k    [AVX512BW]
//
func (self *Program) KANDQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KANDQ", 3, Operands { v0, v1, v2 })
    // KANDQ k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Three-byte VEX prefix; the XOR folds the inverted VEX.vvvv
            // field (first source mask, v[1]) into the last prefix byte.
            m.emit(0xc4)
            m.emit(0xe1)
            m.emit(0xfc ^ (hlcode(v[1]) << 3))
            m.emit(0x41)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst v[2], rm=src v[0]
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KANDQ")
    }
    return p
}
 12472  
// KANDW performs "Bitwise Logical AND 16-bit Masks".
//
// Mnemonic        : KANDW
// Supported forms : (1 form)
//
//    * KANDW k, k, k    [AVX512F]
//
func (self *Program) KANDW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KANDW", 3, Operands { v0, v1, v2 })
    // KANDW k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, 0, nil, hlcode(v[1]))               // two-byte VEX prefix; vvvv = first source mask v[1]
            m.emit(0x41)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst v[2], rm=src v[0]
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KANDW")
    }
    return p
}
 12497  
// KMOVB performs "Move 8-bit Mask".
//
// Mnemonic        : KMOVB
// Supported forms : (5 forms)
//
//    * KMOVB k, k      [AVX512DQ]
//    * KMOVB r32, k    [AVX512DQ]
//    * KMOVB m8, k     [AVX512DQ]
//    * KMOVB k, r32    [AVX512DQ]
//    * KMOVB k, m8     [AVX512DQ]
//
func (self *Program) KMOVB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("KMOVB", 2, Operands { v0, v1 })
    // KMOVB k, k
    if isK(v0) && isK(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, 0, nil, 0)                          // two-byte VEX prefix; no vvvv operand
            m.emit(0x90)                                  // opcode: load mask form
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst v[1], rm=src v[0]
        })
    }
    // KMOVB r32, k
    if isReg32(v0) && isK(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, 0, v[0], 0)                         // GPR operand passed so vex2 can encode its extension bits
            m.emit(0x92)                                  // opcode: GPR -> mask form
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst v[1], rm=src v[0]
        })
    }
    // KMOVB m8, k
    if isM8(v0) && isK(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, 0, addr(v[0]), 0)                   // prefix accounts for registers used in the address
            m.emit(0x90)                                  // opcode: load mask form
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/disp for the memory operand
        })
    }
    // KMOVB k, r32
    if isK(v0) && isReg32(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[1]), nil, 0)                // high bit of the destination GPR goes into the prefix
            m.emit(0x93)                                  // opcode: mask -> GPR form
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst v[1], rm=src v[0]
        })
    }
    // KMOVB k, m8
    if isK(v0) && isM8(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, 0, addr(v[1]), 0)                   // prefix accounts for registers used in the address
            m.emit(0x91)                                  // opcode: store mask form
            m.mrsd(lcode(v[0]), addr(v[1]), 1)            // ModRM/SIB/disp for the memory operand
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KMOVB")
    }
    return p
}
 12566  
// KMOVD performs "Move 32-bit Mask".
//
// Mnemonic        : KMOVD
// Supported forms : (5 forms)
//
//    * KMOVD k, k      [AVX512BW]
//    * KMOVD r32, k    [AVX512BW]
//    * KMOVD m32, k    [AVX512BW]
//    * KMOVD k, r32    [AVX512BW]
//    * KMOVD k, m32    [AVX512BW]
//
func (self *Program) KMOVD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("KMOVD", 2, Operands { v0, v1 })
    // KMOVD k, k
    if isK(v0) && isK(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Three-byte VEX prefix, fixed (no register extension bits needed).
            m.emit(0xc4)
            m.emit(0xe1)
            m.emit(0xf9)
            m.emit(0x90)                                  // opcode: load mask form
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst v[1], rm=src v[0]
        })
    }
    // KMOVD r32, k
    if isReg32(v0) && isK(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, 0, v[0], 0)                         // GPR operand passed so vex2 can encode its extension bits
            m.emit(0x92)                                  // opcode: GPR -> mask form
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst v[1], rm=src v[0]
        })
    }
    // KMOVD m32, k
    if isM32(v0) && isK(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b1, 0x81, 0, addr(v[0]), 0)     // three-byte VEX prefix for the memory form
            m.emit(0x90)                                  // opcode: load mask form
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/disp for the memory operand
        })
    }
    // KMOVD k, r32
    if isK(v0) && isReg32(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[1]), nil, 0)                // high bit of the destination GPR goes into the prefix
            m.emit(0x93)                                  // opcode: mask -> GPR form
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst v[1], rm=src v[0]
        })
    }
    // KMOVD k, m32
    if isK(v0) && isM32(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b1, 0x81, 0, addr(v[1]), 0)     // three-byte VEX prefix for the memory form
            m.emit(0x91)                                  // opcode: store mask form
            m.mrsd(lcode(v[0]), addr(v[1]), 1)            // ModRM/SIB/disp for the memory operand
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KMOVD")
    }
    return p
}
 12637  
// KMOVQ performs "Move 64-bit Mask".
//
// Mnemonic        : KMOVQ
// Supported forms : (5 forms)
//
//    * KMOVQ k, k      [AVX512BW]
//    * KMOVQ r64, k    [AVX512BW]
//    * KMOVQ m64, k    [AVX512BW]
//    * KMOVQ k, r64    [AVX512BW]
//    * KMOVQ k, m64    [AVX512BW]
//
func (self *Program) KMOVQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("KMOVQ", 2, Operands { v0, v1 })
    // KMOVQ k, k
    if isK(v0) && isK(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Three-byte VEX prefix, fixed (no register extension bits needed).
            m.emit(0xc4)
            m.emit(0xe1)
            m.emit(0xf8)
            m.emit(0x90)                                  // opcode: load mask form
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst v[1], rm=src v[0]
        })
    }
    // KMOVQ r64, k
    if isReg64(v0) && isK(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Three-byte VEX prefix; the XOR clears an inverted extension
            // bit in byte 1 when the source GPR is an extended register.
            m.emit(0xc4)
            m.emit(0xe1 ^ (hcode(v[0]) << 5))
            m.emit(0xfb)
            m.emit(0x92)                                  // opcode: GPR -> mask form
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst v[1], rm=src v[0]
        })
    }
    // KMOVQ m64, k
    if isM64(v0) && isK(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b1, 0x80, 0, addr(v[0]), 0)     // three-byte VEX prefix for the memory form
            m.emit(0x90)                                  // opcode: load mask form
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/disp for the memory operand
        })
    }
    // KMOVQ k, r64
    if isK(v0) && isReg64(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Three-byte VEX prefix; the XOR clears an inverted extension
            // bit in byte 1 when the destination GPR is an extended register.
            m.emit(0xc4)
            m.emit(0xe1 ^ (hcode(v[1]) << 7))
            m.emit(0xfb)
            m.emit(0x93)                                  // opcode: mask -> GPR form
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst v[1], rm=src v[0]
        })
    }
    // KMOVQ k, m64
    if isK(v0) && isM64(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b1, 0x80, 0, addr(v[1]), 0)     // three-byte VEX prefix for the memory form
            m.emit(0x91)                                  // opcode: store mask form
            m.mrsd(lcode(v[0]), addr(v[1]), 1)            // ModRM/SIB/disp for the memory operand
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KMOVQ")
    }
    return p
}
 12712  
// KMOVW performs "Move 16-bit Mask".
//
// Mnemonic        : KMOVW
// Supported forms : (5 forms)
//
//    * KMOVW k, k      [AVX512F]
//    * KMOVW r32, k    [AVX512F]
//    * KMOVW m16, k    [AVX512F]
//    * KMOVW k, r32    [AVX512F]
//    * KMOVW k, m16    [AVX512F]
//
func (self *Program) KMOVW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("KMOVW", 2, Operands { v0, v1 })
    // KMOVW k, k
    if isK(v0) && isK(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, 0, nil, 0)                          // two-byte VEX prefix; no vvvv operand
            m.emit(0x90)                                  // opcode: load mask form
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst v[1], rm=src v[0]
        })
    }
    // KMOVW r32, k
    if isReg32(v0) && isK(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, 0, v[0], 0)                         // GPR operand passed so vex2 can encode its extension bits
            m.emit(0x92)                                  // opcode: GPR -> mask form
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst v[1], rm=src v[0]
        })
    }
    // KMOVW m16, k
    if isM16(v0) && isK(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, 0, addr(v[0]), 0)                   // prefix accounts for registers used in the address
            m.emit(0x90)                                  // opcode: load mask form
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/disp for the memory operand
        })
    }
    // KMOVW k, r32
    if isK(v0) && isReg32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[1]), nil, 0)                // high bit of the destination GPR goes into the prefix
            m.emit(0x93)                                  // opcode: mask -> GPR form
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst v[1], rm=src v[0]
        })
    }
    // KMOVW k, m16
    if isK(v0) && isM16(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, 0, addr(v[1]), 0)                   // prefix accounts for registers used in the address
            m.emit(0x91)                                  // opcode: store mask form
            m.mrsd(lcode(v[0]), addr(v[1]), 1)            // ModRM/SIB/disp for the memory operand
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KMOVW")
    }
    return p
}
 12781  
// KNOTB performs "NOT 8-bit Mask Register".
//
// Mnemonic        : KNOTB
// Supported forms : (1 form)
//
//    * KNOTB k, k    [AVX512DQ]
//
func (self *Program) KNOTB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("KNOTB", 2, Operands { v0, v1 })
    // KNOTB k, k
    if isK(v0) && isK(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, 0, nil, 0)                          // two-byte VEX prefix; no vvvv operand
            m.emit(0x44)                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst v[1], rm=src v[0]
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KNOTB")
    }
    return p
}
 12806  
// KNOTD performs "NOT 32-bit Mask Register".
//
// Mnemonic        : KNOTD
// Supported forms : (1 form)
//
//    * KNOTD k, k    [AVX512BW]
//
func (self *Program) KNOTD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("KNOTD", 2, Operands { v0, v1 })
    // KNOTD k, k
    if isK(v0) && isK(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Three-byte VEX prefix, fixed (no register extension bits needed).
            m.emit(0xc4)
            m.emit(0xe1)
            m.emit(0xf9)
            m.emit(0x44)                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst v[1], rm=src v[0]
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KNOTD")
    }
    return p
}
 12833  
// KNOTQ performs "NOT 64-bit Mask Register".
//
// Mnemonic        : KNOTQ
// Supported forms : (1 form)
//
//    * KNOTQ k, k    [AVX512BW]
//
func (self *Program) KNOTQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("KNOTQ", 2, Operands { v0, v1 })
    // KNOTQ k, k
    if isK(v0) && isK(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Three-byte VEX prefix, fixed (no register extension bits needed).
            m.emit(0xc4)
            m.emit(0xe1)
            m.emit(0xf8)
            m.emit(0x44)                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst v[1], rm=src v[0]
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KNOTQ")
    }
    return p
}
 12860  
// KNOTW performs "NOT 16-bit Mask Register".
//
// Mnemonic        : KNOTW
// Supported forms : (1 form)
//
//    * KNOTW k, k    [AVX512F]
//
func (self *Program) KNOTW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("KNOTW", 2, Operands { v0, v1 })
    // KNOTW k, k
    if isK(v0) && isK(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, 0, nil, 0)                          // two-byte VEX prefix; no vvvv operand
            m.emit(0x44)                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst v[1], rm=src v[0]
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KNOTW")
    }
    return p
}
 12885  
// KORB performs "Bitwise Logical OR 8-bit Masks".
//
// Mnemonic        : KORB
// Supported forms : (1 form)
//
//    * KORB k, k, k    [AVX512DQ]
//
func (self *Program) KORB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KORB", 3, Operands { v0, v1, v2 })
    // KORB k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, 0, nil, hlcode(v[1]))               // two-byte VEX prefix; vvvv = first source mask v[1]
            m.emit(0x45)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst v[2], rm=src v[0]
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KORB")
    }
    return p
}
 12910  
// KORD performs "Bitwise Logical OR 32-bit Masks".
//
// Mnemonic        : KORD
// Supported forms : (1 form)
//
//    * KORD k, k, k    [AVX512BW]
//
func (self *Program) KORD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KORD", 3, Operands { v0, v1, v2 })
    // KORD k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Three-byte VEX prefix; the XOR folds the inverted VEX.vvvv
            // field (first source mask, v[1]) into the last prefix byte.
            m.emit(0xc4)
            m.emit(0xe1)
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit(0x45)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst v[2], rm=src v[0]
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KORD")
    }
    return p
}
 12937  
// KORQ performs "Bitwise Logical OR 64-bit Masks".
//
// Mnemonic        : KORQ
// Supported forms : (1 form)
//
//    * KORQ k, k, k    [AVX512BW]
//
func (self *Program) KORQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KORQ", 3, Operands { v0, v1, v2 })
    // KORQ k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Three-byte VEX prefix; the XOR folds the inverted VEX.vvvv
            // field (first source mask, v[1]) into the last prefix byte.
            m.emit(0xc4)
            m.emit(0xe1)
            m.emit(0xfc ^ (hlcode(v[1]) << 3))
            m.emit(0x45)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst v[2], rm=src v[0]
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KORQ")
    }
    return p
}
 12964  
// KORTESTB performs "OR 8-bit Masks and Set Flags".
//
// Mnemonic        : KORTESTB
// Supported forms : (1 form)
//
//    * KORTESTB k, k    [AVX512DQ]
//
func (self *Program) KORTESTB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("KORTESTB", 2, Operands { v0, v1 })
    // KORTESTB k, k
    if isK(v0) && isK(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, 0, nil, 0)                          // two-byte VEX prefix; no vvvv operand
            m.emit(0x98)                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[1], rm=v[0]
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KORTESTB")
    }
    return p
}
 12989  
// KORTESTD performs "OR 32-bit Masks and Set Flags".
//
// Mnemonic        : KORTESTD
// Supported forms : (1 form)
//
//    * KORTESTD k, k    [AVX512BW]
//
func (self *Program) KORTESTD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("KORTESTD", 2, Operands { v0, v1 })
    // KORTESTD k, k
    if isK(v0) && isK(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Three-byte VEX prefix, fixed (no register extension bits needed).
            m.emit(0xc4)
            m.emit(0xe1)
            m.emit(0xf9)
            m.emit(0x98)                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[1], rm=v[0]
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KORTESTD")
    }
    return p
}
 13016  
// KORTESTQ performs "OR 64-bit Masks and Set Flags".
//
// Mnemonic        : KORTESTQ
// Supported forms : (1 form)
//
//    * KORTESTQ k, k    [AVX512BW]
//
func (self *Program) KORTESTQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("KORTESTQ", 2, Operands { v0, v1 })
    // KORTESTQ k, k
    if isK(v0) && isK(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Three-byte VEX prefix, fixed (no register extension bits needed).
            m.emit(0xc4)
            m.emit(0xe1)
            m.emit(0xf8)
            m.emit(0x98)                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[1], rm=v[0]
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KORTESTQ")
    }
    return p
}
 13043  
// KORTESTW performs "OR 16-bit Masks and Set Flags".
//
// Mnemonic        : KORTESTW
// Supported forms : (1 form)
//
//    * KORTESTW k, k    [AVX512F]
//
func (self *Program) KORTESTW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("KORTESTW", 2, Operands { v0, v1 })
    // KORTESTW k, k
    if isK(v0) && isK(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, 0, nil, 0)                          // two-byte VEX prefix; no vvvv operand
            m.emit(0x98)                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[1], rm=v[0]
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KORTESTW")
    }
    return p
}
 13068  
// KORW performs "Bitwise Logical OR 16-bit Masks".
//
// Mnemonic        : KORW
// Supported forms : (1 form)
//
//    * KORW k, k, k    [AVX512F]
//
func (self *Program) KORW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KORW", 3, Operands { v0, v1, v2 })
    // KORW k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, 0, nil, hlcode(v[1]))               // two-byte VEX prefix; vvvv = first source mask v[1]
            m.emit(0x45)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst v[2], rm=src v[0]
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KORW")
    }
    return p
}
 13093  
// KSHIFTLB performs "Shift Left 8-bit Masks".
//
// Mnemonic        : KSHIFTLB
// Supported forms : (1 form)
//
//    * KSHIFTLB imm8, k, k    [AVX512DQ]
//
func (self *Program) KSHIFTLB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KSHIFTLB", 3, Operands { v0, v1, v2 })
    // KSHIFTLB imm8, k, k
    if isImm8(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Three-byte VEX prefix, fixed (no register extension bits needed).
            m.emit(0xc4)
            m.emit(0xe3)
            m.emit(0x79)
            m.emit(0x32)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1])) // ModRM: mod=11, reg=dst v[2], rm=src v[1]
            m.imm1(toImmAny(v[0]))                        // trailing imm8 shift count
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KSHIFTLB")
    }
    return p
}
 13121  
// KSHIFTLD performs "Shift Left 32-bit Masks".
//
// Mnemonic        : KSHIFTLD
// Supported forms : (1 form)
//
//    * KSHIFTLD imm8, k, k    [AVX512BW]
//
func (self *Program) KSHIFTLD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KSHIFTLD", 3, Operands { v0, v1, v2 })
    // KSHIFTLD imm8, k, k
    if isImm8(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Three-byte VEX prefix, fixed (no register extension bits needed).
            m.emit(0xc4)
            m.emit(0xe3)
            m.emit(0x79)
            m.emit(0x33)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1])) // ModRM: mod=11, reg=dst v[2], rm=src v[1]
            m.imm1(toImmAny(v[0]))                        // trailing imm8 shift count
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KSHIFTLD")
    }
    return p
}
 13149  
// KSHIFTLQ performs "Shift Left 64-bit Masks".
//
// Mnemonic        : KSHIFTLQ
// Supported forms : (1 form)
//
//    * KSHIFTLQ imm8, k, k    [AVX512BW]
//
func (self *Program) KSHIFTLQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KSHIFTLQ", 3, Operands { v0, v1, v2 })
    // KSHIFTLQ imm8, k, k
    if isImm8(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Three-byte VEX prefix, fixed (no register extension bits needed).
            m.emit(0xc4)
            m.emit(0xe3)
            m.emit(0xf9)
            m.emit(0x33)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1])) // ModRM: mod=11, reg=dst v[2], rm=src v[1]
            m.imm1(toImmAny(v[0]))                        // trailing imm8 shift count
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KSHIFTLQ")
    }
    return p
}
 13177  
// KSHIFTLW performs "Shift Left 16-bit Masks".
//
// Mnemonic        : KSHIFTLW
// Supported forms : (1 form)
//
//    * KSHIFTLW imm8, k, k    [AVX512F]
//
func (self *Program) KSHIFTLW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KSHIFTLW", 3, Operands { v0, v1, v2 })
    // KSHIFTLW imm8, k, k
    if isImm8(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: VEX.L0.66.0F3A.W1 32 /r ib.
            m.emit(0xc4)
            m.emit(0xe3)
            m.emit(0xf9)
            m.emit(0x32)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))   // ModRM: mod=11, reg=dst mask, r/m=src mask
            m.imm1(toImmAny(v[0]))                          // imm8 shift count
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KSHIFTLW")
    }
    return p
}
 13205  
// KSHIFTRB performs "Shift Right 8-bit Masks".
//
// Mnemonic        : KSHIFTRB
// Supported forms : (1 form)
//
//    * KSHIFTRB imm8, k, k    [AVX512DQ]
//
func (self *Program) KSHIFTRB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KSHIFTRB", 3, Operands { v0, v1, v2 })
    // KSHIFTRB imm8, k, k
    if isImm8(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: VEX.L0.66.0F3A.W0 30 /r ib.
            m.emit(0xc4)
            m.emit(0xe3)
            m.emit(0x79)
            m.emit(0x30)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))   // ModRM: mod=11, reg=dst mask, r/m=src mask
            m.imm1(toImmAny(v[0]))                          // imm8 shift count
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KSHIFTRB")
    }
    return p
}
 13233  
// KSHIFTRD performs "Shift Right 32-bit Masks".
//
// Mnemonic        : KSHIFTRD
// Supported forms : (1 form)
//
//    * KSHIFTRD imm8, k, k    [AVX512BW]
//
func (self *Program) KSHIFTRD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KSHIFTRD", 3, Operands { v0, v1, v2 })
    // KSHIFTRD imm8, k, k
    if isImm8(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: VEX.L0.66.0F3A.W0 31 /r ib.
            m.emit(0xc4)
            m.emit(0xe3)
            m.emit(0x79)
            m.emit(0x31)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))   // ModRM: mod=11, reg=dst mask, r/m=src mask
            m.imm1(toImmAny(v[0]))                          // imm8 shift count
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KSHIFTRD")
    }
    return p
}
 13261  
// KSHIFTRQ performs "Shift Right 64-bit Masks".
//
// Mnemonic        : KSHIFTRQ
// Supported forms : (1 form)
//
//    * KSHIFTRQ imm8, k, k    [AVX512BW]
//
func (self *Program) KSHIFTRQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KSHIFTRQ", 3, Operands { v0, v1, v2 })
    // KSHIFTRQ imm8, k, k
    if isImm8(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: VEX.L0.66.0F3A.W1 31 /r ib — 0xf9 sets VEX.W=1 (64-bit mask form).
            m.emit(0xc4)
            m.emit(0xe3)
            m.emit(0xf9)
            m.emit(0x31)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))   // ModRM: mod=11, reg=dst mask, r/m=src mask
            m.imm1(toImmAny(v[0]))                          // imm8 shift count
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KSHIFTRQ")
    }
    return p
}
 13289  
// KSHIFTRW performs "Shift Right 16-bit Masks".
//
// Mnemonic        : KSHIFTRW
// Supported forms : (1 form)
//
//    * KSHIFTRW imm8, k, k    [AVX512F]
//
func (self *Program) KSHIFTRW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KSHIFTRW", 3, Operands { v0, v1, v2 })
    // KSHIFTRW imm8, k, k
    if isImm8(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: VEX.L0.66.0F3A.W1 30 /r ib.
            m.emit(0xc4)
            m.emit(0xe3)
            m.emit(0xf9)
            m.emit(0x30)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))   // ModRM: mod=11, reg=dst mask, r/m=src mask
            m.imm1(toImmAny(v[0]))                          // imm8 shift count
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KSHIFTRW")
    }
    return p
}
 13317  
// KTESTB performs "Bit Test 8-bit Masks and Set Flags".
//
// Mnemonic        : KTESTB
// Supported forms : (1 form)
//
//    * KTESTB k, k    [AVX512DQ]
//
func (self *Program) KTESTB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("KTESTB", 2, Operands { v0, v1 })
    // KTESTB k, k
    if isK(v0) && isK(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: VEX.L0.66.0F.W0 99 /r — vex2 emits the compact 2-byte VEX prefix
            // (arg 1 appears to select the 66 mandatory prefix; vvvv unused here).
            m.vex2(1, 0, nil, 0)
            m.emit(0x99)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=v1, r/m=v0
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KTESTB")
    }
    return p
}
 13342  
// KTESTD performs "Bit Test 32-bit Masks and Set Flags".
//
// Mnemonic        : KTESTD
// Supported forms : (1 form)
//
//    * KTESTD k, k    [AVX512BW]
//
func (self *Program) KTESTD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("KTESTD", 2, Operands { v0, v1 })
    // KTESTD k, k
    if isK(v0) && isK(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: VEX.L0.66.0F.W1 99 /r — W=1 requires the 3-byte VEX form (C4 E1 F9).
            m.emit(0xc4)
            m.emit(0xe1)
            m.emit(0xf9)
            m.emit(0x99)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=v1, r/m=v0
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KTESTD")
    }
    return p
}
 13369  
// KTESTQ performs "Bit Test 64-bit Masks and Set Flags".
//
// Mnemonic        : KTESTQ
// Supported forms : (1 form)
//
//    * KTESTQ k, k    [AVX512BW]
//
func (self *Program) KTESTQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("KTESTQ", 2, Operands { v0, v1 })
    // KTESTQ k, k
    if isK(v0) && isK(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: VEX.L0.0F.W1 99 /r — 0xf8 = W=1 with no mandatory prefix (pp=00).
            m.emit(0xc4)
            m.emit(0xe1)
            m.emit(0xf8)
            m.emit(0x99)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=v1, r/m=v0
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KTESTQ")
    }
    return p
}
 13396  
// KTESTW performs "Bit Test 16-bit Masks and Set Flags".
//
// Mnemonic        : KTESTW
// Supported forms : (1 form)
//
//    * KTESTW k, k    [AVX512DQ]
//
func (self *Program) KTESTW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("KTESTW", 2, Operands { v0, v1 })
    // KTESTW k, k
    if isK(v0) && isK(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: VEX.L0.0F.W0 99 /r — 2-byte VEX, no mandatory prefix; vvvv unused.
            m.vex2(0, 0, nil, 0)
            m.emit(0x99)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=v1, r/m=v0
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KTESTW")
    }
    return p
}
 13421  
// KUNPCKBW performs "Unpack and Interleave 8-bit Masks".
//
// Mnemonic        : KUNPCKBW
// Supported forms : (1 form)
//
//    * KUNPCKBW k, k, k    [AVX512F]
//
func (self *Program) KUNPCKBW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KUNPCKBW", 3, Operands { v0, v1, v2 })
    // KUNPCKBW k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: VEX.L1.66.0F.W0 4B /r — second mask source (v1) goes in VEX.vvvv.
            m.vex2(5, 0, nil, hlcode(v[1]))
            m.emit(0x4b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, r/m=first src
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KUNPCKBW")
    }
    return p
}
 13446  
// KUNPCKDQ performs "Unpack and Interleave 32-bit Masks".
//
// Mnemonic        : KUNPCKDQ
// Supported forms : (1 form)
//
//    * KUNPCKDQ k, k, k    [AVX512BW]
//
func (self *Program) KUNPCKDQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KUNPCKDQ", 3, Operands { v0, v1, v2 })
    // KUNPCKDQ k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: VEX.L1.0F.W1 4B /r — XOR folds the inverted vvvv field (v1) into byte 3.
            m.emit(0xc4)
            m.emit(0xe1)
            m.emit(0xfc ^ (hlcode(v[1]) << 3))
            m.emit(0x4b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, r/m=first src
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KUNPCKDQ")
    }
    return p
}
 13473  
// KUNPCKWD performs "Unpack and Interleave 16-bit Masks".
//
// Mnemonic        : KUNPCKWD
// Supported forms : (1 form)
//
//    * KUNPCKWD k, k, k    [AVX512BW]
//
func (self *Program) KUNPCKWD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KUNPCKWD", 3, Operands { v0, v1, v2 })
    // KUNPCKWD k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: VEX.L1.0F.W0 4B /r — second mask source (v1) goes in VEX.vvvv.
            m.vex2(4, 0, nil, hlcode(v[1]))
            m.emit(0x4b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, r/m=first src
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KUNPCKWD")
    }
    return p
}
 13498  
// KXNORB performs "Bitwise Logical XNOR 8-bit Masks".
//
// Mnemonic        : KXNORB
// Supported forms : (1 form)
//
//    * KXNORB k, k, k    [AVX512DQ]
//
func (self *Program) KXNORB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KXNORB", 3, Operands { v0, v1, v2 })
    // KXNORB k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: VEX.L1.66.0F.W0 46 /r — second mask source (v1) goes in VEX.vvvv.
            m.vex2(5, 0, nil, hlcode(v[1]))
            m.emit(0x46)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, r/m=first src
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KXNORB")
    }
    return p
}
 13523  
// KXNORD performs "Bitwise Logical XNOR 32-bit Masks".
//
// Mnemonic        : KXNORD
// Supported forms : (1 form)
//
//    * KXNORD k, k, k    [AVX512BW]
//
func (self *Program) KXNORD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KXNORD", 3, Operands { v0, v1, v2 })
    // KXNORD k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: VEX.L1.66.0F.W1 46 /r — XOR folds the inverted vvvv field (v1) into byte 3.
            m.emit(0xc4)
            m.emit(0xe1)
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit(0x46)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, r/m=first src
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KXNORD")
    }
    return p
}
 13550  
// KXNORQ performs "Bitwise Logical XNOR 64-bit Masks".
//
// Mnemonic        : KXNORQ
// Supported forms : (1 form)
//
//    * KXNORQ k, k, k    [AVX512BW]
//
func (self *Program) KXNORQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KXNORQ", 3, Operands { v0, v1, v2 })
    // KXNORQ k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: VEX.L1.0F.W1 46 /r — XOR folds the inverted vvvv field (v1) into byte 3.
            m.emit(0xc4)
            m.emit(0xe1)
            m.emit(0xfc ^ (hlcode(v[1]) << 3))
            m.emit(0x46)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, r/m=first src
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KXNORQ")
    }
    return p
}
 13577  
// KXNORW performs "Bitwise Logical XNOR 16-bit Masks".
//
// Mnemonic        : KXNORW
// Supported forms : (1 form)
//
//    * KXNORW k, k, k    [AVX512F]
//
func (self *Program) KXNORW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KXNORW", 3, Operands { v0, v1, v2 })
    // KXNORW k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: VEX.L1.0F.W0 46 /r — second mask source (v1) goes in VEX.vvvv.
            m.vex2(4, 0, nil, hlcode(v[1]))
            m.emit(0x46)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, r/m=first src
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KXNORW")
    }
    return p
}
 13602  
// KXORB performs "Bitwise Logical XOR 8-bit Masks".
//
// Mnemonic        : KXORB
// Supported forms : (1 form)
//
//    * KXORB k, k, k    [AVX512DQ]
//
func (self *Program) KXORB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KXORB", 3, Operands { v0, v1, v2 })
    // KXORB k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: VEX.L1.66.0F.W0 47 /r — second mask source (v1) goes in VEX.vvvv.
            m.vex2(5, 0, nil, hlcode(v[1]))
            m.emit(0x47)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, r/m=first src
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KXORB")
    }
    return p
}
 13627  
// KXORD performs "Bitwise Logical XOR 32-bit Masks".
//
// Mnemonic        : KXORD
// Supported forms : (1 form)
//
//    * KXORD k, k, k    [AVX512BW]
//
func (self *Program) KXORD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KXORD", 3, Operands { v0, v1, v2 })
    // KXORD k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: VEX.L1.66.0F.W1 47 /r — XOR folds the inverted vvvv field (v1) into byte 3.
            m.emit(0xc4)
            m.emit(0xe1)
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit(0x47)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, r/m=first src
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KXORD")
    }
    return p
}
 13654  
// KXORQ performs "Bitwise Logical XOR 64-bit Masks".
//
// Mnemonic        : KXORQ
// Supported forms : (1 form)
//
//    * KXORQ k, k, k    [AVX512BW]
//
func (self *Program) KXORQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KXORQ", 3, Operands { v0, v1, v2 })
    // KXORQ k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: VEX.L1.0F.W1 47 /r — XOR folds the inverted vvvv field (v1) into byte 3.
            m.emit(0xc4)
            m.emit(0xe1)
            m.emit(0xfc ^ (hlcode(v[1]) << 3))
            m.emit(0x47)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, r/m=first src
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KXORQ")
    }
    return p
}
 13681  
// KXORW performs "Bitwise Logical XOR 16-bit Masks".
//
// Mnemonic        : KXORW
// Supported forms : (1 form)
//
//    * KXORW k, k, k    [AVX512F]
//
func (self *Program) KXORW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KXORW", 3, Operands { v0, v1, v2 })
    // KXORW k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: VEX.L1.0F.W0 47 /r — second mask source (v1) goes in VEX.vvvv.
            m.vex2(4, 0, nil, hlcode(v[1]))
            m.emit(0x47)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, r/m=first src
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KXORW")
    }
    return p
}
 13706  
// LDDQU performs "Load Unaligned Integer 128 Bits".
//
// Mnemonic        : LDDQU
// Supported forms : (1 form)
//
//    * LDDQU m128, xmm    [SSE3]
//
func (self *Program) LDDQU(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("LDDQU", 2, Operands { v0, v1 })
    // LDDQU m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: F2 0F F0 /r — mandatory F2 prefix precedes the optional REX byte.
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xf0)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for LDDQU")
    }
    return p
}
 13733  
// LDMXCSR performs "Load MXCSR Register".
//
// Mnemonic        : LDMXCSR
// Supported forms : (1 form)
//
//    * LDMXCSR m32    [SSE]
//
func (self *Program) LDMXCSR(v0 interface{}) *Instruction {
    p := self.alloc("LDMXCSR", 1, Operands { v0 })
    // LDMXCSR m32
    if isM32(v0) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: 0F AE /2 — opcode extension 2 in the ModRM reg field.
            m.rexo(0, addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xae)
            m.mrsd(2, addr(v[0]), 1)                        // /2: reg field is the extension, not a register
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for LDMXCSR")
    }
    return p
}
 13759  
// LEAL performs "Load Effective Address".
//
// Mnemonic        : LEA
// Supported forms : (1 form)
//
//    * LEAL m, r32
//
func (self *Program) LEAL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("LEAL", 2, Operands { v0, v1 })
    // LEAL m, r32
    if isM(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: 8D /r (32-bit operand size, optional REX for extended registers).
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x8d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for LEAL")
    }
    return p
}
 13783  
// LEAQ performs "Load Effective Address".
//
// Mnemonic        : LEA
// Supported forms : (1 form)
//
//    * LEAQ m, r64
//
func (self *Program) LEAQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("LEAQ", 2, Operands { v0, v1 })
    // LEAQ m, r64
    if isM(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: REX.W + 8D /r — rexm(1, ...) forces REX.W for the 64-bit form.
            m.rexm(1, hcode(v[1]), addr(v[0]))
            m.emit(0x8d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for LEAQ")
    }
    return p
}
 13807  
// LEAW performs "Load Effective Address".
//
// Mnemonic        : LEA
// Supported forms : (1 form)
//
//    * LEAW m, r16
//
func (self *Program) LEAW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("LEAW", 2, Operands { v0, v1 })
    // LEAW m, r16
    if isM(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: 66 8D /r — the 66 prefix selects 16-bit operand size.
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x8d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for LEAW")
    }
    return p
}
 13832  
// LFENCE performs "Load Fence".
//
// Mnemonic        : LFENCE
// Supported forms : (1 form)
//
//    * LFENCE    [SSE2]
//
func (self *Program) LFENCE() *Instruction {
    p := self.alloc("LFENCE", 0, Operands {  })
    // LFENCE takes no operands, so the single form is registered unconditionally.
    self.require(ISA_SSE2)
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        // Encoding: 0F AE E8 (fixed three-byte sequence, no ModRM operand).
        m.emit(0x0f)
        m.emit(0xae)
        m.emit(0xe8)
    })
    return p
}
 13852  
// LZCNTL performs "Count the Number of Leading Zero Bits".
//
// Mnemonic        : LZCNT
// Supported forms : (2 forms)
//
//    * LZCNTL r32, r32    [LZCNT]
//    * LZCNTL m32, r32    [LZCNT]
//
func (self *Program) LZCNTL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("LZCNTL", 2, Operands { v0, v1 })
    // LZCNTL r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_LZCNT)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: F3 0F BD /r — mandatory F3 prefix distinguishes LZCNT from BSR.
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xbd)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, r/m=src
        })
    }
    // LZCNTL m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_LZCNT)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same opcode, memory-source form.
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xbd)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for LZCNTL")
    }
    return p
}
 13892  
// LZCNTQ performs "Count the Number of Leading Zero Bits".
//
// Mnemonic        : LZCNT
// Supported forms : (2 forms)
//
//    * LZCNTQ r64, r64    [LZCNT]
//    * LZCNTQ m64, r64    [LZCNT]
//
func (self *Program) LZCNTQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("LZCNTQ", 2, Operands { v0, v1 })
    // LZCNTQ r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_LZCNT)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: F3 REX.W 0F BD /r — 0x48 is REX.W, with R (dst) and B (src) extension bits.
            m.emit(0xf3)
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
            m.emit(0x0f)
            m.emit(0xbd)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, r/m=src
        })
    }
    // LZCNTQ m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_LZCNT)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory-source form: rexm(1, ...) forces REX.W for 64-bit operand size.
            m.emit(0xf3)
            m.rexm(1, hcode(v[1]), addr(v[0]))
            m.emit(0x0f)
            m.emit(0xbd)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for LZCNTQ")
    }
    return p
}
 13932  
// LZCNTW performs "Count the Number of Leading Zero Bits".
//
// Mnemonic        : LZCNT
// Supported forms : (2 forms)
//
//    * LZCNTW r16, r16    [LZCNT]
//    * LZCNTW m16, r16    [LZCNT]
//
func (self *Program) LZCNTW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("LZCNTW", 2, Operands { v0, v1 })
    // LZCNTW r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_LZCNT)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: 66 F3 0F BD /r — 66 selects 16-bit operand size, F3 selects LZCNT.
            m.emit(0x66)
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xbd)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, r/m=src
        })
    }
    // LZCNTW m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_LZCNT)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same opcode, memory-source form.
            m.emit(0x66)
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xbd)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for LZCNTW")
    }
    return p
}
 13974  
// MASKMOVDQU performs "Store Selected Bytes of Double Quadword".
//
// Mnemonic        : MASKMOVDQU
// Supported forms : (1 form)
//
//    * MASKMOVDQU xmm, xmm    [SSE2]
//
func (self *Program) MASKMOVDQU(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MASKMOVDQU", 2, Operands { v0, v1 })
    // MASKMOVDQU xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: 66 0F F7 /r (implicit [RDI] destination).
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xf7)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=v1, r/m=v0
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for MASKMOVDQU")
    }
    return p
}
 14001  
// MASKMOVQ performs "Store Selected Bytes of Quadword".
//
// Mnemonic        : MASKMOVQ
// Supported forms : (1 form)
//
//    * MASKMOVQ mm, mm    [MMX+]
//
func (self *Program) MASKMOVQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MASKMOVQ", 2, Operands { v0, v1 })
    // MASKMOVQ mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: 0F F7 /r — MMX form, no 66 prefix (implicit [RDI] destination).
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xf7)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=v1, r/m=v0
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for MASKMOVQ")
    }
    return p
}
 14027  
// MAXPD performs "Return Maximum Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : MAXPD
// Supported forms : (2 forms)
//
//    * MAXPD xmm, xmm     [SSE2]
//    * MAXPD m128, xmm    [SSE2]
//
func (self *Program) MAXPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MAXPD", 2, Operands { v0, v1 })
    // MAXPD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: 66 0F 5F /r — mandatory 66 prefix selects the packed-double form.
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x5f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, r/m=src
        })
    }
    // MAXPD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same opcode, memory-source form.
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for MAXPD")
    }
    return p
}
 14067  
 14068  // MAXPS performs "Return Maximum Packed Single-Precision Floating-Point Values".
 14069  //
 14070  // Mnemonic        : MAXPS
 14071  // Supported forms : (2 forms)
 14072  //
 14073  //    * MAXPS xmm, xmm     [SSE]
 14074  //    * MAXPS m128, xmm    [SSE]
 14075  //
func (self *Program) MAXPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MAXPS", 2, Operands { v0, v1 })
    // MAXPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // Encoding: 0F 5F /r (no mandatory prefix) with a register-direct ModRM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x5f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MAXPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // Encoding: 0F 5F /r; mrsd emits ModRM/SIB/displacement for the memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No registered encoder means the operand types matched no supported form.
    if p.len == 0 {
        panic("invalid operands for MAXPS")
    }
    return p
}
 14105  
 14106  // MAXSD performs "Return Maximum Scalar Double-Precision Floating-Point Value".
 14107  //
 14108  // Mnemonic        : MAXSD
 14109  // Supported forms : (2 forms)
 14110  //
 14111  //    * MAXSD xmm, xmm    [SSE2]
 14112  //    * MAXSD m64, xmm    [SSE2]
 14113  //
func (self *Program) MAXSD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MAXSD", 2, Operands { v0, v1 })
    // MAXSD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: F2 0F 5F /r with a register-direct ModRM (reg=v[1], rm=v[0]).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x5f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MAXSD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: F2 0F 5F /r; mrsd emits ModRM/SIB/displacement for the memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No registered encoder means the operand types matched no supported form.
    if p.len == 0 {
        panic("invalid operands for MAXSD")
    }
    return p
}
 14145  
 14146  // MAXSS performs "Return Maximum Scalar Single-Precision Floating-Point Value".
 14147  //
 14148  // Mnemonic        : MAXSS
 14149  // Supported forms : (2 forms)
 14150  //
 14151  //    * MAXSS xmm, xmm    [SSE]
 14152  //    * MAXSS m32, xmm    [SSE]
 14153  //
func (self *Program) MAXSS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MAXSS", 2, Operands { v0, v1 })
    // MAXSS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // Encoding: F3 0F 5F /r with a register-direct ModRM (reg=v[1], rm=v[0]).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x5f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MAXSS m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // Encoding: F3 0F 5F /r; mrsd emits ModRM/SIB/displacement for the memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No registered encoder means the operand types matched no supported form.
    if p.len == 0 {
        panic("invalid operands for MAXSS")
    }
    return p
}
 14185  
 14186  // MFENCE performs "Memory Fence".
 14187  //
 14188  // Mnemonic        : MFENCE
 14189  // Supported forms : (1 form)
 14190  //
 14191  //    * MFENCE    [SSE2]
 14192  //
func (self *Program) MFENCE() *Instruction {
    p := self.alloc("MFENCE", 0, Operands {  })
    // MFENCE
    self.require(ISA_SSE2)
    p.domain = DomainGeneric
    // Single fixed encoding with no operands: bytes 0F AE F0.
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0x0f)
        m.emit(0xae)
        m.emit(0xf0)
    })
    return p
}
 14205  
 14206  // MINPD performs "Return Minimum Packed Double-Precision Floating-Point Values".
 14207  //
 14208  // Mnemonic        : MINPD
 14209  // Supported forms : (2 forms)
 14210  //
 14211  //    * MINPD xmm, xmm     [SSE2]
 14212  //    * MINPD m128, xmm    [SSE2]
 14213  //
func (self *Program) MINPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MINPD", 2, Operands { v0, v1 })
    // MINPD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 0F 5D /r with a register-direct ModRM (reg=v[1], rm=v[0]).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MINPD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 0F 5D /r; mrsd emits ModRM/SIB/displacement for the memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No registered encoder means the operand types matched no supported form.
    if p.len == 0 {
        panic("invalid operands for MINPD")
    }
    return p
}
 14245  
 14246  // MINPS performs "Return Minimum Packed Single-Precision Floating-Point Values".
 14247  //
 14248  // Mnemonic        : MINPS
 14249  // Supported forms : (2 forms)
 14250  //
 14251  //    * MINPS xmm, xmm     [SSE]
 14252  //    * MINPS m128, xmm    [SSE]
 14253  //
func (self *Program) MINPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MINPS", 2, Operands { v0, v1 })
    // MINPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // Encoding: 0F 5D /r (no mandatory prefix) with a register-direct ModRM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MINPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // Encoding: 0F 5D /r; mrsd emits ModRM/SIB/displacement for the memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No registered encoder means the operand types matched no supported form.
    if p.len == 0 {
        panic("invalid operands for MINPS")
    }
    return p
}
 14283  
 14284  // MINSD performs "Return Minimum Scalar Double-Precision Floating-Point Value".
 14285  //
 14286  // Mnemonic        : MINSD
 14287  // Supported forms : (2 forms)
 14288  //
 14289  //    * MINSD xmm, xmm    [SSE2]
 14290  //    * MINSD m64, xmm    [SSE2]
 14291  //
func (self *Program) MINSD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MINSD", 2, Operands { v0, v1 })
    // MINSD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: F2 0F 5D /r with a register-direct ModRM (reg=v[1], rm=v[0]).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MINSD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: F2 0F 5D /r; mrsd emits ModRM/SIB/displacement for the memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No registered encoder means the operand types matched no supported form.
    if p.len == 0 {
        panic("invalid operands for MINSD")
    }
    return p
}
 14323  
 14324  // MINSS performs "Return Minimum Scalar Single-Precision Floating-Point Value".
 14325  //
 14326  // Mnemonic        : MINSS
 14327  // Supported forms : (2 forms)
 14328  //
 14329  //    * MINSS xmm, xmm    [SSE]
 14330  //    * MINSS m32, xmm    [SSE]
 14331  //
func (self *Program) MINSS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MINSS", 2, Operands { v0, v1 })
    // MINSS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // Encoding: F3 0F 5D /r with a register-direct ModRM (reg=v[1], rm=v[0]).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MINSS m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // Encoding: F3 0F 5D /r; mrsd emits ModRM/SIB/displacement for the memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No registered encoder means the operand types matched no supported form.
    if p.len == 0 {
        panic("invalid operands for MINSS")
    }
    return p
}
 14363  
 14364  // MONITOR performs "Monitor a Linear Address Range".
 14365  //
 14366  // Mnemonic        : MONITOR
 14367  // Supported forms : (1 form)
 14368  //
 14369  //    * MONITOR    [MONITOR]
 14370  //
func (self *Program) MONITOR() *Instruction {
    p := self.alloc("MONITOR", 0, Operands {  })
    // MONITOR
    self.require(ISA_MONITOR)
    p.domain = DomainMisc
    // Single fixed encoding with no operands: bytes 0F 01 C8.
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0x0f)
        m.emit(0x01)
        m.emit(0xc8)
    })
    return p
}
 14383  
 14384  // MONITORX performs "Monitor a Linear Address Range with Timeout".
 14385  //
 14386  // Mnemonic        : MONITORX
 14387  // Supported forms : (1 form)
 14388  //
 14389  //    * MONITORX    [MONITORX]
 14390  //
func (self *Program) MONITORX() *Instruction {
    p := self.alloc("MONITORX", 0, Operands {  })
    // MONITORX
    self.require(ISA_MONITORX)
    p.domain = DomainMisc
    // Single fixed encoding with no operands: bytes 0F 01 FA.
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0x0f)
        m.emit(0x01)
        m.emit(0xfa)
    })
    return p
}
 14403  
 14404  // MOVAPD performs "Move Aligned Packed Double-Precision Floating-Point Values".
 14405  //
 14406  // Mnemonic        : MOVAPD
 14407  // Supported forms : (3 forms)
 14408  //
 14409  //    * MOVAPD xmm, xmm     [SSE2]
 14410  //    * MOVAPD m128, xmm    [SSE2]
 14411  //    * MOVAPD xmm, m128    [SSE2]
 14412  //
func (self *Program) MOVAPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVAPD", 2, Operands { v0, v1 })
    // MOVAPD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Two equivalent register-to-register encodings are registered so the
        // encoder can choose either direction:
        // 66 0F 28 /r (v[1] in reg field) ...
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x28)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        // ... or 66 0F 29 /r (v[0] in reg field).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0x29)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // MOVAPD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Load form: 66 0F 28 /r with a memory ModRM/SIB/displacement.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x28)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // MOVAPD xmm, m128
    if isXMM(v0) && isM128(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Store form: 66 0F 29 /r with a memory ModRM/SIB/displacement.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x29)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No registered encoder means the operand types matched no supported form.
    if p.len == 0 {
        panic("invalid operands for MOVAPD")
    }
    return p
}
 14463  
 14464  // MOVAPS performs "Move Aligned Packed Single-Precision Floating-Point Values".
 14465  //
 14466  // Mnemonic        : MOVAPS
 14467  // Supported forms : (3 forms)
 14468  //
 14469  //    * MOVAPS xmm, xmm     [SSE]
 14470  //    * MOVAPS m128, xmm    [SSE]
 14471  //    * MOVAPS xmm, m128    [SSE]
 14472  //
func (self *Program) MOVAPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVAPS", 2, Operands { v0, v1 })
    // MOVAPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // Two equivalent register-to-register encodings are registered so the
        // encoder can choose either direction:
        // 0F 28 /r (v[1] in reg field) ...
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x28)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        // ... or 0F 29 /r (v[0] in reg field).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0x29)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // MOVAPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // Load form: 0F 28 /r with a memory ModRM/SIB/displacement.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x28)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // MOVAPS xmm, m128
    if isXMM(v0) && isM128(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // Store form: 0F 29 /r with a memory ModRM/SIB/displacement.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x29)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No registered encoder means the operand types matched no supported form.
    if p.len == 0 {
        panic("invalid operands for MOVAPS")
    }
    return p
}
 14519  
 14520  // MOVB performs "Move".
 14521  //
 14522  // Mnemonic        : MOV
 14523  // Supported forms : (5 forms)
 14524  //
 14525  //    * MOVB imm8, r8
 14526  //    * MOVB r8, r8
 14527  //    * MOVB m8, r8
 14528  //    * MOVB imm8, m8
 14529  //    * MOVB r8, m8
 14530  //
func (self *Program) MOVB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVB", 2, Operands { v0, v1 })
    // MOVB imm8, r8
    if isImm8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        // Two equivalent encodings; a REX prefix is forced when the register
        // is only addressable with REX (SPL/BPL/SIL/DIL or r8b-r15b):
        // C6 /0 ib (ModRM form) ...
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xc6)
            m.emit(0xc0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
        // ... or the shorter B0+rb ib (register encoded in the opcode byte).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xb0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // MOVB r8, r8
    if isReg8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        // Two equivalent register-to-register encodings:
        // 88 /r (v[0] in reg field) ...
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], isReg8REX(v[0]) || isReg8REX(v[1]))
            m.emit(0x88)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
        // ... or 8A /r (v[1] in reg field).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], isReg8REX(v[0]) || isReg8REX(v[1]))
            m.emit(0x8a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVB m8, r8
    if isM8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        // Load form: 8A /r with a memory ModRM/SIB/displacement.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), isReg8REX(v[1]))
            m.emit(0x8a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // MOVB imm8, m8
    if isImm8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        // Immediate store: C6 /0 ib, opcode extension 0 in the reg field.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc6)
            m.mrsd(0, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // MOVB r8, m8
    if isReg8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        // Store form: 88 /r with a memory ModRM/SIB/displacement.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), isReg8REX(v[0]))
            m.emit(0x88)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No registered encoder means the operand types matched no supported form.
    if p.len == 0 {
        panic("invalid operands for MOVB")
    }
    return p
}
 14595  
 14596  // MOVBEL performs "Move Data After Swapping Bytes".
 14597  //
 14598  // Mnemonic        : MOVBE
 14599  // Supported forms : (2 forms)
 14600  //
 14601  //    * MOVBEL m32, r32    [MOVBE]
 14602  //    * MOVBEL r32, m32    [MOVBE]
 14603  //
func (self *Program) MOVBEL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVBEL", 2, Operands { v0, v1 })
    // MOVBEL m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_MOVBE)
        p.domain = DomainGeneric
        // Load form: 0F 38 F0 /r (three-byte opcode) with a memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xf0)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // MOVBEL r32, m32
    if isReg32(v0) && isM32(v1) {
        self.require(ISA_MOVBE)
        p.domain = DomainGeneric
        // Store form: 0F 38 F1 /r with a memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xf1)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No registered encoder means the operand types matched no supported form.
    if p.len == 0 {
        panic("invalid operands for MOVBEL")
    }
    return p
}
 14635  
 14636  // MOVBEQ performs "Move Data After Swapping Bytes".
 14637  //
 14638  // Mnemonic        : MOVBE
 14639  // Supported forms : (2 forms)
 14640  //
 14641  //    * MOVBEQ m64, r64    [MOVBE]
 14642  //    * MOVBEQ r64, m64    [MOVBE]
 14643  //
func (self *Program) MOVBEQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVBEQ", 2, Operands { v0, v1 })
    // MOVBEQ m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_MOVBE)
        p.domain = DomainGeneric
        // Load form: REX.W (rexm with w=1) + 0F 38 F0 /r for the 64-bit width.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xf0)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // MOVBEQ r64, m64
    if isReg64(v0) && isM64(v1) {
        self.require(ISA_MOVBE)
        p.domain = DomainGeneric
        // Store form: REX.W + 0F 38 F1 /r.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[0]), addr(v[1]))
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xf1)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No registered encoder means the operand types matched no supported form.
    if p.len == 0 {
        panic("invalid operands for MOVBEQ")
    }
    return p
}
 14675  
 14676  // MOVBEW performs "Move Data After Swapping Bytes".
 14677  //
 14678  // Mnemonic        : MOVBE
 14679  // Supported forms : (2 forms)
 14680  //
 14681  //    * MOVBEW m16, r16    [MOVBE]
 14682  //    * MOVBEW r16, m16    [MOVBE]
 14683  //
func (self *Program) MOVBEW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVBEW", 2, Operands { v0, v1 })
    // MOVBEW m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_MOVBE)
        p.domain = DomainGeneric
        // Load form: 66 (operand-size override for 16-bit) + 0F 38 F0 /r.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xf0)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // MOVBEW r16, m16
    if isReg16(v0) && isM16(v1) {
        self.require(ISA_MOVBE)
        p.domain = DomainGeneric
        // Store form: 66 + 0F 38 F1 /r.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xf1)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No registered encoder means the operand types matched no supported form.
    if p.len == 0 {
        panic("invalid operands for MOVBEW")
    }
    return p
}
 14717  
 14718  // MOVD performs "Move Doubleword".
 14719  //
 14720  // Mnemonic        : MOVD
 14721  // Supported forms : (8 forms)
 14722  //
 14723  //    * MOVD mm, r32     [MMX]
 14724  //    * MOVD r32, mm     [MMX]
 14725  //    * MOVD m32, mm     [MMX]
 14726  //    * MOVD mm, m32     [MMX]
 14727  //    * MOVD xmm, r32    [SSE2]
 14728  //    * MOVD r32, xmm    [SSE2]
 14729  //    * MOVD m32, xmm    [SSE2]
 14730  //    * MOVD xmm, m32    [SSE2]
 14731  //
func (self *Program) MOVD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVD", 2, Operands { v0, v1 })
    // MOVD mm, r32
    if isMM(v0) && isReg32(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        // MMX store-to-GPR form: 0F 7E /r, mm register in the reg field.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0x7e)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // MOVD r32, mm
    if isReg32(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        // MMX load-from-GPR form: 0F 6E /r, mm register in the reg field.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x6e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVD m32, mm
    if isM32(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        // MMX load-from-memory form: 0F 6E /r with a memory ModRM/SIB/displacement.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x6e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // MOVD mm, m32
    if isMM(v0) && isM32(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        // MMX store-to-memory form: 0F 7E /r with a memory ModRM/SIB/displacement.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x7e)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // MOVD xmm, r32
    if isXMM(v0) && isReg32(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // SSE2 store-to-GPR form: 66 0F 7E /r, xmm register in the reg field.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0x7e)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // MOVD r32, xmm
    if isReg32(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // SSE2 load-from-GPR form: 66 0F 6E /r, xmm register in the reg field.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x6e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVD m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // SSE2 load-from-memory form: 66 0F 6E /r with a memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x6e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // MOVD xmm, m32
    if isXMM(v0) && isM32(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // SSE2 store-to-memory form: 66 0F 7E /r with a memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x7e)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No registered encoder means the operand types matched no supported form.
    if p.len == 0 {
        panic("invalid operands for MOVD")
    }
    return p
}
 14831  
 14832  // MOVDDUP performs "Move One Double-FP and Duplicate".
 14833  //
 14834  // Mnemonic        : MOVDDUP
 14835  // Supported forms : (2 forms)
 14836  //
 14837  //    * MOVDDUP xmm, xmm    [SSE3]
 14838  //    * MOVDDUP m64, xmm    [SSE3]
 14839  //
func (self *Program) MOVDDUP(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVDDUP", 2, Operands { v0, v1 })
    // MOVDDUP xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        // Encoding: F2 0F 12 /r with a register-direct ModRM (reg=v[1], rm=v[0]).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x12)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVDDUP m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        // Encoding: F2 0F 12 /r; mrsd emits ModRM/SIB/displacement for the memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x12)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No registered encoder means the operand types matched no supported form.
    if p.len == 0 {
        panic("invalid operands for MOVDDUP")
    }
    return p
}
 14871  
 14872  // MOVDQ2Q performs "Move Quadword from XMM to MMX Technology Register".
 14873  //
 14874  // Mnemonic        : MOVDQ2Q
 14875  // Supported forms : (1 form)
 14876  //
 14877  //    * MOVDQ2Q xmm, mm    [SSE2]
 14878  //
func (self *Program) MOVDQ2Q(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVDQ2Q", 2, Operands { v0, v1 })
    // MOVDQ2Q xmm, mm
    if isXMM(v0) && isMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: F2 0F D6 /r with a register-direct ModRM
        // (mm destination in reg field, xmm source in rm field).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xd6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No registered encoder means the operand types matched no supported form.
    if p.len == 0 {
        panic("invalid operands for MOVDQ2Q")
    }
    return p
}
 14898  
 14899  // MOVDQA performs "Move Aligned Double Quadword".
 14900  //
 14901  // Mnemonic        : MOVDQA
 14902  // Supported forms : (3 forms)
 14903  //
 14904  //    * MOVDQA xmm, xmm     [SSE2]
 14905  //    * MOVDQA m128, xmm    [SSE2]
 14906  //    * MOVDQA xmm, m128    [SSE2]
 14907  //
func (self *Program) MOVDQA(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVDQA", 2, Operands { v0, v1 })
    // MOVDQA xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Two equivalent register-to-register encodings are registered so the
        // encoder can choose either direction:
        // 66 0F 6F /r (v[1] in reg field) ...
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        // ... or 66 0F 7F /r (v[0] in reg field).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // MOVDQA m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Load form: 66 0F 6F /r with a memory ModRM/SIB/displacement.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // MOVDQA xmm, m128
    if isXMM(v0) && isM128(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Store form: 66 0F 7F /r with a memory ModRM/SIB/displacement.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No registered encoder means the operand types matched no supported form.
    if p.len == 0 {
        panic("invalid operands for MOVDQA")
    }
    return p
}
 14958  
 14959  // MOVDQU performs "Move Unaligned Double Quadword".
 14960  //
 14961  // Mnemonic        : MOVDQU
 14962  // Supported forms : (3 forms)
 14963  //
 14964  //    * MOVDQU xmm, xmm     [SSE2]
 14965  //    * MOVDQU m128, xmm    [SSE2]
 14966  //    * MOVDQU xmm, m128    [SSE2]
 14967  //
func (self *Program) MOVDQU(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVDQU", 2, Operands { v0, v1 })
    // MOVDQU xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)  // all forms are gated on SSE2
        p.domain = DomainMMXSSE
        // encoding 1: F3 0F 6F /r (load direction, dst in ModRM.reg)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)                                   // F3 prefix (unaligned variant)
            m.rexo(hcode(v[1]), v[0], false)               // optional REX for extended registers
            m.emit(0x0f)                                   // 0F escape
            m.emit(0x6f)                                   // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=dst, rm=src
        })
        // encoding 2: F3 0F 7F /r (store direction, src in ModRM.reg)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // MOVDQU m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // encoding: F3 0F 6F /r with a memory source
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM/SIB/displacement for the memory operand
        })
    }
    // MOVDQU xmm, m128
    if isXMM(v0) && isM128(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // encoding: F3 0F 7F /r with a memory destination
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for MOVDQU")
    }
    return p
}
 15018  
 15019  // MOVHLPS performs "Move Packed Single-Precision Floating-Point Values High to Low".
 15020  //
 15021  // Mnemonic        : MOVHLPS
 15022  // Supported forms : (1 form)
 15023  //
 15024  //    * MOVHLPS xmm, xmm    [SSE]
 15025  //
func (self *Program) MOVHLPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVHLPS", 2, Operands { v0, v1 })
    // MOVHLPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)  // this form is gated on SSE
        p.domain = DomainMMXSSE
        // encoding: 0F 12 /r (register-direct only, no prefix)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)               // optional REX for extended registers
            m.emit(0x0f)                                   // 0F escape
            m.emit(0x12)                                   // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for MOVHLPS")
    }
    return p
}
 15044  
 15045  // MOVHPD performs "Move High Packed Double-Precision Floating-Point Value".
 15046  //
 15047  // Mnemonic        : MOVHPD
 15048  // Supported forms : (2 forms)
 15049  //
 15050  //    * MOVHPD m64, xmm    [SSE2]
 15051  //    * MOVHPD xmm, m64    [SSE2]
 15052  //
func (self *Program) MOVHPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVHPD", 2, Operands { v0, v1 })
    // MOVHPD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE2)  // both forms are gated on SSE2
        p.domain = DomainMMXSSE
        // encoding: 66 0F 16 /r (load into high half)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                            // 66 prefix
            m.rexo(hcode(v[1]), addr(v[0]), false)  // optional REX for extended registers
            m.emit(0x0f)                            // 0F escape
            m.emit(0x16)                            // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1)      // ModRM/SIB/displacement for the memory operand
        })
    }
    // MOVHPD xmm, m64
    if isXMM(v0) && isM64(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // encoding: 66 0F 17 /r (store from high half)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x17)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for MOVHPD")
    }
    return p
}
 15084  
 15085  // MOVHPS performs "Move High Packed Single-Precision Floating-Point Values".
 15086  //
 15087  // Mnemonic        : MOVHPS
 15088  // Supported forms : (2 forms)
 15089  //
 15090  //    * MOVHPS m64, xmm    [SSE]
 15091  //    * MOVHPS xmm, m64    [SSE]
 15092  //
func (self *Program) MOVHPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVHPS", 2, Operands { v0, v1 })
    // MOVHPS m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE)  // both forms are gated on SSE
        p.domain = DomainMMXSSE
        // encoding: 0F 16 /r (load into high half, no prefix)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)  // optional REX for extended registers
            m.emit(0x0f)                            // 0F escape
            m.emit(0x16)                            // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1)      // ModRM/SIB/displacement for the memory operand
        })
    }
    // MOVHPS xmm, m64
    if isXMM(v0) && isM64(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // encoding: 0F 17 /r (store from high half)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x17)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for MOVHPS")
    }
    return p
}
 15122  
 15123  // MOVL performs "Move".
 15124  //
 15125  // Mnemonic        : MOV
 15126  // Supported forms : (5 forms)
 15127  //
 15128  //    * MOVL imm32, r32
 15129  //    * MOVL r32, r32
 15130  //    * MOVL m32, r32
 15131  //    * MOVL imm32, m32
 15132  //    * MOVL r32, m32
 15133  //
func (self *Program) MOVL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVL", 2, Operands { v0, v1 })
    // MOVL imm32, r32
    if isImm32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        // encoding 1: C7 /0 id (ModRM form)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)      // optional REX for extended registers
            m.emit(0xc7)                // opcode
            m.emit(0xc0 | lcode(v[1]))  // ModRM: mod=11, reg=/0, rm=dst
            m.imm4(toImmAny(v[0]))      // 32-bit immediate
        })
        // encoding 2: B8+r id (register encoded in the opcode byte)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xb8 | lcode(v[1]))
            m.imm4(toImmAny(v[0]))
        })
    }
    // MOVL r32, r32
    if isReg32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        // encoding 1: 89 /r (store direction, src in ModRM.reg)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x89)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))  // ModRM: mod=11, reg=src, rm=dst
        })
        // encoding 2: 8B /r (load direction, dst in ModRM.reg)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x8b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVL m32, r32
    if isM32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        // encoding: 8B /r with a memory source
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x8b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM/SIB/displacement for the memory operand
        })
    }
    // MOVL imm32, m32
    if isImm32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        // encoding: C7 /0 id with a memory destination
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc7)
            m.mrsd(0, addr(v[1]), 1)
            m.imm4(toImmAny(v[0]))
        })
    }
    // MOVL r32, m32
    if isReg32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        // encoding: 89 /r with a memory destination
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x89)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for MOVL")
    }
    return p
}
 15198  
 15199  // MOVLHPS performs "Move Packed Single-Precision Floating-Point Values Low to High".
 15200  //
 15201  // Mnemonic        : MOVLHPS
 15202  // Supported forms : (1 form)
 15203  //
 15204  //    * MOVLHPS xmm, xmm    [SSE]
 15205  //
func (self *Program) MOVLHPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVLHPS", 2, Operands { v0, v1 })
    // MOVLHPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)  // this form is gated on SSE
        p.domain = DomainMMXSSE
        // encoding: 0F 16 /r (register-direct only, no prefix)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)               // optional REX for extended registers
            m.emit(0x0f)                                   // 0F escape
            m.emit(0x16)                                   // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for MOVLHPS")
    }
    return p
}
 15224  
 15225  // MOVLPD performs "Move Low Packed Double-Precision Floating-Point Value".
 15226  //
 15227  // Mnemonic        : MOVLPD
 15228  // Supported forms : (2 forms)
 15229  //
 15230  //    * MOVLPD m64, xmm    [SSE2]
 15231  //    * MOVLPD xmm, m64    [SSE2]
 15232  //
func (self *Program) MOVLPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVLPD", 2, Operands { v0, v1 })
    // MOVLPD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE2)  // both forms are gated on SSE2
        p.domain = DomainMMXSSE
        // encoding: 66 0F 12 /r (load into low half)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                            // 66 prefix
            m.rexo(hcode(v[1]), addr(v[0]), false)  // optional REX for extended registers
            m.emit(0x0f)                            // 0F escape
            m.emit(0x12)                            // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1)      // ModRM/SIB/displacement for the memory operand
        })
    }
    // MOVLPD xmm, m64
    if isXMM(v0) && isM64(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // encoding: 66 0F 13 /r (store from low half)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x13)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for MOVLPD")
    }
    return p
}
 15264  
 15265  // MOVLPS performs "Move Low Packed Single-Precision Floating-Point Values".
 15266  //
 15267  // Mnemonic        : MOVLPS
 15268  // Supported forms : (2 forms)
 15269  //
 15270  //    * MOVLPS m64, xmm    [SSE]
 15271  //    * MOVLPS xmm, m64    [SSE]
 15272  //
func (self *Program) MOVLPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVLPS", 2, Operands { v0, v1 })
    // MOVLPS m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE)  // both forms are gated on SSE
        p.domain = DomainMMXSSE
        // encoding: 0F 12 /r (load into low half, no prefix)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)  // optional REX for extended registers
            m.emit(0x0f)                            // 0F escape
            m.emit(0x12)                            // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1)      // ModRM/SIB/displacement for the memory operand
        })
    }
    // MOVLPS xmm, m64
    if isXMM(v0) && isM64(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // encoding: 0F 13 /r (store from low half)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x13)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for MOVLPS")
    }
    return p
}
 15302  
 15303  // MOVMSKPD performs "Extract Packed Double-Precision Floating-Point Sign Mask".
 15304  //
 15305  // Mnemonic        : MOVMSKPD
 15306  // Supported forms : (1 form)
 15307  //
 15308  //    * MOVMSKPD xmm, r32    [SSE2]
 15309  //
func (self *Program) MOVMSKPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVMSKPD", 2, Operands { v0, v1 })
    // MOVMSKPD xmm, r32
    if isXMM(v0) && isReg32(v1) {
        self.require(ISA_SSE2)  // this form is gated on SSE2
        p.domain = DomainMMXSSE
        // encoding: 66 0F 50 /r (register-direct only)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                   // 66 prefix
            m.rexo(hcode(v[1]), v[0], false)               // optional REX for extended registers
            m.emit(0x0f)                                   // 0F escape
            m.emit(0x50)                                   // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for MOVMSKPD")
    }
    return p
}
 15329  
 15330  // MOVMSKPS performs "Extract Packed Single-Precision Floating-Point Sign Mask".
 15331  //
 15332  // Mnemonic        : MOVMSKPS
 15333  // Supported forms : (1 form)
 15334  //
 15335  //    * MOVMSKPS xmm, r32    [SSE]
 15336  //
func (self *Program) MOVMSKPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVMSKPS", 2, Operands { v0, v1 })
    // MOVMSKPS xmm, r32
    if isXMM(v0) && isReg32(v1) {
        self.require(ISA_SSE)  // this form is gated on SSE
        p.domain = DomainMMXSSE
        // encoding: 0F 50 /r (register-direct only, no prefix)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)               // optional REX for extended registers
            m.emit(0x0f)                                   // 0F escape
            m.emit(0x50)                                   // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for MOVMSKPS")
    }
    return p
}
 15355  
 15356  // MOVNTDQ performs "Store Double Quadword Using Non-Temporal Hint".
 15357  //
 15358  // Mnemonic        : MOVNTDQ
 15359  // Supported forms : (1 form)
 15360  //
 15361  //    * MOVNTDQ xmm, m128    [SSE2]
 15362  //
func (self *Program) MOVNTDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVNTDQ", 2, Operands { v0, v1 })
    // MOVNTDQ xmm, m128
    if isXMM(v0) && isM128(v1) {
        self.require(ISA_SSE2)  // this form is gated on SSE2
        p.domain = DomainMMXSSE
        // encoding: 66 0F E7 /r (memory destination only)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                            // 66 prefix
            m.rexo(hcode(v[0]), addr(v[1]), false)  // optional REX for extended registers
            m.emit(0x0f)                            // 0F escape
            m.emit(0xe7)                            // opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 1)      // ModRM/SIB/displacement for the memory operand
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for MOVNTDQ")
    }
    return p
}
 15382  
 15383  // MOVNTDQA performs "Load Double Quadword Non-Temporal Aligned Hint".
 15384  //
 15385  // Mnemonic        : MOVNTDQA
 15386  // Supported forms : (1 form)
 15387  //
 15388  //    * MOVNTDQA m128, xmm    [SSE4.1]
 15389  //
func (self *Program) MOVNTDQA(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVNTDQA", 2, Operands { v0, v1 })
    // MOVNTDQA m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)  // this form is gated on SSE4.1
        p.domain = DomainMMXSSE
        // encoding: 66 0F 38 2A /r (three-byte opcode, memory source only)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                            // 66 prefix
            m.rexo(hcode(v[1]), addr(v[0]), false)  // optional REX for extended registers
            m.emit(0x0f)                            // 0F escape
            m.emit(0x38)                            // 0F 38 escape
            m.emit(0x2a)                            // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1)      // ModRM/SIB/displacement for the memory operand
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for MOVNTDQA")
    }
    return p
}
 15410  
 15411  // MOVNTIL performs "Store Doubleword Using Non-Temporal Hint".
 15412  //
 15413  // Mnemonic        : MOVNTI
 15414  // Supported forms : (1 form)
 15415  //
 15416  //    * MOVNTIL r32, m32    [SSE2]
 15417  //
func (self *Program) MOVNTIL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVNTIL", 2, Operands { v0, v1 })
    // MOVNTIL r32, m32
    if isReg32(v0) && isM32(v1) {
        self.require(ISA_SSE2)  // this form is gated on SSE2
        p.domain = DomainGeneric
        // encoding: 0F C3 /r (32-bit operand, memory destination only)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)  // optional REX for extended registers
            m.emit(0x0f)                            // 0F escape
            m.emit(0xc3)                            // opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 1)      // ModRM/SIB/displacement for the memory operand
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for MOVNTIL")
    }
    return p
}
 15436  
// MOVNTIQ performs "Store Quadword Using Non-Temporal Hint".
 15438  //
 15439  // Mnemonic        : MOVNTI
 15440  // Supported forms : (1 form)
 15441  //
 15442  //    * MOVNTIQ r64, m64    [SSE2]
 15443  //
func (self *Program) MOVNTIQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVNTIQ", 2, Operands { v0, v1 })
    // MOVNTIQ r64, m64
    if isReg64(v0) && isM64(v1) {
        self.require(ISA_SSE2)  // this form is gated on SSE2
        p.domain = DomainGeneric
        // encoding: REX.W 0F C3 /r (64-bit operand, memory destination only)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[0]), addr(v[1]))  // mandatory REX with W=1 (64-bit operand size)
            m.emit(0x0f)                        // 0F escape
            m.emit(0xc3)                        // opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 1)  // ModRM/SIB/displacement for the memory operand
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for MOVNTIQ")
    }
    return p
}
 15462  
 15463  // MOVNTPD performs "Store Packed Double-Precision Floating-Point Values Using Non-Temporal Hint".
 15464  //
 15465  // Mnemonic        : MOVNTPD
 15466  // Supported forms : (1 form)
 15467  //
 15468  //    * MOVNTPD xmm, m128    [SSE2]
 15469  //
func (self *Program) MOVNTPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVNTPD", 2, Operands { v0, v1 })
    // MOVNTPD xmm, m128
    if isXMM(v0) && isM128(v1) {
        self.require(ISA_SSE2)  // this form is gated on SSE2
        p.domain = DomainMMXSSE
        // encoding: 66 0F 2B /r (memory destination only)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                            // 66 prefix
            m.rexo(hcode(v[0]), addr(v[1]), false)  // optional REX for extended registers
            m.emit(0x0f)                            // 0F escape
            m.emit(0x2b)                            // opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 1)      // ModRM/SIB/displacement for the memory operand
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for MOVNTPD")
    }
    return p
}
 15489  
 15490  // MOVNTPS performs "Store Packed Single-Precision Floating-Point Values Using Non-Temporal Hint".
 15491  //
 15492  // Mnemonic        : MOVNTPS
 15493  // Supported forms : (1 form)
 15494  //
 15495  //    * MOVNTPS xmm, m128    [SSE]
 15496  //
func (self *Program) MOVNTPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVNTPS", 2, Operands { v0, v1 })
    // MOVNTPS xmm, m128
    if isXMM(v0) && isM128(v1) {
        self.require(ISA_SSE)  // this form is gated on SSE
        p.domain = DomainMMXSSE
        // encoding: 0F 2B /r (no prefix, memory destination only)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)  // optional REX for extended registers
            m.emit(0x0f)                            // 0F escape
            m.emit(0x2b)                            // opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 1)      // ModRM/SIB/displacement for the memory operand
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for MOVNTPS")
    }
    return p
}
 15515  
 15516  // MOVNTQ performs "Store of Quadword Using Non-Temporal Hint".
 15517  //
 15518  // Mnemonic        : MOVNTQ
 15519  // Supported forms : (1 form)
 15520  //
 15521  //    * MOVNTQ mm, m64    [MMX+]
 15522  //
func (self *Program) MOVNTQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVNTQ", 2, Operands { v0, v1 })
    // MOVNTQ mm, m64
    if isMM(v0) && isM64(v1) {
        self.require(ISA_MMX_PLUS)  // this form is gated on MMX+
        p.domain = DomainMMXSSE
        // encoding: 0F E7 /r (MMX source, memory destination only)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)  // optional REX for extended registers
            m.emit(0x0f)                            // 0F escape
            m.emit(0xe7)                            // opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 1)      // ModRM/SIB/displacement for the memory operand
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for MOVNTQ")
    }
    return p
}
 15541  
 15542  // MOVNTSD performs "Store Scalar Double-Precision Floating-Point Values Using Non-Temporal Hint".
 15543  //
 15544  // Mnemonic        : MOVNTSD
 15545  // Supported forms : (1 form)
 15546  //
 15547  //    * MOVNTSD xmm, m64    [SSE4A]
 15548  //
func (self *Program) MOVNTSD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVNTSD", 2, Operands { v0, v1 })
    // MOVNTSD xmm, m64
    if isXMM(v0) && isM64(v1) {
        self.require(ISA_SSE4A)       // this form is gated on SSE4A (AMD)
        p.domain = DomainAMDSpecific  // AMD-only instruction
        // encoding: F2 0F 2B /r (memory destination only)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)                            // F2 prefix
            m.rexo(hcode(v[0]), addr(v[1]), false)  // optional REX for extended registers
            m.emit(0x0f)                            // 0F escape
            m.emit(0x2b)                            // opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 1)      // ModRM/SIB/displacement for the memory operand
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for MOVNTSD")
    }
    return p
}
 15568  
 15569  // MOVNTSS performs "Store Scalar Single-Precision Floating-Point Values Using Non-Temporal Hint".
 15570  //
 15571  // Mnemonic        : MOVNTSS
 15572  // Supported forms : (1 form)
 15573  //
 15574  //    * MOVNTSS xmm, m32    [SSE4A]
 15575  //
func (self *Program) MOVNTSS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVNTSS", 2, Operands { v0, v1 })
    // MOVNTSS xmm, m32
    if isXMM(v0) && isM32(v1) {
        self.require(ISA_SSE4A)       // this form is gated on SSE4A (AMD)
        p.domain = DomainAMDSpecific  // AMD-only instruction
        // encoding: F3 0F 2B /r (memory destination only)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)                            // F3 prefix
            m.rexo(hcode(v[0]), addr(v[1]), false)  // optional REX for extended registers
            m.emit(0x0f)                            // 0F escape
            m.emit(0x2b)                            // opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 1)      // ModRM/SIB/displacement for the memory operand
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for MOVNTSS")
    }
    return p
}
 15595  
 15596  // MOVQ performs "Move".
 15597  //
 15598  // Mnemonic        : MOV
 15599  // Supported forms : (16 forms)
 15600  //
 15601  //    * MOVQ imm32, r64
 15602  //    * MOVQ imm64, r64
 15603  //    * MOVQ r64, r64
 15604  //    * MOVQ m64, r64
 15605  //    * MOVQ imm32, m64
 15606  //    * MOVQ r64, m64
 15607  //    * MOVQ mm, r64       [MMX]
 15608  //    * MOVQ r64, mm       [MMX]
 15609  //    * MOVQ mm, mm        [MMX]
 15610  //    * MOVQ m64, mm       [MMX]
 15611  //    * MOVQ mm, m64       [MMX]
 15612  //    * MOVQ xmm, r64      [SSE2]
 15613  //    * MOVQ r64, xmm      [SSE2]
 15614  //    * MOVQ xmm, xmm      [SSE2]
 15615  //    * MOVQ m64, xmm      [SSE2]
 15616  //    * MOVQ xmm, m64      [SSE2]
 15617  //
 15618  func (self *Program) MOVQ(v0 interface{}, v1 interface{}) *Instruction {
 15619      p := self.alloc("MOVQ", 2, Operands { v0, v1 })
 15620      // MOVQ imm32, r64
 15621      if isImm32Ext(v0, 8) && isReg64(v1) {
 15622          p.domain = DomainGeneric
 15623          p.add(0, func(m *_Encoding, v []interface{}) {
 15624              m.emit(0x48 | hcode(v[1]))
 15625              m.emit(0xc7)
 15626              m.emit(0xc0 | lcode(v[1]))
 15627              m.imm4(toImmAny(v[0]))
 15628          })
 15629      }
 15630      // MOVQ imm64, r64
 15631      if isImm64(v0) && isReg64(v1) {
 15632          p.domain = DomainGeneric
 15633          p.add(0, func(m *_Encoding, v []interface{}) {
 15634              m.emit(0x48 | hcode(v[1]))
 15635              m.emit(0xb8 | lcode(v[1]))
 15636              m.imm8(toImmAny(v[0]))
 15637          })
 15638      }
 15639      // MOVQ r64, r64
 15640      if isReg64(v0) && isReg64(v1) {
 15641          p.domain = DomainGeneric
 15642          p.add(0, func(m *_Encoding, v []interface{}) {
 15643              m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1]))
 15644              m.emit(0x89)
 15645              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 15646          })
 15647          p.add(0, func(m *_Encoding, v []interface{}) {
 15648              m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
 15649              m.emit(0x8b)
 15650              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 15651          })
 15652      }
 15653      // MOVQ m64, r64
 15654      if isM64(v0) && isReg64(v1) {
 15655          p.domain = DomainGeneric
 15656          p.add(0, func(m *_Encoding, v []interface{}) {
 15657              m.rexm(1, hcode(v[1]), addr(v[0]))
 15658              m.emit(0x8b)
 15659              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 15660          })
 15661      }
 15662      // MOVQ imm32, m64
 15663      if isImm32Ext(v0, 8) && isM64(v1) {
 15664          p.domain = DomainGeneric
 15665          p.add(0, func(m *_Encoding, v []interface{}) {
 15666              m.rexm(1, 0, addr(v[1]))
 15667              m.emit(0xc7)
 15668              m.mrsd(0, addr(v[1]), 1)
 15669              m.imm4(toImmAny(v[0]))
 15670          })
 15671      }
 15672      // MOVQ r64, m64
 15673      if isReg64(v0) && isM64(v1) {
 15674          p.domain = DomainGeneric
 15675          p.add(0, func(m *_Encoding, v []interface{}) {
 15676              m.rexm(1, hcode(v[0]), addr(v[1]))
 15677              m.emit(0x89)
 15678              m.mrsd(lcode(v[0]), addr(v[1]), 1)
 15679          })
 15680      }
 15681      // MOVQ mm, r64
 15682      if isMM(v0) && isReg64(v1) {
 15683          self.require(ISA_MMX)
 15684          p.domain = DomainMMXSSE
 15685          p.add(0, func(m *_Encoding, v []interface{}) {
 15686              m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1]))
 15687              m.emit(0x0f)
 15688              m.emit(0x7e)
 15689              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 15690          })
 15691      }
 15692      // MOVQ r64, mm
 15693      if isReg64(v0) && isMM(v1) {
 15694          self.require(ISA_MMX)
 15695          p.domain = DomainMMXSSE
 15696          p.add(0, func(m *_Encoding, v []interface{}) {
 15697              m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
 15698              m.emit(0x0f)
 15699              m.emit(0x6e)
 15700              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 15701          })
 15702      }
 15703      // MOVQ mm, mm
 15704      if isMM(v0) && isMM(v1) {
 15705          self.require(ISA_MMX)
 15706          p.domain = DomainMMXSSE
 15707          p.add(0, func(m *_Encoding, v []interface{}) {
 15708              m.rexo(hcode(v[1]), v[0], false)
 15709              m.emit(0x0f)
 15710              m.emit(0x6f)
 15711              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 15712          })
 15713          p.add(0, func(m *_Encoding, v []interface{}) {
 15714              m.rexo(hcode(v[0]), v[1], false)
 15715              m.emit(0x0f)
 15716              m.emit(0x7f)
 15717              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 15718          })
 15719      }
 15720      // MOVQ m64, mm
 15721      if isM64(v0) && isMM(v1) {
 15722          self.require(ISA_MMX)
 15723          p.domain = DomainMMXSSE
 15724          p.add(0, func(m *_Encoding, v []interface{}) {
 15725              m.rexo(hcode(v[1]), addr(v[0]), false)
 15726              m.emit(0x0f)
 15727              m.emit(0x6f)
 15728              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 15729          })
 15730          p.add(0, func(m *_Encoding, v []interface{}) {
 15731              m.rexm(1, hcode(v[1]), addr(v[0]))
 15732              m.emit(0x0f)
 15733              m.emit(0x6e)
 15734              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 15735          })
 15736      }
 15737      // MOVQ mm, m64
 15738      if isMM(v0) && isM64(v1) {
 15739          self.require(ISA_MMX)
 15740          p.domain = DomainMMXSSE
 15741          p.add(0, func(m *_Encoding, v []interface{}) {
 15742              m.rexo(hcode(v[0]), addr(v[1]), false)
 15743              m.emit(0x0f)
 15744              m.emit(0x7f)
 15745              m.mrsd(lcode(v[0]), addr(v[1]), 1)
 15746          })
 15747          p.add(0, func(m *_Encoding, v []interface{}) {
 15748              m.rexm(1, hcode(v[0]), addr(v[1]))
 15749              m.emit(0x0f)
 15750              m.emit(0x7e)
 15751              m.mrsd(lcode(v[0]), addr(v[1]), 1)
 15752          })
 15753      }
 15754      // MOVQ xmm, r64
 15755      if isXMM(v0) && isReg64(v1) {
 15756          self.require(ISA_SSE2)
 15757          p.domain = DomainMMXSSE
 15758          p.add(0, func(m *_Encoding, v []interface{}) {
 15759              m.emit(0x66)
 15760              m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1]))
 15761              m.emit(0x0f)
 15762              m.emit(0x7e)
 15763              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 15764          })
 15765      }
 15766      // MOVQ r64, xmm
 15767      if isReg64(v0) && isXMM(v1) {
 15768          self.require(ISA_SSE2)
 15769          p.domain = DomainMMXSSE
 15770          p.add(0, func(m *_Encoding, v []interface{}) {
 15771              m.emit(0x66)
 15772              m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
 15773              m.emit(0x0f)
 15774              m.emit(0x6e)
 15775              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 15776          })
 15777      }
 15778      // MOVQ xmm, xmm
 15779      if isXMM(v0) && isXMM(v1) {
 15780          self.require(ISA_SSE2)
 15781          p.domain = DomainMMXSSE
 15782          p.add(0, func(m *_Encoding, v []interface{}) {
 15783              m.emit(0xf3)
 15784              m.rexo(hcode(v[1]), v[0], false)
 15785              m.emit(0x0f)
 15786              m.emit(0x7e)
 15787              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 15788          })
 15789          p.add(0, func(m *_Encoding, v []interface{}) {
 15790              m.emit(0x66)
 15791              m.rexo(hcode(v[0]), v[1], false)
 15792              m.emit(0x0f)
 15793              m.emit(0xd6)
 15794              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 15795          })
 15796      }
 15797      // MOVQ m64, xmm
 15798      if isM64(v0) && isXMM(v1) {
 15799          self.require(ISA_SSE2)
 15800          p.domain = DomainMMXSSE
 15801          p.add(0, func(m *_Encoding, v []interface{}) {
 15802              m.emit(0xf3)
 15803              m.rexo(hcode(v[1]), addr(v[0]), false)
 15804              m.emit(0x0f)
 15805              m.emit(0x7e)
 15806              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 15807          })
 15808          p.add(0, func(m *_Encoding, v []interface{}) {
 15809              m.emit(0x66)
 15810              m.rexm(1, hcode(v[1]), addr(v[0]))
 15811              m.emit(0x0f)
 15812              m.emit(0x6e)
 15813              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 15814          })
 15815      }
 15816      // MOVQ xmm, m64
 15817      if isXMM(v0) && isM64(v1) {
 15818          self.require(ISA_SSE2)
 15819          p.domain = DomainMMXSSE
 15820          p.add(0, func(m *_Encoding, v []interface{}) {
 15821              m.emit(0x66)
 15822              m.rexo(hcode(v[0]), addr(v[1]), false)
 15823              m.emit(0x0f)
 15824              m.emit(0xd6)
 15825              m.mrsd(lcode(v[0]), addr(v[1]), 1)
 15826          })
 15827          p.add(0, func(m *_Encoding, v []interface{}) {
 15828              m.emit(0x66)
 15829              m.rexm(1, hcode(v[0]), addr(v[1]))
 15830              m.emit(0x0f)
 15831              m.emit(0x7e)
 15832              m.mrsd(lcode(v[0]), addr(v[1]), 1)
 15833          })
 15834      }
 15835      if p.len == 0 {
 15836          panic("invalid operands for MOVQ")
 15837      }
 15838      return p
 15839  }
 15840  
 15841  // MOVQ2DQ performs "Move Quadword from MMX Technology to XMM Register".
 15842  //
 15843  // Mnemonic        : MOVQ2DQ
 15844  // Supported forms : (1 form)
 15845  //
 15846  //    * MOVQ2DQ mm, xmm    [SSE2]
 15847  //
func (self *Program) MOVQ2DQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVQ2DQ", 2, Operands { v0, v1 })
    // MOVQ2DQ mm, xmm
    if isMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: F3 [REX] 0F D6 /r — ModRM reg = xmm destination, rm = mm source
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xd6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for MOVQ2DQ")
    }
    return p
}
 15867  
 15868  // MOVSBL performs "Move with Sign-Extension".
 15869  //
 15870  // Mnemonic        : MOVSX
 15871  // Supported forms : (2 forms)
 15872  //
 15873  //    * MOVSBL r8, r32
 15874  //    * MOVSBL m8, r32
 15875  //
func (self *Program) MOVSBL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVSBL", 2, Operands { v0, v1 })
    // MOVSBL r8, r32
    if isReg8(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: [REX] 0F BE /r — REX is forced when the source is a REX-only byte register (isReg8REX)
            m.rexo(hcode(v[1]), v[0], isReg8REX(v[0]))
            m.emit(0x0f)
            m.emit(0xbe)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVSBL m8, r32
    if isM8(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: [REX] 0F BE /r with a memory source operand
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xbe)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for MOVSBL")
    }
    return p
}
 15903  
 15904  // MOVSBQ performs "Move with Sign-Extension".
 15905  //
 15906  // Mnemonic        : MOVSX
 15907  // Supported forms : (2 forms)
 15908  //
 15909  //    * MOVSBQ r8, r64
 15910  //    * MOVSBQ m8, r64
 15911  //
func (self *Program) MOVSBQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVSBQ", 2, Operands { v0, v1 })
    // MOVSBQ r8, r64
    if isReg8(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: REX.W 0F BE /r — 0x48 base byte sets REX.W; R/B bits carry high register numbers
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
            m.emit(0x0f)
            m.emit(0xbe)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVSBQ m8, r64
    if isM8(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: REX.W 0F BE /r with a memory source operand (rexm(1, ...) emits REX.W)
            m.rexm(1, hcode(v[1]), addr(v[0]))
            m.emit(0x0f)
            m.emit(0xbe)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for MOVSBQ")
    }
    return p
}
 15939  
 15940  // MOVSBW performs "Move with Sign-Extension".
 15941  //
 15942  // Mnemonic        : MOVSX
 15943  // Supported forms : (2 forms)
 15944  //
 15945  //    * MOVSBW r8, r16
 15946  //    * MOVSBW m8, r16
 15947  //
func (self *Program) MOVSBW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVSBW", 2, Operands { v0, v1 })
    // MOVSBW r8, r16
    if isReg8(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: 66 [REX] 0F BE /r — 0x66 operand-size prefix selects the 16-bit destination
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], isReg8REX(v[0]))
            m.emit(0x0f)
            m.emit(0xbe)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVSBW m8, r16
    if isM8(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: 66 [REX] 0F BE /r with a memory source operand
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xbe)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for MOVSBW")
    }
    return p
}
 15977  
 15978  // MOVSD performs "Move Scalar Double-Precision Floating-Point Value".
 15979  //
 15980  // Mnemonic        : MOVSD
 15981  // Supported forms : (3 forms)
 15982  //
 15983  //    * MOVSD xmm, xmm    [SSE2]
 15984  //    * MOVSD m64, xmm    [SSE2]
 15985  //    * MOVSD xmm, m64    [SSE2]
 15986  //
func (self *Program) MOVSD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVSD", 2, Operands { v0, v1 })
    // MOVSD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Two equivalent register-register encodings are registered; the
        // instruction selector picks between them.
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: F2 [REX] 0F 10 /r — "load" direction, ModRM reg = destination
            m.emit(0xf2)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: F2 [REX] 0F 11 /r — "store" direction, ModRM reg = source
            m.emit(0xf2)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // MOVSD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: F2 [REX] 0F 10 /r — load scalar double from memory
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x10)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // MOVSD xmm, m64
    if isXMM(v0) && isM64(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: F2 [REX] 0F 11 /r — store scalar double to memory
            m.emit(0xf2)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for MOVSD")
    }
    return p
}
 16037  
 16038  // MOVSHDUP performs "Move Packed Single-FP High and Duplicate".
 16039  //
 16040  // Mnemonic        : MOVSHDUP
 16041  // Supported forms : (2 forms)
 16042  //
 16043  //    * MOVSHDUP xmm, xmm     [SSE3]
 16044  //    * MOVSHDUP m128, xmm    [SSE3]
 16045  //
func (self *Program) MOVSHDUP(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVSHDUP", 2, Operands { v0, v1 })
    // MOVSHDUP xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: F3 [REX] 0F 16 /r — register-direct form
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x16)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVSHDUP m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: F3 [REX] 0F 16 /r with a memory source operand
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x16)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for MOVSHDUP")
    }
    return p
}
 16077  
 16078  // MOVSLDUP performs "Move Packed Single-FP Low and Duplicate".
 16079  //
 16080  // Mnemonic        : MOVSLDUP
 16081  // Supported forms : (2 forms)
 16082  //
 16083  //    * MOVSLDUP xmm, xmm     [SSE3]
 16084  //    * MOVSLDUP m128, xmm    [SSE3]
 16085  //
func (self *Program) MOVSLDUP(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVSLDUP", 2, Operands { v0, v1 })
    // MOVSLDUP xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: F3 [REX] 0F 12 /r — register-direct form
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x12)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVSLDUP m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: F3 [REX] 0F 12 /r with a memory source operand
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x12)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for MOVSLDUP")
    }
    return p
}
 16117  
 16118  // MOVSLQ performs "Move Doubleword to Quadword with Sign-Extension".
 16119  //
 16120  // Mnemonic        : MOVSXD
 16121  // Supported forms : (2 forms)
 16122  //
 16123  //    * MOVSLQ r32, r64
 16124  //    * MOVSLQ m32, r64
 16125  //
func (self *Program) MOVSLQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVSLQ", 2, Operands { v0, v1 })
    // MOVSLQ r32, r64
    if isReg32(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: REX.W 63 /r (MOVSXD) — 0x48 base byte sets REX.W
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
            m.emit(0x63)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVSLQ m32, r64
    if isM32(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: REX.W 63 /r with a memory source operand (rexm(1, ...) emits REX.W)
            m.rexm(1, hcode(v[1]), addr(v[0]))
            m.emit(0x63)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for MOVSLQ")
    }
    return p
}
 16151  
 16152  // MOVSS performs "Move Scalar Single-Precision Floating-Point Values".
 16153  //
 16154  // Mnemonic        : MOVSS
 16155  // Supported forms : (3 forms)
 16156  //
 16157  //    * MOVSS xmm, xmm    [SSE]
 16158  //    * MOVSS m32, xmm    [SSE]
 16159  //    * MOVSS xmm, m32    [SSE]
 16160  //
func (self *Program) MOVSS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVSS", 2, Operands { v0, v1 })
    // MOVSS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // Two equivalent register-register encodings are registered; the
        // instruction selector picks between them.
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: F3 [REX] 0F 10 /r — "load" direction, ModRM reg = destination
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: F3 [REX] 0F 11 /r — "store" direction, ModRM reg = source
            m.emit(0xf3)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // MOVSS m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: F3 [REX] 0F 10 /r — load scalar single from memory
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x10)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // MOVSS xmm, m32
    if isXMM(v0) && isM32(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: F3 [REX] 0F 11 /r — store scalar single to memory
            m.emit(0xf3)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for MOVSS")
    }
    return p
}
 16211  
 16212  // MOVSWL performs "Move with Sign-Extension".
 16213  //
 16214  // Mnemonic        : MOVSX
 16215  // Supported forms : (2 forms)
 16216  //
 16217  //    * MOVSWL r16, r32
 16218  //    * MOVSWL m16, r32
 16219  //
func (self *Program) MOVSWL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVSWL", 2, Operands { v0, v1 })
    // MOVSWL r16, r32
    if isReg16(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: [REX] 0F BF /r — register-direct form
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xbf)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVSWL m16, r32
    if isM16(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: [REX] 0F BF /r with a memory source operand
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xbf)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for MOVSWL")
    }
    return p
}
 16247  
 16248  // MOVSWQ performs "Move with Sign-Extension".
 16249  //
 16250  // Mnemonic        : MOVSX
 16251  // Supported forms : (2 forms)
 16252  //
 16253  //    * MOVSWQ r16, r64
 16254  //    * MOVSWQ m16, r64
 16255  //
func (self *Program) MOVSWQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVSWQ", 2, Operands { v0, v1 })
    // MOVSWQ r16, r64
    if isReg16(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: REX.W 0F BF /r — 0x48 base byte sets REX.W
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
            m.emit(0x0f)
            m.emit(0xbf)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVSWQ m16, r64
    if isM16(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: REX.W 0F BF /r with a memory source operand (rexm(1, ...) emits REX.W)
            m.rexm(1, hcode(v[1]), addr(v[0]))
            m.emit(0x0f)
            m.emit(0xbf)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for MOVSWQ")
    }
    return p
}
 16283  
 16284  // MOVUPD performs "Move Unaligned Packed Double-Precision Floating-Point Values".
 16285  //
 16286  // Mnemonic        : MOVUPD
 16287  // Supported forms : (3 forms)
 16288  //
 16289  //    * MOVUPD xmm, xmm     [SSE2]
 16290  //    * MOVUPD m128, xmm    [SSE2]
 16291  //    * MOVUPD xmm, m128    [SSE2]
 16292  //
func (self *Program) MOVUPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVUPD", 2, Operands { v0, v1 })
    // MOVUPD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Two equivalent register-register encodings are registered; the
        // instruction selector picks between them.
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: 66 [REX] 0F 10 /r — "load" direction, ModRM reg = destination
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: 66 [REX] 0F 11 /r — "store" direction, ModRM reg = source
            m.emit(0x66)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // MOVUPD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: 66 [REX] 0F 10 /r — load 128 bits from memory
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x10)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // MOVUPD xmm, m128
    if isXMM(v0) && isM128(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: 66 [REX] 0F 11 /r — store 128 bits to memory
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for MOVUPD")
    }
    return p
}
 16343  
 16344  // MOVUPS performs "Move Unaligned Packed Single-Precision Floating-Point Values".
 16345  //
 16346  // Mnemonic        : MOVUPS
 16347  // Supported forms : (3 forms)
 16348  //
 16349  //    * MOVUPS xmm, xmm     [SSE]
 16350  //    * MOVUPS m128, xmm    [SSE]
 16351  //    * MOVUPS xmm, m128    [SSE]
 16352  //
func (self *Program) MOVUPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVUPS", 2, Operands { v0, v1 })
    // MOVUPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // Two equivalent register-register encodings are registered; the
        // instruction selector picks between them.
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: [REX] 0F 10 /r — "load" direction, ModRM reg = destination
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: [REX] 0F 11 /r — "store" direction, ModRM reg = source
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // MOVUPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: [REX] 0F 10 /r — load 128 bits from memory
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x10)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // MOVUPS xmm, m128
    if isXMM(v0) && isM128(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: [REX] 0F 11 /r — store 128 bits to memory
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for MOVUPS")
    }
    return p
}
 16399  
 16400  // MOVW performs "Move".
 16401  //
 16402  // Mnemonic        : MOV
 16403  // Supported forms : (5 forms)
 16404  //
 16405  //    * MOVW imm16, r16
 16406  //    * MOVW r16, r16
 16407  //    * MOVW m16, r16
 16408  //    * MOVW imm16, m16
 16409  //    * MOVW r16, m16
 16410  //
func (self *Program) MOVW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVW", 2, Operands { v0, v1 })
    // MOVW imm16, r16
    if isImm16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        // Two equivalent encodings are registered; the instruction
        // selector picks between them.
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: 66 [REX] C7 /0 iw — immediate through ModRM
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0xc7)
            m.emit(0xc0 | lcode(v[1]))
            m.imm2(toImmAny(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: 66 [REX] B8+rw iw — short form, register in opcode byte
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0xb8 | lcode(v[1]))
            m.imm2(toImmAny(v[0]))
        })
    }
    // MOVW r16, r16
    if isReg16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        // Two equivalent register-register encodings are registered.
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: 66 [REX] 89 /r — "store" direction, ModRM reg = source
            m.emit(0x66)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x89)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: 66 [REX] 8B /r — "load" direction, ModRM reg = destination
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x8b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVW m16, r16
    if isM16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: 66 [REX] 8B /r — load word from memory
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x8b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // MOVW imm16, m16
    if isImm16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: 66 [REX] C7 /0 iw — store immediate word to memory
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc7)
            m.mrsd(0, addr(v[1]), 1)
            m.imm2(toImmAny(v[0]))
        })
    }
    // MOVW r16, m16
    if isReg16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: 66 [REX] 89 /r — store word to memory
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x89)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for MOVW")
    }
    return p
}
 16482  
 16483  // MOVZBL performs "Move with Zero-Extend".
 16484  //
 16485  // Mnemonic        : MOVZX
 16486  // Supported forms : (2 forms)
 16487  //
 16488  //    * MOVZBL r8, r32
 16489  //    * MOVZBL m8, r32
 16490  //
func (self *Program) MOVZBL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVZBL", 2, Operands { v0, v1 })
    // MOVZBL r8, r32
    if isReg8(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: [REX] 0F B6 /r — REX is forced when the source is a REX-only byte register (isReg8REX)
            m.rexo(hcode(v[1]), v[0], isReg8REX(v[0]))
            m.emit(0x0f)
            m.emit(0xb6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVZBL m8, r32
    if isM8(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: [REX] 0F B6 /r with a memory source operand
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xb6)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for MOVZBL")
    }
    return p
}
 16518  
 16519  // MOVZBQ performs "Move with Zero-Extend".
 16520  //
 16521  // Mnemonic        : MOVZX
 16522  // Supported forms : (2 forms)
 16523  //
 16524  //    * MOVZBQ r8, r64
 16525  //    * MOVZBQ m8, r64
 16526  //
func (self *Program) MOVZBQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVZBQ", 2, Operands { v0, v1 })
    // MOVZBQ r8, r64
    if isReg8(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: REX.W 0F B6 /r — 0x48 base byte sets REX.W
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
            m.emit(0x0f)
            m.emit(0xb6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVZBQ m8, r64
    if isM8(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: REX.W 0F B6 /r with a memory source operand (rexm(1, ...) emits REX.W)
            m.rexm(1, hcode(v[1]), addr(v[0]))
            m.emit(0x0f)
            m.emit(0xb6)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for MOVZBQ")
    }
    return p
}
 16554  
 16555  // MOVZBW performs "Move with Zero-Extend".
 16556  //
 16557  // Mnemonic        : MOVZX
 16558  // Supported forms : (2 forms)
 16559  //
 16560  //    * MOVZBW r8, r16
 16561  //    * MOVZBW m8, r16
 16562  //
func (self *Program) MOVZBW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVZBW", 2, Operands { v0, v1 })
    // MOVZBW r8, r16
    if isReg8(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: 66 [REX] 0F B6 /r — 0x66 operand-size prefix selects the 16-bit destination
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], isReg8REX(v[0]))
            m.emit(0x0f)
            m.emit(0xb6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVZBW m8, r16
    if isM8(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: 66 [REX] 0F B6 /r with a memory source operand
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xb6)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for MOVZBW")
    }
    return p
}
 16592  
 16593  // MOVZWL performs "Move with Zero-Extend".
 16594  //
 16595  // Mnemonic        : MOVZX
 16596  // Supported forms : (2 forms)
 16597  //
 16598  //    * MOVZWL r16, r32
 16599  //    * MOVZWL m16, r32
 16600  //
func (self *Program) MOVZWL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVZWL", 2, Operands { v0, v1 })
    // MOVZWL r16, r32
    if isReg16(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: [REX] 0F B7 /r — register-direct form
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xb7)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVZWL m16, r32
    if isM16(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: [REX] 0F B7 /r with a memory source operand
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xb7)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for MOVZWL")
    }
    return p
}
 16628  
 16629  // MOVZWQ performs "Move with Zero-Extend".
 16630  //
 16631  // Mnemonic        : MOVZX
 16632  // Supported forms : (2 forms)
 16633  //
 16634  //    * MOVZWQ r16, r64
 16635  //    * MOVZWQ m16, r64
 16636  //
func (self *Program) MOVZWQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVZWQ", 2, Operands { v0, v1 })
    // MOVZWQ r16, r64
    if isReg16(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: REX.W 0F B7 /r — 0x48 base byte sets REX.W
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
            m.emit(0x0f)
            m.emit(0xb7)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVZWQ m16, r64
    if isM16(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: REX.W 0F B7 /r with a memory source operand (rexm(1, ...) emits REX.W)
            m.rexm(1, hcode(v[1]), addr(v[0]))
            m.emit(0x0f)
            m.emit(0xb7)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for MOVZWQ")
    }
    return p
}
 16664  
 16665  // MPSADBW performs "Compute Multiple Packed Sums of Absolute Difference".
 16666  //
 16667  // Mnemonic        : MPSADBW
 16668  // Supported forms : (2 forms)
 16669  //
 16670  //    * MPSADBW imm8, xmm, xmm     [SSE4.1]
 16671  //    * MPSADBW imm8, m128, xmm    [SSE4.1]
 16672  //
func (self *Program) MPSADBW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("MPSADBW", 3, Operands { v0, v1, v2 })
    // MPSADBW imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 3A 42 /r ib — operand-size prefix is mandatory for this
            // SSE4.1 instruction; the imm8 selector trails the ModRM byte.
            m.emit(0x66)
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // MPSADBW imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: 66 0F 3A 42 /r ib with ModRM/SIB addressing.
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x42)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoding matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for MPSADBW")
    }
    return p
}
 16708  
 16709  // MULB performs "Unsigned Multiply".
 16710  //
 16711  // Mnemonic        : MUL
 16712  // Supported forms : (2 forms)
 16713  //
 16714  //    * MULB r8
 16715  //    * MULB m8
 16716  //
func (self *Program) MULB(v0 interface{}) *Instruction {
    p := self.alloc("MULB", 1, Operands { v0 })
    // MULB r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F6 /4 — unary opcode group; 0xe0 = ModRM mod=11 with reg field /4.
            // A REX prefix is forced for SPL/BPL/SIL/DIL-style byte registers.
            m.rexo(0, v[0], isReg8REX(v[0]))
            m.emit(0xf6)
            m.emit(0xe0 | lcode(v[0]))
        })
    }
    // MULB m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F6 /4 with a memory operand (ModRM reg field = 4).
            m.rexo(0, addr(v[0]), false)
            m.emit(0xf6)
            m.mrsd(4, addr(v[0]), 1)
        })
    }
    // No encoding matched the supplied operand.
    if p.len == 0 {
        panic("invalid operands for MULB")
    }
    return p
}
 16742  
 16743  // MULL performs "Unsigned Multiply".
 16744  //
 16745  // Mnemonic        : MUL
 16746  // Supported forms : (2 forms)
 16747  //
 16748  //    * MULL r32
 16749  //    * MULL m32
 16750  //
func (self *Program) MULL(v0 interface{}) *Instruction {
    p := self.alloc("MULL", 1, Operands { v0 })
    // MULL r32
    if isReg32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F7 /4 — 32-bit unsigned multiply; 0xe0 = register-direct ModRM, reg=/4.
            m.rexo(0, v[0], false)
            m.emit(0xf7)
            m.emit(0xe0 | lcode(v[0]))
        })
    }
    // MULL m32
    if isM32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F7 /4 with a memory operand (ModRM reg field = 4).
            m.rexo(0, addr(v[0]), false)
            m.emit(0xf7)
            m.mrsd(4, addr(v[0]), 1)
        })
    }
    // No encoding matched the supplied operand.
    if p.len == 0 {
        panic("invalid operands for MULL")
    }
    return p
}
 16776  
 16777  // MULPD performs "Multiply Packed Double-Precision Floating-Point Values".
 16778  //
 16779  // Mnemonic        : MULPD
 16780  // Supported forms : (2 forms)
 16781  //
 16782  //    * MULPD xmm, xmm     [SSE2]
 16783  //    * MULPD m128, xmm    [SSE2]
 16784  //
func (self *Program) MULPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MULPD", 2, Operands { v0, v1 })
    // MULPD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 59 /r — the 0x66 prefix distinguishes the packed-double
            // form from MULPS (plain 0F 59).
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MULPD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: 66 0F 59 /r with ModRM/SIB addressing.
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x59)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoding matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for MULPD")
    }
    return p
}
 16816  
 16817  // MULPS performs "Multiply Packed Single-Precision Floating-Point Values".
 16818  //
 16819  // Mnemonic        : MULPS
 16820  // Supported forms : (2 forms)
 16821  //
 16822  //    * MULPS xmm, xmm     [SSE]
 16823  //    * MULPS m128, xmm    [SSE]
 16824  //
func (self *Program) MULPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MULPS", 2, Operands { v0, v1 })
    // MULPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F 59 /r — packed-single multiply; no mandatory prefix.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MULPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: 0F 59 /r with ModRM/SIB addressing.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x59)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoding matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for MULPS")
    }
    return p
}
 16854  
 16855  // MULQ performs "Unsigned Multiply".
 16856  //
 16857  // Mnemonic        : MUL
 16858  // Supported forms : (2 forms)
 16859  //
 16860  //    * MULQ r64
 16861  //    * MULQ m64
 16862  //
func (self *Program) MULQ(v0 interface{}) *Instruction {
    p := self.alloc("MULQ", 1, Operands { v0 })
    // MULQ r64
    if isReg64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // REX.W (0x48, plus B bit for r8-r15) then F7 /4 — 64-bit multiply.
            m.emit(0x48 | hcode(v[0]))
            m.emit(0xf7)
            m.emit(0xe0 | lcode(v[0]))
        })
    }
    // MULQ m64
    if isM64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // REX.W prefix for the memory form, then F7 /4 with ModRM/SIB.
            m.rexm(1, 0, addr(v[0]))
            m.emit(0xf7)
            m.mrsd(4, addr(v[0]), 1)
        })
    }
    // No encoding matched the supplied operand.
    if p.len == 0 {
        panic("invalid operands for MULQ")
    }
    return p
}
 16888  
 16889  // MULSD performs "Multiply Scalar Double-Precision Floating-Point Values".
 16890  //
 16891  // Mnemonic        : MULSD
 16892  // Supported forms : (2 forms)
 16893  //
 16894  //    * MULSD xmm, xmm    [SSE2]
 16895  //    * MULSD m64, xmm    [SSE2]
 16896  //
func (self *Program) MULSD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MULSD", 2, Operands { v0, v1 })
    // MULSD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F2 0F 59 /r — the 0xF2 prefix selects the scalar-double form.
            m.emit(0xf2)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MULSD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: F2 0F 59 /r with ModRM/SIB addressing.
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x59)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoding matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for MULSD")
    }
    return p
}
 16928  
 16929  // MULSS performs "Multiply Scalar Single-Precision Floating-Point Values".
 16930  //
 16931  // Mnemonic        : MULSS
 16932  // Supported forms : (2 forms)
 16933  //
 16934  //    * MULSS xmm, xmm    [SSE]
 16935  //    * MULSS m32, xmm    [SSE]
 16936  //
func (self *Program) MULSS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MULSS", 2, Operands { v0, v1 })
    // MULSS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F3 0F 59 /r — the 0xF3 prefix selects the scalar-single form.
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MULSS m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: F3 0F 59 /r with ModRM/SIB addressing.
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x59)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoding matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for MULSS")
    }
    return p
}
 16968  
 16969  // MULW performs "Unsigned Multiply".
 16970  //
 16971  // Mnemonic        : MUL
 16972  // Supported forms : (2 forms)
 16973  //
 16974  //    * MULW r16
 16975  //    * MULW m16
 16976  //
func (self *Program) MULW(v0 interface{}) *Instruction {
    p := self.alloc("MULW", 1, Operands { v0 })
    // MULW r16
    if isReg16(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 F7 /4 — operand-size prefix narrows the multiply to 16 bits.
            m.emit(0x66)
            m.rexo(0, v[0], false)
            m.emit(0xf7)
            m.emit(0xe0 | lcode(v[0]))
        })
    }
    // MULW m16
    if isM16(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 F7 /4 with a memory operand (ModRM reg field = 4).
            m.emit(0x66)
            m.rexo(0, addr(v[0]), false)
            m.emit(0xf7)
            m.mrsd(4, addr(v[0]), 1)
        })
    }
    // No encoding matched the supplied operand.
    if p.len == 0 {
        panic("invalid operands for MULW")
    }
    return p
}
 17004  
 17005  // MULXL performs "Unsigned Multiply Without Affecting Flags".
 17006  //
 17007  // Mnemonic        : MULX
 17008  // Supported forms : (2 forms)
 17009  //
 17010  //    * MULXL r32, r32, r32    [BMI2]
 17011  //    * MULXL m32, r32, r32    [BMI2]
 17012  //
func (self *Program) MULXL(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("MULXL", 3, Operands { v0, v1, v2 })
    // MULXL r32, r32, r32
    if isReg32(v0) && isReg32(v1) && isReg32(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix (VEX.F2.0F38.W0 F6 /r):
            // byte 2 carries the inverted R/B bits, byte 3 carries the
            // inverted vvvv field holding the second operand (v1).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7b ^ (hlcode(v[1]) << 3))
            m.emit(0xf6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // MULXL m32, r32, r32
    if isM32(v0) && isReg32(v1) && isReg32(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form delegates VEX prefix construction to vex3.
            m.vex3(0xc4, 0b10, 0x03, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xf6)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No encoding matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for MULXL")
    }
    return p
}
 17042  
 17043  // MULXQ performs "Unsigned Multiply Without Affecting Flags".
 17044  //
 17045  // Mnemonic        : MULX
 17046  // Supported forms : (2 forms)
 17047  //
 17048  //    * MULXQ r64, r64, r64    [BMI2]
 17049  //    * MULXQ m64, r64, r64    [BMI2]
 17050  //
func (self *Program) MULXQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("MULXQ", 3, Operands { v0, v1, v2 })
    // MULXQ r64, r64, r64
    if isReg64(v0) && isReg64(v1) && isReg64(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix (VEX.F2.0F38.W1 F6 /r); 0xfb
            // differs from MULXL's 0x7b by the W bit, selecting 64-bit width.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfb ^ (hlcode(v[1]) << 3))
            m.emit(0xf6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // MULXQ m64, r64, r64
    if isM64(v0) && isReg64(v1) && isReg64(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: 0x83 (vs 0x03 in MULXL) sets the W bit via vex3.
            m.vex3(0xc4, 0b10, 0x83, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xf6)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No encoding matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for MULXQ")
    }
    return p
}
 17080  
 17081  // MWAIT performs "Monitor Wait".
 17082  //
 17083  // Mnemonic        : MWAIT
 17084  // Supported forms : (1 form)
 17085  //
 17086  //    * MWAIT    [MONITOR]
 17087  //
func (self *Program) MWAIT() *Instruction {
    p := self.alloc("MWAIT", 0, Operands {  })
    // MWAIT — fixed three-byte encoding 0F 01 C9; no operands to validate.
    self.require(ISA_MONITOR)
    p.domain = DomainMisc
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0x0f)
        m.emit(0x01)
        m.emit(0xc9)
    })
    return p
}
 17100  
 17101  // MWAITX performs "Monitor Wait with Timeout".
 17102  //
 17103  // Mnemonic        : MWAITX
 17104  // Supported forms : (1 form)
 17105  //
 17106  //    * MWAITX    [MONITORX]
 17107  //
func (self *Program) MWAITX() *Instruction {
    p := self.alloc("MWAITX", 0, Operands {  })
    // MWAITX — fixed three-byte encoding 0F 01 FB; no operands to validate.
    self.require(ISA_MONITORX)
    p.domain = DomainMisc
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0x0f)
        m.emit(0x01)
        m.emit(0xfb)
    })
    return p
}
 17120  
 17121  // NEGB performs "Two's Complement Negation".
 17122  //
 17123  // Mnemonic        : NEG
 17124  // Supported forms : (2 forms)
 17125  //
 17126  //    * NEGB r8
 17127  //    * NEGB m8
 17128  //
func (self *Program) NEGB(v0 interface{}) *Instruction {
    p := self.alloc("NEGB", 1, Operands { v0 })
    // NEGB r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F6 /3 — 0xd8 = register-direct ModRM with reg field /3.
            // A REX prefix is forced for SPL/BPL/SIL/DIL-style byte registers.
            m.rexo(0, v[0], isReg8REX(v[0]))
            m.emit(0xf6)
            m.emit(0xd8 | lcode(v[0]))
        })
    }
    // NEGB m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F6 /3 with a memory operand (ModRM reg field = 3).
            m.rexo(0, addr(v[0]), false)
            m.emit(0xf6)
            m.mrsd(3, addr(v[0]), 1)
        })
    }
    // No encoding matched the supplied operand.
    if p.len == 0 {
        panic("invalid operands for NEGB")
    }
    return p
}
 17154  
 17155  // NEGL performs "Two's Complement Negation".
 17156  //
 17157  // Mnemonic        : NEG
 17158  // Supported forms : (2 forms)
 17159  //
 17160  //    * NEGL r32
 17161  //    * NEGL m32
 17162  //
func (self *Program) NEGL(v0 interface{}) *Instruction {
    p := self.alloc("NEGL", 1, Operands { v0 })
    // NEGL r32
    if isReg32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F7 /3 — 32-bit negate; 0xd8 = register-direct ModRM, reg=/3.
            m.rexo(0, v[0], false)
            m.emit(0xf7)
            m.emit(0xd8 | lcode(v[0]))
        })
    }
    // NEGL m32
    if isM32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F7 /3 with a memory operand (ModRM reg field = 3).
            m.rexo(0, addr(v[0]), false)
            m.emit(0xf7)
            m.mrsd(3, addr(v[0]), 1)
        })
    }
    // No encoding matched the supplied operand.
    if p.len == 0 {
        panic("invalid operands for NEGL")
    }
    return p
}
 17188  
 17189  // NEGQ performs "Two's Complement Negation".
 17190  //
 17191  // Mnemonic        : NEG
 17192  // Supported forms : (2 forms)
 17193  //
 17194  //    * NEGQ r64
 17195  //    * NEGQ m64
 17196  //
func (self *Program) NEGQ(v0 interface{}) *Instruction {
    p := self.alloc("NEGQ", 1, Operands { v0 })
    // NEGQ r64
    if isReg64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // REX.W (0x48, plus B bit for r8-r15) then F7 /3 — 64-bit negate.
            m.emit(0x48 | hcode(v[0]))
            m.emit(0xf7)
            m.emit(0xd8 | lcode(v[0]))
        })
    }
    // NEGQ m64
    if isM64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // REX.W prefix for the memory form, then F7 /3 with ModRM/SIB.
            m.rexm(1, 0, addr(v[0]))
            m.emit(0xf7)
            m.mrsd(3, addr(v[0]), 1)
        })
    }
    // No encoding matched the supplied operand.
    if p.len == 0 {
        panic("invalid operands for NEGQ")
    }
    return p
}
 17222  
 17223  // NEGW performs "Two's Complement Negation".
 17224  //
 17225  // Mnemonic        : NEG
 17226  // Supported forms : (2 forms)
 17227  //
 17228  //    * NEGW r16
 17229  //    * NEGW m16
 17230  //
func (self *Program) NEGW(v0 interface{}) *Instruction {
    p := self.alloc("NEGW", 1, Operands { v0 })
    // NEGW r16
    if isReg16(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 F7 /3 — operand-size prefix narrows the negate to 16 bits.
            m.emit(0x66)
            m.rexo(0, v[0], false)
            m.emit(0xf7)
            m.emit(0xd8 | lcode(v[0]))
        })
    }
    // NEGW m16
    if isM16(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 F7 /3 with a memory operand (ModRM reg field = 3).
            m.emit(0x66)
            m.rexo(0, addr(v[0]), false)
            m.emit(0xf7)
            m.mrsd(3, addr(v[0]), 1)
        })
    }
    // No encoding matched the supplied operand.
    if p.len == 0 {
        panic("invalid operands for NEGW")
    }
    return p
}
 17258  
 17259  // NOP performs "No Operation".
 17260  //
 17261  // Mnemonic        : NOP
 17262  // Supported forms : (1 form)
 17263  //
 17264  //    * NOP
 17265  //
func (self *Program) NOP() *Instruction {
    p := self.alloc("NOP", 0, Operands {  })
    // NOP — single-byte encoding 0x90; always valid, no operand checks needed.
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0x90)
    })
    return p
}
 17275  
 17276  // NOTB performs "One's Complement Negation".
 17277  //
 17278  // Mnemonic        : NOT
 17279  // Supported forms : (2 forms)
 17280  //
 17281  //    * NOTB r8
 17282  //    * NOTB m8
 17283  //
func (self *Program) NOTB(v0 interface{}) *Instruction {
    p := self.alloc("NOTB", 1, Operands { v0 })
    // NOTB r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F6 /2 — 0xd0 = register-direct ModRM with reg field /2.
            // A REX prefix is forced for SPL/BPL/SIL/DIL-style byte registers.
            m.rexo(0, v[0], isReg8REX(v[0]))
            m.emit(0xf6)
            m.emit(0xd0 | lcode(v[0]))
        })
    }
    // NOTB m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F6 /2 with a memory operand (ModRM reg field = 2).
            m.rexo(0, addr(v[0]), false)
            m.emit(0xf6)
            m.mrsd(2, addr(v[0]), 1)
        })
    }
    // No encoding matched the supplied operand.
    if p.len == 0 {
        panic("invalid operands for NOTB")
    }
    return p
}
 17309  
 17310  // NOTL performs "One's Complement Negation".
 17311  //
 17312  // Mnemonic        : NOT
 17313  // Supported forms : (2 forms)
 17314  //
 17315  //    * NOTL r32
 17316  //    * NOTL m32
 17317  //
func (self *Program) NOTL(v0 interface{}) *Instruction {
    p := self.alloc("NOTL", 1, Operands { v0 })
    // NOTL r32
    if isReg32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F7 /2 — 32-bit one's-complement; 0xd0 = register-direct ModRM, reg=/2.
            m.rexo(0, v[0], false)
            m.emit(0xf7)
            m.emit(0xd0 | lcode(v[0]))
        })
    }
    // NOTL m32
    if isM32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F7 /2 with a memory operand (ModRM reg field = 2).
            m.rexo(0, addr(v[0]), false)
            m.emit(0xf7)
            m.mrsd(2, addr(v[0]), 1)
        })
    }
    // No encoding matched the supplied operand.
    if p.len == 0 {
        panic("invalid operands for NOTL")
    }
    return p
}
 17343  
 17344  // NOTQ performs "One's Complement Negation".
 17345  //
 17346  // Mnemonic        : NOT
 17347  // Supported forms : (2 forms)
 17348  //
 17349  //    * NOTQ r64
 17350  //    * NOTQ m64
 17351  //
func (self *Program) NOTQ(v0 interface{}) *Instruction {
    p := self.alloc("NOTQ", 1, Operands { v0 })
    // NOTQ r64
    if isReg64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // REX.W (0x48, plus B bit for r8-r15) then F7 /2 — 64-bit form.
            m.emit(0x48 | hcode(v[0]))
            m.emit(0xf7)
            m.emit(0xd0 | lcode(v[0]))
        })
    }
    // NOTQ m64
    if isM64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // REX.W prefix for the memory form, then F7 /2 with ModRM/SIB.
            m.rexm(1, 0, addr(v[0]))
            m.emit(0xf7)
            m.mrsd(2, addr(v[0]), 1)
        })
    }
    // No encoding matched the supplied operand.
    if p.len == 0 {
        panic("invalid operands for NOTQ")
    }
    return p
}
 17377  
 17378  // NOTW performs "One's Complement Negation".
 17379  //
 17380  // Mnemonic        : NOT
 17381  // Supported forms : (2 forms)
 17382  //
 17383  //    * NOTW r16
 17384  //    * NOTW m16
 17385  //
func (self *Program) NOTW(v0 interface{}) *Instruction {
    p := self.alloc("NOTW", 1, Operands { v0 })
    // NOTW r16
    if isReg16(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 F7 /2 — operand-size prefix narrows the operation to 16 bits.
            m.emit(0x66)
            m.rexo(0, v[0], false)
            m.emit(0xf7)
            m.emit(0xd0 | lcode(v[0]))
        })
    }
    // NOTW m16
    if isM16(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 F7 /2 with a memory operand (ModRM reg field = 2).
            m.emit(0x66)
            m.rexo(0, addr(v[0]), false)
            m.emit(0xf7)
            m.mrsd(2, addr(v[0]), 1)
        })
    }
    // No encoding matched the supplied operand.
    if p.len == 0 {
        panic("invalid operands for NOTW")
    }
    return p
}
 17413  
 17414  // ORB performs "Logical Inclusive OR".
 17415  //
 17416  // Mnemonic        : OR
 17417  // Supported forms : (6 forms)
 17418  //
 17419  //    * ORB imm8, al
 17420  //    * ORB imm8, r8
 17421  //    * ORB r8, r8
 17422  //    * ORB m8, r8
 17423  //    * ORB imm8, m8
 17424  //    * ORB r8, m8
 17425  //
func (self *Program) ORB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ORB", 2, Operands { v0, v1 })
    // ORB imm8, al
    if isImm8(v0) && v1 == AL {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0C ib — short accumulator form, no ModRM needed.
            m.emit(0x0c)
            m.imm1(toImmAny(v[0]))
        })
    }
    // ORB imm8, r8
    if isImm8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 80 /1 ib — immediate group; 0xc8 = register-direct ModRM, reg=/1.
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0x80)
            m.emit(0xc8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // ORB r8, r8
    if isReg8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        // Two equivalent encodings (08 /r and 0A /r with operands swapped);
        // both are registered so the assembler can pick either.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], isReg8REX(v[0]) || isReg8REX(v[1]))
            m.emit(0x08)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], isReg8REX(v[0]) || isReg8REX(v[1]))
            m.emit(0x0a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // ORB m8, r8
    if isM8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0A /r — register destination, memory source.
            m.rexo(hcode(v[1]), addr(v[0]), isReg8REX(v[1]))
            m.emit(0x0a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // ORB imm8, m8
    if isImm8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 80 /1 ib — immediate into a memory operand.
            m.rexo(0, addr(v[1]), false)
            m.emit(0x80)
            m.mrsd(1, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // ORB r8, m8
    if isReg8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 08 /r — register source, memory destination.
            m.rexo(hcode(v[0]), addr(v[1]), isReg8REX(v[0]))
            m.emit(0x08)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No encoding matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for ORB")
    }
    return p
}
 17493  
 17494  // ORL performs "Logical Inclusive OR".
 17495  //
 17496  // Mnemonic        : OR
 17497  // Supported forms : (8 forms)
 17498  //
 17499  //    * ORL imm32, eax
 17500  //    * ORL imm8, r32
 17501  //    * ORL imm32, r32
 17502  //    * ORL r32, r32
 17503  //    * ORL m32, r32
 17504  //    * ORL imm8, m32
 17505  //    * ORL imm32, m32
 17506  //    * ORL r32, m32
 17507  //
func (self *Program) ORL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ORL", 2, Operands { v0, v1 })
    // ORL imm32, eax
    if isImm32(v0) && v1 == EAX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0D id — short accumulator form, no ModRM needed.
            m.emit(0x0d)
            m.imm4(toImmAny(v[0]))
        })
    }
    // ORL imm8, r32
    if isImm8Ext(v0, 4) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 83 /1 ib — sign-extended 8-bit immediate; shorter than 81 /1 id.
            m.rexo(0, v[1], false)
            m.emit(0x83)
            m.emit(0xc8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // ORL imm32, r32
    if isImm32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 81 /1 id — full 32-bit immediate form.
            m.rexo(0, v[1], false)
            m.emit(0x81)
            m.emit(0xc8 | lcode(v[1]))
            m.imm4(toImmAny(v[0]))
        })
    }
    // ORL r32, r32
    if isReg32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        // Two equivalent encodings (09 /r and 0B /r with operands swapped);
        // both are registered so the assembler can pick either.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x09)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // ORL m32, r32
    if isM32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0B /r — register destination, memory source.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // ORL imm8, m32
    if isImm8Ext(v0, 4) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 83 /1 ib — sign-extended immediate into a memory operand.
            m.rexo(0, addr(v[1]), false)
            m.emit(0x83)
            m.mrsd(1, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // ORL imm32, m32
    if isImm32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 81 /1 id — full 32-bit immediate into a memory operand.
            m.rexo(0, addr(v[1]), false)
            m.emit(0x81)
            m.mrsd(1, addr(v[1]), 1)
            m.imm4(toImmAny(v[0]))
        })
    }
    // ORL r32, m32
    if isReg32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 09 /r — register source, memory destination.
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x09)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No encoding matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for ORL")
    }
    return p
}
 17595  
 17596  // ORPD performs "Bitwise Logical OR of Double-Precision Floating-Point Values".
 17597  //
 17598  // Mnemonic        : ORPD
 17599  // Supported forms : (2 forms)
 17600  //
 17601  //    * ORPD xmm, xmm     [SSE2]
 17602  //    * ORPD m128, xmm    [SSE2]
 17603  //
 17604  func (self *Program) ORPD(v0 interface{}, v1 interface{}) *Instruction {
 17605      p := self.alloc("ORPD", 2, Operands { v0, v1 })
 17606      // ORPD xmm, xmm
 17607      if isXMM(v0) && isXMM(v1) {
 17608          self.require(ISA_SSE2)
 17609          p.domain = DomainMMXSSE
 17610          p.add(0, func(m *_Encoding, v []interface{}) {
 17611              m.emit(0x66)
 17612              m.rexo(hcode(v[1]), v[0], false)
 17613              m.emit(0x0f)
 17614              m.emit(0x56)
 17615              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 17616          })
 17617      }
 17618      // ORPD m128, xmm
 17619      if isM128(v0) && isXMM(v1) {
 17620          self.require(ISA_SSE2)
 17621          p.domain = DomainMMXSSE
 17622          p.add(0, func(m *_Encoding, v []interface{}) {
 17623              m.emit(0x66)
 17624              m.rexo(hcode(v[1]), addr(v[0]), false)
 17625              m.emit(0x0f)
 17626              m.emit(0x56)
 17627              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 17628          })
 17629      }
 17630      if p.len == 0 {
 17631          panic("invalid operands for ORPD")
 17632      }
 17633      return p
 17634  }
 17635  
 17636  // ORPS performs "Bitwise Logical OR of Single-Precision Floating-Point Values".
 17637  //
 17638  // Mnemonic        : ORPS
 17639  // Supported forms : (2 forms)
 17640  //
 17641  //    * ORPS xmm, xmm     [SSE]
 17642  //    * ORPS m128, xmm    [SSE]
 17643  //
 17644  func (self *Program) ORPS(v0 interface{}, v1 interface{}) *Instruction {
 17645      p := self.alloc("ORPS", 2, Operands { v0, v1 })
 17646      // ORPS xmm, xmm
 17647      if isXMM(v0) && isXMM(v1) {
 17648          self.require(ISA_SSE)
 17649          p.domain = DomainMMXSSE
 17650          p.add(0, func(m *_Encoding, v []interface{}) {
 17651              m.rexo(hcode(v[1]), v[0], false)
 17652              m.emit(0x0f)
 17653              m.emit(0x56)
 17654              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 17655          })
 17656      }
 17657      // ORPS m128, xmm
 17658      if isM128(v0) && isXMM(v1) {
 17659          self.require(ISA_SSE)
 17660          p.domain = DomainMMXSSE
 17661          p.add(0, func(m *_Encoding, v []interface{}) {
 17662              m.rexo(hcode(v[1]), addr(v[0]), false)
 17663              m.emit(0x0f)
 17664              m.emit(0x56)
 17665              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 17666          })
 17667      }
 17668      if p.len == 0 {
 17669          panic("invalid operands for ORPS")
 17670      }
 17671      return p
 17672  }
 17673  
 17674  // ORQ performs "Logical Inclusive OR".
 17675  //
 17676  // Mnemonic        : OR
 17677  // Supported forms : (8 forms)
 17678  //
 17679  //    * ORQ imm32, rax
 17680  //    * ORQ imm8, r64
 17681  //    * ORQ imm32, r64
 17682  //    * ORQ r64, r64
 17683  //    * ORQ m64, r64
 17684  //    * ORQ imm8, m64
 17685  //    * ORQ imm32, m64
 17686  //    * ORQ r64, m64
 17687  //
func (self *Program) ORQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ORQ", 2, Operands { v0, v1 })
    // NOTE(review): the operand predicates below are not mutually exclusive —
    // presumably a small immediate satisfies both the imm8 and imm32 checks, so
    // several forms may each register a candidate encoding and the encoder
    // later picks among them. The relative order of p.add calls is therefore
    // significant; do not reorder these blocks.
    // ORQ imm32, rax
    // Short accumulator form: REX.W + 0D id.
    if isImm32(v0) && v1 == RAX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48)
            m.emit(0x0d)
            m.imm4(toImmAny(v[0]))
        })
    }
    // ORQ imm8, r64
    // REX.W + 83 /1 ib (immediate is sign-extended to 64 bits).
    if isImm8Ext(v0, 8) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0x83)
            m.emit(0xc8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // ORQ imm32, r64
    // REX.W + 81 /1 id (immediate is sign-extended to 64 bits).
    if isImm32Ext(v0, 8) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0x81)
            m.emit(0xc8 | lcode(v[1]))
            m.imm4(toImmAny(v[0]))
        })
    }
    // ORQ r64, r64
    // Two equivalent encodings: 09 /r (r/m <- reg) and 0B /r (reg <- r/m);
    // both are registered and the encoder chooses one.
    if isReg64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1]))
            m.emit(0x09)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
            m.emit(0x0b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // ORQ m64, r64
    // REX.W + 0B /r with a memory operand.
    if isM64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))
            m.emit(0x0b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // ORQ imm8, m64
    // REX.W + 83 /1 ib with a memory operand.
    if isImm8Ext(v0, 8) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0x83)
            m.mrsd(1, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // ORQ imm32, m64
    // REX.W + 81 /1 id with a memory operand.
    if isImm32Ext(v0, 8) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0x81)
            m.mrsd(1, addr(v[1]), 1)
            m.imm4(toImmAny(v[0]))
        })
    }
    // ORQ r64, m64
    // REX.W + 09 /r with a memory operand.
    if isReg64(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[0]), addr(v[1]))
            m.emit(0x09)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for ORQ")
    }
    return p
}
 17776  
 17777  // ORW performs "Logical Inclusive OR".
 17778  //
 17779  // Mnemonic        : OR
 17780  // Supported forms : (8 forms)
 17781  //
 17782  //    * ORW imm16, ax
 17783  //    * ORW imm8, r16
 17784  //    * ORW imm16, r16
 17785  //    * ORW r16, r16
 17786  //    * ORW m16, r16
 17787  //    * ORW imm8, m16
 17788  //    * ORW imm16, m16
 17789  //    * ORW r16, m16
 17790  //
func (self *Program) ORW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ORW", 2, Operands { v0, v1 })
    // NOTE(review): every form carries the 0x66 operand-size prefix to select
    // 16-bit operation. The operand predicates below are not mutually
    // exclusive — presumably a small immediate satisfies both the imm8 and
    // imm16 checks — so several forms may each register a candidate encoding;
    // the relative order of p.add calls is significant, do not reorder.
    // ORW imm16, ax
    // Short accumulator form: 66 0D iw.
    if isImm16(v0) && v1 == AX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.emit(0x0d)
            m.imm2(toImmAny(v[0]))
        })
    }
    // ORW imm8, r16
    // 66 83 /1 ib (immediate is sign-extended to 16 bits).
    if isImm8Ext(v0, 2) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0x83)
            m.emit(0xc8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // ORW imm16, r16
    // 66 81 /1 iw.
    if isImm16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0x81)
            m.emit(0xc8 | lcode(v[1]))
            m.imm2(toImmAny(v[0]))
        })
    }
    // ORW r16, r16
    // Two equivalent encodings: 66 09 /r (r/m <- reg) and 66 0B /r (reg <- r/m).
    if isReg16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x09)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // ORW m16, r16
    // 66 0B /r with a memory operand.
    if isM16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // ORW imm8, m16
    // 66 83 /1 ib with a memory operand.
    if isImm8Ext(v0, 2) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0x83)
            m.mrsd(1, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // ORW imm16, m16
    // 66 81 /1 iw with a memory operand.
    if isImm16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0x81)
            m.mrsd(1, addr(v[1]), 1)
            m.imm2(toImmAny(v[0]))
        })
    }
    // ORW r16, m16
    // 66 09 /r with a memory operand.
    if isReg16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x09)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for ORW")
    }
    return p
}
 17887  
 17888  // PABSB performs "Packed Absolute Value of Byte Integers".
 17889  //
 17890  // Mnemonic        : PABSB
 17891  // Supported forms : (4 forms)
 17892  //
 17893  //    * PABSB mm, mm       [SSSE3]
 17894  //    * PABSB m64, mm      [SSSE3]
 17895  //    * PABSB xmm, xmm     [SSSE3]
 17896  //    * PABSB m128, xmm    [SSSE3]
 17897  //
 17898  func (self *Program) PABSB(v0 interface{}, v1 interface{}) *Instruction {
 17899      p := self.alloc("PABSB", 2, Operands { v0, v1 })
 17900      // PABSB mm, mm
 17901      if isMM(v0) && isMM(v1) {
 17902          self.require(ISA_SSSE3)
 17903          p.domain = DomainMMXSSE
 17904          p.add(0, func(m *_Encoding, v []interface{}) {
 17905              m.rexo(hcode(v[1]), v[0], false)
 17906              m.emit(0x0f)
 17907              m.emit(0x38)
 17908              m.emit(0x1c)
 17909              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 17910          })
 17911      }
 17912      // PABSB m64, mm
 17913      if isM64(v0) && isMM(v1) {
 17914          self.require(ISA_SSSE3)
 17915          p.domain = DomainMMXSSE
 17916          p.add(0, func(m *_Encoding, v []interface{}) {
 17917              m.rexo(hcode(v[1]), addr(v[0]), false)
 17918              m.emit(0x0f)
 17919              m.emit(0x38)
 17920              m.emit(0x1c)
 17921              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 17922          })
 17923      }
 17924      // PABSB xmm, xmm
 17925      if isXMM(v0) && isXMM(v1) {
 17926          self.require(ISA_SSSE3)
 17927          p.domain = DomainMMXSSE
 17928          p.add(0, func(m *_Encoding, v []interface{}) {
 17929              m.emit(0x66)
 17930              m.rexo(hcode(v[1]), v[0], false)
 17931              m.emit(0x0f)
 17932              m.emit(0x38)
 17933              m.emit(0x1c)
 17934              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 17935          })
 17936      }
 17937      // PABSB m128, xmm
 17938      if isM128(v0) && isXMM(v1) {
 17939          self.require(ISA_SSSE3)
 17940          p.domain = DomainMMXSSE
 17941          p.add(0, func(m *_Encoding, v []interface{}) {
 17942              m.emit(0x66)
 17943              m.rexo(hcode(v[1]), addr(v[0]), false)
 17944              m.emit(0x0f)
 17945              m.emit(0x38)
 17946              m.emit(0x1c)
 17947              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 17948          })
 17949      }
 17950      if p.len == 0 {
 17951          panic("invalid operands for PABSB")
 17952      }
 17953      return p
 17954  }
 17955  
 17956  // PABSD performs "Packed Absolute Value of Doubleword Integers".
 17957  //
 17958  // Mnemonic        : PABSD
 17959  // Supported forms : (4 forms)
 17960  //
 17961  //    * PABSD mm, mm       [SSSE3]
 17962  //    * PABSD m64, mm      [SSSE3]
 17963  //    * PABSD xmm, xmm     [SSSE3]
 17964  //    * PABSD m128, xmm    [SSSE3]
 17965  //
 17966  func (self *Program) PABSD(v0 interface{}, v1 interface{}) *Instruction {
 17967      p := self.alloc("PABSD", 2, Operands { v0, v1 })
 17968      // PABSD mm, mm
 17969      if isMM(v0) && isMM(v1) {
 17970          self.require(ISA_SSSE3)
 17971          p.domain = DomainMMXSSE
 17972          p.add(0, func(m *_Encoding, v []interface{}) {
 17973              m.rexo(hcode(v[1]), v[0], false)
 17974              m.emit(0x0f)
 17975              m.emit(0x38)
 17976              m.emit(0x1e)
 17977              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 17978          })
 17979      }
 17980      // PABSD m64, mm
 17981      if isM64(v0) && isMM(v1) {
 17982          self.require(ISA_SSSE3)
 17983          p.domain = DomainMMXSSE
 17984          p.add(0, func(m *_Encoding, v []interface{}) {
 17985              m.rexo(hcode(v[1]), addr(v[0]), false)
 17986              m.emit(0x0f)
 17987              m.emit(0x38)
 17988              m.emit(0x1e)
 17989              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 17990          })
 17991      }
 17992      // PABSD xmm, xmm
 17993      if isXMM(v0) && isXMM(v1) {
 17994          self.require(ISA_SSSE3)
 17995          p.domain = DomainMMXSSE
 17996          p.add(0, func(m *_Encoding, v []interface{}) {
 17997              m.emit(0x66)
 17998              m.rexo(hcode(v[1]), v[0], false)
 17999              m.emit(0x0f)
 18000              m.emit(0x38)
 18001              m.emit(0x1e)
 18002              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 18003          })
 18004      }
 18005      // PABSD m128, xmm
 18006      if isM128(v0) && isXMM(v1) {
 18007          self.require(ISA_SSSE3)
 18008          p.domain = DomainMMXSSE
 18009          p.add(0, func(m *_Encoding, v []interface{}) {
 18010              m.emit(0x66)
 18011              m.rexo(hcode(v[1]), addr(v[0]), false)
 18012              m.emit(0x0f)
 18013              m.emit(0x38)
 18014              m.emit(0x1e)
 18015              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 18016          })
 18017      }
 18018      if p.len == 0 {
 18019          panic("invalid operands for PABSD")
 18020      }
 18021      return p
 18022  }
 18023  
 18024  // PABSW performs "Packed Absolute Value of Word Integers".
 18025  //
 18026  // Mnemonic        : PABSW
 18027  // Supported forms : (4 forms)
 18028  //
 18029  //    * PABSW mm, mm       [SSSE3]
 18030  //    * PABSW m64, mm      [SSSE3]
 18031  //    * PABSW xmm, xmm     [SSSE3]
 18032  //    * PABSW m128, xmm    [SSSE3]
 18033  //
 18034  func (self *Program) PABSW(v0 interface{}, v1 interface{}) *Instruction {
 18035      p := self.alloc("PABSW", 2, Operands { v0, v1 })
 18036      // PABSW mm, mm
 18037      if isMM(v0) && isMM(v1) {
 18038          self.require(ISA_SSSE3)
 18039          p.domain = DomainMMXSSE
 18040          p.add(0, func(m *_Encoding, v []interface{}) {
 18041              m.rexo(hcode(v[1]), v[0], false)
 18042              m.emit(0x0f)
 18043              m.emit(0x38)
 18044              m.emit(0x1d)
 18045              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 18046          })
 18047      }
 18048      // PABSW m64, mm
 18049      if isM64(v0) && isMM(v1) {
 18050          self.require(ISA_SSSE3)
 18051          p.domain = DomainMMXSSE
 18052          p.add(0, func(m *_Encoding, v []interface{}) {
 18053              m.rexo(hcode(v[1]), addr(v[0]), false)
 18054              m.emit(0x0f)
 18055              m.emit(0x38)
 18056              m.emit(0x1d)
 18057              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 18058          })
 18059      }
 18060      // PABSW xmm, xmm
 18061      if isXMM(v0) && isXMM(v1) {
 18062          self.require(ISA_SSSE3)
 18063          p.domain = DomainMMXSSE
 18064          p.add(0, func(m *_Encoding, v []interface{}) {
 18065              m.emit(0x66)
 18066              m.rexo(hcode(v[1]), v[0], false)
 18067              m.emit(0x0f)
 18068              m.emit(0x38)
 18069              m.emit(0x1d)
 18070              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 18071          })
 18072      }
 18073      // PABSW m128, xmm
 18074      if isM128(v0) && isXMM(v1) {
 18075          self.require(ISA_SSSE3)
 18076          p.domain = DomainMMXSSE
 18077          p.add(0, func(m *_Encoding, v []interface{}) {
 18078              m.emit(0x66)
 18079              m.rexo(hcode(v[1]), addr(v[0]), false)
 18080              m.emit(0x0f)
 18081              m.emit(0x38)
 18082              m.emit(0x1d)
 18083              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 18084          })
 18085      }
 18086      if p.len == 0 {
 18087          panic("invalid operands for PABSW")
 18088      }
 18089      return p
 18090  }
 18091  
 18092  // PACKSSDW performs "Pack Doublewords into Words with Signed Saturation".
 18093  //
 18094  // Mnemonic        : PACKSSDW
 18095  // Supported forms : (4 forms)
 18096  //
 18097  //    * PACKSSDW mm, mm       [MMX]
 18098  //    * PACKSSDW m64, mm      [MMX]
 18099  //    * PACKSSDW xmm, xmm     [SSE2]
 18100  //    * PACKSSDW m128, xmm    [SSE2]
 18101  //
 18102  func (self *Program) PACKSSDW(v0 interface{}, v1 interface{}) *Instruction {
 18103      p := self.alloc("PACKSSDW", 2, Operands { v0, v1 })
 18104      // PACKSSDW mm, mm
 18105      if isMM(v0) && isMM(v1) {
 18106          self.require(ISA_MMX)
 18107          p.domain = DomainMMXSSE
 18108          p.add(0, func(m *_Encoding, v []interface{}) {
 18109              m.rexo(hcode(v[1]), v[0], false)
 18110              m.emit(0x0f)
 18111              m.emit(0x6b)
 18112              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 18113          })
 18114      }
 18115      // PACKSSDW m64, mm
 18116      if isM64(v0) && isMM(v1) {
 18117          self.require(ISA_MMX)
 18118          p.domain = DomainMMXSSE
 18119          p.add(0, func(m *_Encoding, v []interface{}) {
 18120              m.rexo(hcode(v[1]), addr(v[0]), false)
 18121              m.emit(0x0f)
 18122              m.emit(0x6b)
 18123              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 18124          })
 18125      }
 18126      // PACKSSDW xmm, xmm
 18127      if isXMM(v0) && isXMM(v1) {
 18128          self.require(ISA_SSE2)
 18129          p.domain = DomainMMXSSE
 18130          p.add(0, func(m *_Encoding, v []interface{}) {
 18131              m.emit(0x66)
 18132              m.rexo(hcode(v[1]), v[0], false)
 18133              m.emit(0x0f)
 18134              m.emit(0x6b)
 18135              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 18136          })
 18137      }
 18138      // PACKSSDW m128, xmm
 18139      if isM128(v0) && isXMM(v1) {
 18140          self.require(ISA_SSE2)
 18141          p.domain = DomainMMXSSE
 18142          p.add(0, func(m *_Encoding, v []interface{}) {
 18143              m.emit(0x66)
 18144              m.rexo(hcode(v[1]), addr(v[0]), false)
 18145              m.emit(0x0f)
 18146              m.emit(0x6b)
 18147              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 18148          })
 18149      }
 18150      if p.len == 0 {
 18151          panic("invalid operands for PACKSSDW")
 18152      }
 18153      return p
 18154  }
 18155  
 18156  // PACKSSWB performs "Pack Words into Bytes with Signed Saturation".
 18157  //
 18158  // Mnemonic        : PACKSSWB
 18159  // Supported forms : (4 forms)
 18160  //
 18161  //    * PACKSSWB mm, mm       [MMX]
 18162  //    * PACKSSWB m64, mm      [MMX]
 18163  //    * PACKSSWB xmm, xmm     [SSE2]
 18164  //    * PACKSSWB m128, xmm    [SSE2]
 18165  //
 18166  func (self *Program) PACKSSWB(v0 interface{}, v1 interface{}) *Instruction {
 18167      p := self.alloc("PACKSSWB", 2, Operands { v0, v1 })
 18168      // PACKSSWB mm, mm
 18169      if isMM(v0) && isMM(v1) {
 18170          self.require(ISA_MMX)
 18171          p.domain = DomainMMXSSE
 18172          p.add(0, func(m *_Encoding, v []interface{}) {
 18173              m.rexo(hcode(v[1]), v[0], false)
 18174              m.emit(0x0f)
 18175              m.emit(0x63)
 18176              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 18177          })
 18178      }
 18179      // PACKSSWB m64, mm
 18180      if isM64(v0) && isMM(v1) {
 18181          self.require(ISA_MMX)
 18182          p.domain = DomainMMXSSE
 18183          p.add(0, func(m *_Encoding, v []interface{}) {
 18184              m.rexo(hcode(v[1]), addr(v[0]), false)
 18185              m.emit(0x0f)
 18186              m.emit(0x63)
 18187              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 18188          })
 18189      }
 18190      // PACKSSWB xmm, xmm
 18191      if isXMM(v0) && isXMM(v1) {
 18192          self.require(ISA_SSE2)
 18193          p.domain = DomainMMXSSE
 18194          p.add(0, func(m *_Encoding, v []interface{}) {
 18195              m.emit(0x66)
 18196              m.rexo(hcode(v[1]), v[0], false)
 18197              m.emit(0x0f)
 18198              m.emit(0x63)
 18199              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 18200          })
 18201      }
 18202      // PACKSSWB m128, xmm
 18203      if isM128(v0) && isXMM(v1) {
 18204          self.require(ISA_SSE2)
 18205          p.domain = DomainMMXSSE
 18206          p.add(0, func(m *_Encoding, v []interface{}) {
 18207              m.emit(0x66)
 18208              m.rexo(hcode(v[1]), addr(v[0]), false)
 18209              m.emit(0x0f)
 18210              m.emit(0x63)
 18211              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 18212          })
 18213      }
 18214      if p.len == 0 {
 18215          panic("invalid operands for PACKSSWB")
 18216      }
 18217      return p
 18218  }
 18219  
 18220  // PACKUSDW performs "Pack Doublewords into Words with Unsigned Saturation".
 18221  //
 18222  // Mnemonic        : PACKUSDW
 18223  // Supported forms : (2 forms)
 18224  //
 18225  //    * PACKUSDW xmm, xmm     [SSE4.1]
 18226  //    * PACKUSDW m128, xmm    [SSE4.1]
 18227  //
 18228  func (self *Program) PACKUSDW(v0 interface{}, v1 interface{}) *Instruction {
 18229      p := self.alloc("PACKUSDW", 2, Operands { v0, v1 })
 18230      // PACKUSDW xmm, xmm
 18231      if isXMM(v0) && isXMM(v1) {
 18232          self.require(ISA_SSE4_1)
 18233          p.domain = DomainMMXSSE
 18234          p.add(0, func(m *_Encoding, v []interface{}) {
 18235              m.emit(0x66)
 18236              m.rexo(hcode(v[1]), v[0], false)
 18237              m.emit(0x0f)
 18238              m.emit(0x38)
 18239              m.emit(0x2b)
 18240              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 18241          })
 18242      }
 18243      // PACKUSDW m128, xmm
 18244      if isM128(v0) && isXMM(v1) {
 18245          self.require(ISA_SSE4_1)
 18246          p.domain = DomainMMXSSE
 18247          p.add(0, func(m *_Encoding, v []interface{}) {
 18248              m.emit(0x66)
 18249              m.rexo(hcode(v[1]), addr(v[0]), false)
 18250              m.emit(0x0f)
 18251              m.emit(0x38)
 18252              m.emit(0x2b)
 18253              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 18254          })
 18255      }
 18256      if p.len == 0 {
 18257          panic("invalid operands for PACKUSDW")
 18258      }
 18259      return p
 18260  }
 18261  
 18262  // PACKUSWB performs "Pack Words into Bytes with Unsigned Saturation".
 18263  //
 18264  // Mnemonic        : PACKUSWB
 18265  // Supported forms : (4 forms)
 18266  //
 18267  //    * PACKUSWB mm, mm       [MMX]
 18268  //    * PACKUSWB m64, mm      [MMX]
 18269  //    * PACKUSWB xmm, xmm     [SSE2]
 18270  //    * PACKUSWB m128, xmm    [SSE2]
 18271  //
 18272  func (self *Program) PACKUSWB(v0 interface{}, v1 interface{}) *Instruction {
 18273      p := self.alloc("PACKUSWB", 2, Operands { v0, v1 })
 18274      // PACKUSWB mm, mm
 18275      if isMM(v0) && isMM(v1) {
 18276          self.require(ISA_MMX)
 18277          p.domain = DomainMMXSSE
 18278          p.add(0, func(m *_Encoding, v []interface{}) {
 18279              m.rexo(hcode(v[1]), v[0], false)
 18280              m.emit(0x0f)
 18281              m.emit(0x67)
 18282              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 18283          })
 18284      }
 18285      // PACKUSWB m64, mm
 18286      if isM64(v0) && isMM(v1) {
 18287          self.require(ISA_MMX)
 18288          p.domain = DomainMMXSSE
 18289          p.add(0, func(m *_Encoding, v []interface{}) {
 18290              m.rexo(hcode(v[1]), addr(v[0]), false)
 18291              m.emit(0x0f)
 18292              m.emit(0x67)
 18293              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 18294          })
 18295      }
 18296      // PACKUSWB xmm, xmm
 18297      if isXMM(v0) && isXMM(v1) {
 18298          self.require(ISA_SSE2)
 18299          p.domain = DomainMMXSSE
 18300          p.add(0, func(m *_Encoding, v []interface{}) {
 18301              m.emit(0x66)
 18302              m.rexo(hcode(v[1]), v[0], false)
 18303              m.emit(0x0f)
 18304              m.emit(0x67)
 18305              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 18306          })
 18307      }
 18308      // PACKUSWB m128, xmm
 18309      if isM128(v0) && isXMM(v1) {
 18310          self.require(ISA_SSE2)
 18311          p.domain = DomainMMXSSE
 18312          p.add(0, func(m *_Encoding, v []interface{}) {
 18313              m.emit(0x66)
 18314              m.rexo(hcode(v[1]), addr(v[0]), false)
 18315              m.emit(0x0f)
 18316              m.emit(0x67)
 18317              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 18318          })
 18319      }
 18320      if p.len == 0 {
 18321          panic("invalid operands for PACKUSWB")
 18322      }
 18323      return p
 18324  }
 18325  
 18326  // PADDB performs "Add Packed Byte Integers".
 18327  //
 18328  // Mnemonic        : PADDB
 18329  // Supported forms : (4 forms)
 18330  //
 18331  //    * PADDB mm, mm       [MMX]
 18332  //    * PADDB m64, mm      [MMX]
 18333  //    * PADDB xmm, xmm     [SSE2]
 18334  //    * PADDB m128, xmm    [SSE2]
 18335  //
 18336  func (self *Program) PADDB(v0 interface{}, v1 interface{}) *Instruction {
 18337      p := self.alloc("PADDB", 2, Operands { v0, v1 })
 18338      // PADDB mm, mm
 18339      if isMM(v0) && isMM(v1) {
 18340          self.require(ISA_MMX)
 18341          p.domain = DomainMMXSSE
 18342          p.add(0, func(m *_Encoding, v []interface{}) {
 18343              m.rexo(hcode(v[1]), v[0], false)
 18344              m.emit(0x0f)
 18345              m.emit(0xfc)
 18346              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 18347          })
 18348      }
 18349      // PADDB m64, mm
 18350      if isM64(v0) && isMM(v1) {
 18351          self.require(ISA_MMX)
 18352          p.domain = DomainMMXSSE
 18353          p.add(0, func(m *_Encoding, v []interface{}) {
 18354              m.rexo(hcode(v[1]), addr(v[0]), false)
 18355              m.emit(0x0f)
 18356              m.emit(0xfc)
 18357              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 18358          })
 18359      }
 18360      // PADDB xmm, xmm
 18361      if isXMM(v0) && isXMM(v1) {
 18362          self.require(ISA_SSE2)
 18363          p.domain = DomainMMXSSE
 18364          p.add(0, func(m *_Encoding, v []interface{}) {
 18365              m.emit(0x66)
 18366              m.rexo(hcode(v[1]), v[0], false)
 18367              m.emit(0x0f)
 18368              m.emit(0xfc)
 18369              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 18370          })
 18371      }
 18372      // PADDB m128, xmm
 18373      if isM128(v0) && isXMM(v1) {
 18374          self.require(ISA_SSE2)
 18375          p.domain = DomainMMXSSE
 18376          p.add(0, func(m *_Encoding, v []interface{}) {
 18377              m.emit(0x66)
 18378              m.rexo(hcode(v[1]), addr(v[0]), false)
 18379              m.emit(0x0f)
 18380              m.emit(0xfc)
 18381              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 18382          })
 18383      }
 18384      if p.len == 0 {
 18385          panic("invalid operands for PADDB")
 18386      }
 18387      return p
 18388  }
 18389  
// PADDD performs "Add Packed Doubleword Integers".
//
// Mnemonic        : PADDD
// Supported forms : (4 forms)
//
//    * PADDD mm, mm       [MMX]
//    * PADDD m64, mm      [MMX]
//    * PADDD xmm, xmm     [SSE2]
//    * PADDD m128, xmm    [SSE2]
//
// Legacy encoding: 0F FE /r (MMX forms); 66 0F FE /r (SSE2 forms).
//
func (self *Program) PADDD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PADDD", 2, Operands { v0, v1 })
    // PADDD mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                // optional REX prefix
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0xfe)                                    // PADDD opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg=v1 (dst), rm=v0 (src)
        })
    }
    // PADDD m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)          // optional REX prefix for the address
            m.emit(0x0f)
            m.emit(0xfe)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    // PADDD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // operand-size prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xfe)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PADDD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xfe)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PADDD")
    }
    return p
}
 18453  
// PADDQ performs "Add Packed Quadword Integers".
//
// Mnemonic        : PADDQ
// Supported forms : (4 forms)
//
//    * PADDQ mm, mm       [SSE2]
//    * PADDQ m64, mm      [SSE2]
//    * PADDQ xmm, xmm     [SSE2]
//    * PADDQ m128, xmm    [SSE2]
//
// Legacy encoding: 0F D4 /r (MMX registers); 66 0F D4 /r (XMM registers).
// Note: all four forms require SSE2 (PADDQ was introduced with SSE2).
//
func (self *Program) PADDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PADDQ", 2, Operands { v0, v1 })
    // PADDQ mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                // optional REX prefix
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0xd4)                                    // PADDQ opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg=v1 (dst), rm=v0 (src)
        })
    }
    // PADDQ m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)          // optional REX prefix for the address
            m.emit(0x0f)
            m.emit(0xd4)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    // PADDQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // operand-size prefix selects the XMM form
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xd4)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PADDQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xd4)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PADDQ")
    }
    return p
}
 18517  
// PADDSB performs "Add Packed Signed Byte Integers with Signed Saturation".
//
// Mnemonic        : PADDSB
// Supported forms : (4 forms)
//
//    * PADDSB mm, mm       [MMX]
//    * PADDSB m64, mm      [MMX]
//    * PADDSB xmm, xmm     [SSE2]
//    * PADDSB m128, xmm    [SSE2]
//
// Legacy encoding: 0F EC /r (MMX forms); 66 0F EC /r (SSE2 forms).
//
func (self *Program) PADDSB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PADDSB", 2, Operands { v0, v1 })
    // PADDSB mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                // optional REX prefix
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0xec)                                    // PADDSB opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg=v1 (dst), rm=v0 (src)
        })
    }
    // PADDSB m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)          // optional REX prefix for the address
            m.emit(0x0f)
            m.emit(0xec)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    // PADDSB xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // operand-size prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xec)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PADDSB m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xec)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PADDSB")
    }
    return p
}
 18581  
// PADDSW performs "Add Packed Signed Word Integers with Signed Saturation".
//
// Mnemonic        : PADDSW
// Supported forms : (4 forms)
//
//    * PADDSW mm, mm       [MMX]
//    * PADDSW m64, mm      [MMX]
//    * PADDSW xmm, xmm     [SSE2]
//    * PADDSW m128, xmm    [SSE2]
//
// Legacy encoding: 0F ED /r (MMX forms); 66 0F ED /r (SSE2 forms).
//
func (self *Program) PADDSW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PADDSW", 2, Operands { v0, v1 })
    // PADDSW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                // optional REX prefix
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0xed)                                    // PADDSW opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg=v1 (dst), rm=v0 (src)
        })
    }
    // PADDSW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)          // optional REX prefix for the address
            m.emit(0x0f)
            m.emit(0xed)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    // PADDSW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // operand-size prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xed)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PADDSW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xed)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PADDSW")
    }
    return p
}
 18645  
// PADDUSB performs "Add Packed Unsigned Byte Integers with Unsigned Saturation".
//
// Mnemonic        : PADDUSB
// Supported forms : (4 forms)
//
//    * PADDUSB mm, mm       [MMX]
//    * PADDUSB m64, mm      [MMX]
//    * PADDUSB xmm, xmm     [SSE2]
//    * PADDUSB m128, xmm    [SSE2]
//
// Legacy encoding: 0F DC /r (MMX forms); 66 0F DC /r (SSE2 forms).
//
func (self *Program) PADDUSB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PADDUSB", 2, Operands { v0, v1 })
    // PADDUSB mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                // optional REX prefix
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0xdc)                                    // PADDUSB opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg=v1 (dst), rm=v0 (src)
        })
    }
    // PADDUSB m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)          // optional REX prefix for the address
            m.emit(0x0f)
            m.emit(0xdc)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    // PADDUSB xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // operand-size prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xdc)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PADDUSB m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xdc)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PADDUSB")
    }
    return p
}
 18709  
// PADDUSW performs "Add Packed Unsigned Word Integers with Unsigned Saturation".
//
// Mnemonic        : PADDUSW
// Supported forms : (4 forms)
//
//    * PADDUSW mm, mm       [MMX]
//    * PADDUSW m64, mm      [MMX]
//    * PADDUSW xmm, xmm     [SSE2]
//    * PADDUSW m128, xmm    [SSE2]
//
// Legacy encoding: 0F DD /r (MMX forms); 66 0F DD /r (SSE2 forms).
//
func (self *Program) PADDUSW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PADDUSW", 2, Operands { v0, v1 })
    // PADDUSW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                // optional REX prefix
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0xdd)                                    // PADDUSW opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg=v1 (dst), rm=v0 (src)
        })
    }
    // PADDUSW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)          // optional REX prefix for the address
            m.emit(0x0f)
            m.emit(0xdd)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    // PADDUSW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // operand-size prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xdd)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PADDUSW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xdd)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PADDUSW")
    }
    return p
}
 18773  
// PADDW performs "Add Packed Word Integers".
//
// Mnemonic        : PADDW
// Supported forms : (4 forms)
//
//    * PADDW mm, mm       [MMX]
//    * PADDW m64, mm      [MMX]
//    * PADDW xmm, xmm     [SSE2]
//    * PADDW m128, xmm    [SSE2]
//
// Legacy encoding: 0F FD /r (MMX forms); 66 0F FD /r (SSE2 forms).
//
func (self *Program) PADDW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PADDW", 2, Operands { v0, v1 })
    // PADDW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                // optional REX prefix
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0xfd)                                    // PADDW opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg=v1 (dst), rm=v0 (src)
        })
    }
    // PADDW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)          // optional REX prefix for the address
            m.emit(0x0f)
            m.emit(0xfd)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    // PADDW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // operand-size prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xfd)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PADDW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xfd)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PADDW")
    }
    return p
}
 18837  
// PALIGNR performs "Packed Align Right".
//
// Mnemonic        : PALIGNR
// Supported forms : (4 forms)
//
//    * PALIGNR imm8, mm, mm       [SSSE3]
//    * PALIGNR imm8, m64, mm      [SSSE3]
//    * PALIGNR imm8, xmm, xmm     [SSSE3]
//    * PALIGNR imm8, m128, xmm    [SSSE3]
//
// Legacy encoding: 0F 3A 0F /r ib (MMX forms); 66 0F 3A 0F /r ib (XMM forms).
// Operand order is AT&T-style: the immediate comes first, destination last.
//
func (self *Program) PALIGNR(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("PALIGNR", 3, Operands { v0, v1, v2 })
    // PALIGNR imm8, mm, mm
    if isImm8(v0) && isMM(v1) && isMM(v2) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[2]), v[1], false)                // optional REX prefix
            m.emit(0x0f)                                    // three-byte opcode escape (0F 3A)
            m.emit(0x3a)
            m.emit(0x0f)                                    // PALIGNR opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))   // ModRM: register-direct, reg=v2 (dst), rm=v1 (src)
            m.imm1(toImmAny(v[0]))                          // trailing imm8 (byte shift count)
        })
    }
    // PALIGNR imm8, m64, mm
    if isImm8(v0) && isM64(v1) && isMM(v2) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[2]), addr(v[1]), false)          // optional REX prefix for the address
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x0f)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)              // ModRM/SIB/disp for the memory operand
            m.imm1(toImmAny(v[0]))
        })
    }
    // PALIGNR imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // operand-size prefix selects the XMM form
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // PALIGNR imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x0f)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for PALIGNR")
    }
    return p
}
 18909  
// PAND performs "Packed Bitwise Logical AND".
//
// Mnemonic        : PAND
// Supported forms : (4 forms)
//
//    * PAND mm, mm       [MMX]
//    * PAND m64, mm      [MMX]
//    * PAND xmm, xmm     [SSE2]
//    * PAND m128, xmm    [SSE2]
//
// Legacy encoding: 0F DB /r (MMX forms); 66 0F DB /r (SSE2 forms).
//
func (self *Program) PAND(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PAND", 2, Operands { v0, v1 })
    // PAND mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                // optional REX prefix
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0xdb)                                    // PAND opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg=v1 (dst), rm=v0 (src)
        })
    }
    // PAND m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)          // optional REX prefix for the address
            m.emit(0x0f)
            m.emit(0xdb)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    // PAND xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // operand-size prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xdb)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PAND m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xdb)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PAND")
    }
    return p
}
 18973  
// PANDN performs "Packed Bitwise Logical AND NOT".
//
// Mnemonic        : PANDN
// Supported forms : (4 forms)
//
//    * PANDN mm, mm       [MMX]
//    * PANDN m64, mm      [MMX]
//    * PANDN xmm, xmm     [SSE2]
//    * PANDN m128, xmm    [SSE2]
//
// Legacy encoding: 0F DF /r (MMX forms); 66 0F DF /r (SSE2 forms).
//
func (self *Program) PANDN(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PANDN", 2, Operands { v0, v1 })
    // PANDN mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                // optional REX prefix
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0xdf)                                    // PANDN opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg=v1 (dst), rm=v0 (src)
        })
    }
    // PANDN m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)          // optional REX prefix for the address
            m.emit(0x0f)
            m.emit(0xdf)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    // PANDN xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // operand-size prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xdf)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PANDN m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xdf)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PANDN")
    }
    return p
}
 19037  
// PAUSE performs "Spin Loop Hint".
//
// Mnemonic        : PAUSE
// Supported forms : (1 form)
//
//    * PAUSE
//
// Encoding: F3 90 (an F3 prefix on NOP). No operands, so no form matching
// is needed and this method can never panic.
//
func (self *Program) PAUSE() *Instruction {
    p := self.alloc("PAUSE", 0, Operands {  })
    // PAUSE
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0xf3)    // F3 prefix
        m.emit(0x90)    // NOP opcode; F3 90 together encode PAUSE
    })
    return p
}
 19055  
// PAVGB performs "Average Packed Byte Integers".
//
// Mnemonic        : PAVGB
// Supported forms : (4 forms)
//
//    * PAVGB mm, mm       [MMX+]
//    * PAVGB m64, mm      [MMX+]
//    * PAVGB xmm, xmm     [SSE2]
//    * PAVGB m128, xmm    [SSE2]
//
// Legacy encoding: 0F E0 /r (MMX+ forms); 66 0F E0 /r (SSE2 forms).
//
func (self *Program) PAVGB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PAVGB", 2, Operands { v0, v1 })
    // PAVGB mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                // optional REX prefix
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0xe0)                                    // PAVGB opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg=v1 (dst), rm=v0 (src)
        })
    }
    // PAVGB m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)          // optional REX prefix for the address
            m.emit(0x0f)
            m.emit(0xe0)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    // PAVGB xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // operand-size prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xe0)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PAVGB m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xe0)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PAVGB")
    }
    return p
}
 19119  
// PAVGUSB performs "Average Packed Byte Integers".
//
// Mnemonic        : PAVGUSB
// Supported forms : (2 forms)
//
//    * PAVGUSB mm, mm     [3dnow!]
//    * PAVGUSB m64, mm    [3dnow!]
//
// Encoding: 0F 0F /r BF. 3DNow! instructions share the 0F 0F opcode and are
// distinguished by a one-byte suffix opcode (BF here) emitted AFTER the
// ModRM/SIB/displacement, unlike the usual opcode-then-ModRM layout.
//
func (self *Program) PAVGUSB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PAVGUSB", 2, Operands { v0, v1 })
    // PAVGUSB mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                // optional REX prefix
            m.emit(0x0f)                                    // 3DNow! escape: 0F 0F
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg=v1 (dst), rm=v0 (src)
            m.emit(0xbf)                                    // 3DNow! suffix opcode for PAVGUSB
        })
    }
    // PAVGUSB m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)          // optional REX prefix for the address
            m.emit(0x0f)
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
            m.emit(0xbf)                                    // suffix opcode follows the addressing bytes
        })
    }
    if p.len == 0 {
        panic("invalid operands for PAVGUSB")
    }
    return p
}
 19159  
// PAVGW performs "Average Packed Word Integers".
//
// Mnemonic        : PAVGW
// Supported forms : (4 forms)
//
//    * PAVGW mm, mm       [MMX+]
//    * PAVGW m64, mm      [MMX+]
//    * PAVGW xmm, xmm     [SSE2]
//    * PAVGW m128, xmm    [SSE2]
//
// Legacy encoding: 0F E3 /r (MMX+ forms); 66 0F E3 /r (SSE2 forms).
//
func (self *Program) PAVGW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PAVGW", 2, Operands { v0, v1 })
    // PAVGW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                // optional REX prefix
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0xe3)                                    // PAVGW opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg=v1 (dst), rm=v0 (src)
        })
    }
    // PAVGW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)          // optional REX prefix for the address
            m.emit(0x0f)
            m.emit(0xe3)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    // PAVGW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // operand-size prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xe3)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PAVGW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xe3)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PAVGW")
    }
    return p
}
 19223  
// PBLENDVB performs "Variable Blend Packed Bytes".
//
// Mnemonic        : PBLENDVB
// Supported forms : (2 forms)
//
//    * PBLENDVB xmm0, xmm, xmm     [SSE4.1]
//    * PBLENDVB xmm0, m128, xmm    [SSE4.1]
//
// Legacy encoding: 66 0F 38 10 /r. The first operand must be the literal
// XMM0 register (the implicit mask operand of the SSE4.1 encoding); it is
// checked with v0 == XMM0 and does not appear in the emitted bytes.
//
func (self *Program) PBLENDVB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("PBLENDVB", 3, Operands { v0, v1, v2 })
    // PBLENDVB xmm0, xmm, xmm
    if v0 == XMM0 && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // mandatory operand-size prefix
            m.rexo(hcode(v[2]), v[1], false)                // optional REX prefix
            m.emit(0x0f)                                    // three-byte opcode escape (0F 38)
            m.emit(0x38)
            m.emit(0x10)                                    // PBLENDVB opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))   // ModRM: register-direct, reg=v2 (dst), rm=v1 (src)
        })
    }
    // PBLENDVB xmm0, m128, xmm
    if v0 == XMM0 && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)          // optional REX prefix for the address
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x10)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for PBLENDVB")
    }
    return p
}
 19265  
// PBLENDW performs "Blend Packed Words".
//
// Mnemonic        : PBLENDW
// Supported forms : (2 forms)
//
//    * PBLENDW imm8, xmm, xmm     [SSE4.1]
//    * PBLENDW imm8, m128, xmm    [SSE4.1]
//
// Legacy encoding: 66 0F 3A 0E /r ib. Operand order is AT&T-style: the
// immediate (blend mask) comes first, destination last.
//
func (self *Program) PBLENDW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("PBLENDW", 3, Operands { v0, v1, v2 })
    // PBLENDW imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // mandatory operand-size prefix
            m.rexo(hcode(v[2]), v[1], false)                // optional REX prefix
            m.emit(0x0f)                                    // three-byte opcode escape (0F 3A)
            m.emit(0x3a)
            m.emit(0x0e)                                    // PBLENDW opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))   // ModRM: register-direct, reg=v2 (dst), rm=v1 (src)
            m.imm1(toImmAny(v[0]))                          // trailing imm8 (word-select mask)
        })
    }
    // PBLENDW imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)          // optional REX prefix for the address
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x0e)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)              // ModRM/SIB/disp for the memory operand
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for PBLENDW")
    }
    return p
}
 19309  
 19310  // PCLMULQDQ performs "Carry-Less Quadword Multiplication".
 19311  //
 19312  // Mnemonic        : PCLMULQDQ
 19313  // Supported forms : (2 forms)
 19314  //
 19315  //    * PCLMULQDQ imm8, xmm, xmm     [PCLMULQDQ]
 19316  //    * PCLMULQDQ imm8, m128, xmm    [PCLMULQDQ]
 19317  //
func (self *Program) PCLMULQDQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Allocate the instruction, then try each supported operand form below;
    // every form that matches registers one candidate encoder via p.add.
    p := self.alloc("PCLMULQDQ", 3, Operands { v0, v1, v2 })
    // PCLMULQDQ imm8, xmm, xmm
    // Encoding: 66 [REX?] 0F 3A 44 /r ib (imm8 selects which quadwords to multiply)
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_PCLMULQDQ)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x44)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))   // ModRM: mod=11, reg=dst xmm, rm=src xmm
            m.imm1(toImmAny(v[0]))
        })
    }
    // PCLMULQDQ imm8, m128, xmm
    // Same opcode with a memory source: ModRM/SIB/displacement come from mrsd.
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_PCLMULQDQ)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x44)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No supported operand form matched.
    if p.len == 0 {
        panic("invalid operands for PCLMULQDQ")
    }
    return p
}
 19353  
 19354  // PCMPEQB performs "Compare Packed Byte Data for Equality".
 19355  //
 19356  // Mnemonic        : PCMPEQB
 19357  // Supported forms : (4 forms)
 19358  //
 19359  //    * PCMPEQB mm, mm       [MMX]
 19360  //    * PCMPEQB m64, mm      [MMX]
 19361  //    * PCMPEQB xmm, xmm     [SSE2]
 19362  //    * PCMPEQB m128, xmm    [SSE2]
 19363  //
func (self *Program) PCMPEQB(v0 interface{}, v1 interface{}) *Instruction {
    // Allocate the instruction, then try each supported operand form below;
    // every form that matches registers one candidate encoder via p.add.
    p := self.alloc("PCMPEQB", 2, Operands { v0, v1 })
    // PCMPEQB mm, mm
    // MMX form, encoding: [REX?] 0F 74 /r
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x74)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // PCMPEQB m64, mm
    // MMX form with memory source: ModRM/SIB/displacement come from mrsd.
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x74)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PCMPEQB xmm, xmm
    // SSE2 form: same opcode with a 66 prefix, encoding: 66 [REX?] 0F 74 /r
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x74)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PCMPEQB m128, xmm
    // SSE2 form with memory source.
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x74)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No supported operand form matched.
    if p.len == 0 {
        panic("invalid operands for PCMPEQB")
    }
    return p
}
 19417  
 19418  // PCMPEQD performs "Compare Packed Doubleword Data for Equality".
 19419  //
 19420  // Mnemonic        : PCMPEQD
 19421  // Supported forms : (4 forms)
 19422  //
 19423  //    * PCMPEQD mm, mm       [MMX]
 19424  //    * PCMPEQD m64, mm      [MMX]
 19425  //    * PCMPEQD xmm, xmm     [SSE2]
 19426  //    * PCMPEQD m128, xmm    [SSE2]
 19427  //
func (self *Program) PCMPEQD(v0 interface{}, v1 interface{}) *Instruction {
    // Allocate the instruction, then try each supported operand form below;
    // every form that matches registers one candidate encoder via p.add.
    p := self.alloc("PCMPEQD", 2, Operands { v0, v1 })
    // PCMPEQD mm, mm
    // MMX form, encoding: [REX?] 0F 76 /r
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x76)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // PCMPEQD m64, mm
    // MMX form with memory source: ModRM/SIB/displacement come from mrsd.
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x76)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PCMPEQD xmm, xmm
    // SSE2 form: same opcode with a 66 prefix, encoding: 66 [REX?] 0F 76 /r
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x76)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PCMPEQD m128, xmm
    // SSE2 form with memory source.
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x76)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No supported operand form matched.
    if p.len == 0 {
        panic("invalid operands for PCMPEQD")
    }
    return p
}
 19481  
 19482  // PCMPEQQ performs "Compare Packed Quadword Data for Equality".
 19483  //
 19484  // Mnemonic        : PCMPEQQ
 19485  // Supported forms : (2 forms)
 19486  //
 19487  //    * PCMPEQQ xmm, xmm     [SSE4.1]
 19488  //    * PCMPEQQ m128, xmm    [SSE4.1]
 19489  //
func (self *Program) PCMPEQQ(v0 interface{}, v1 interface{}) *Instruction {
    // Allocate the instruction, then try each supported operand form below;
    // every form that matches registers one candidate encoder via p.add.
    p := self.alloc("PCMPEQQ", 2, Operands { v0, v1 })
    // PCMPEQQ xmm, xmm
    // SSE4.1 three-byte-opcode form, encoding: 66 [REX?] 0F 38 29 /r
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x29)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // PCMPEQQ m128, xmm
    // Same opcode with a memory source: ModRM/SIB/displacement come from mrsd.
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x29)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No supported operand form matched.
    if p.len == 0 {
        panic("invalid operands for PCMPEQQ")
    }
    return p
}
 19523  
 19524  // PCMPEQW performs "Compare Packed Word Data for Equality".
 19525  //
 19526  // Mnemonic        : PCMPEQW
 19527  // Supported forms : (4 forms)
 19528  //
 19529  //    * PCMPEQW mm, mm       [MMX]
 19530  //    * PCMPEQW m64, mm      [MMX]
 19531  //    * PCMPEQW xmm, xmm     [SSE2]
 19532  //    * PCMPEQW m128, xmm    [SSE2]
 19533  //
func (self *Program) PCMPEQW(v0 interface{}, v1 interface{}) *Instruction {
    // Allocate the instruction, then try each supported operand form below;
    // every form that matches registers one candidate encoder via p.add.
    p := self.alloc("PCMPEQW", 2, Operands { v0, v1 })
    // PCMPEQW mm, mm
    // MMX form, encoding: [REX?] 0F 75 /r
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x75)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // PCMPEQW m64, mm
    // MMX form with memory source: ModRM/SIB/displacement come from mrsd.
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x75)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PCMPEQW xmm, xmm
    // SSE2 form: same opcode with a 66 prefix, encoding: 66 [REX?] 0F 75 /r
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x75)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PCMPEQW m128, xmm
    // SSE2 form with memory source.
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x75)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No supported operand form matched.
    if p.len == 0 {
        panic("invalid operands for PCMPEQW")
    }
    return p
}
 19587  
 19588  // PCMPESTRI performs "Packed Compare Explicit Length Strings, Return Index".
 19589  //
 19590  // Mnemonic        : PCMPESTRI
 19591  // Supported forms : (2 forms)
 19592  //
 19593  //    * PCMPESTRI imm8, xmm, xmm     [SSE4.2]
 19594  //    * PCMPESTRI imm8, m128, xmm    [SSE4.2]
 19595  //
func (self *Program) PCMPESTRI(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Allocate the instruction, then try each supported operand form below;
    // every form that matches registers one candidate encoder via p.add.
    p := self.alloc("PCMPESTRI", 3, Operands { v0, v1, v2 })
    // PCMPESTRI imm8, xmm, xmm
    // Encoding: 66 [REX?] 0F 3A 61 /r ib (imm8 is the comparison-control byte, emitted last)
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE4_2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x61)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))   // ModRM: mod=11, reg=first xmm, rm=second xmm
            m.imm1(toImmAny(v[0]))
        })
    }
    // PCMPESTRI imm8, m128, xmm
    // Same opcode with a memory operand: ModRM/SIB/displacement come from mrsd.
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE4_2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x61)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No supported operand form matched.
    if p.len == 0 {
        panic("invalid operands for PCMPESTRI")
    }
    return p
}
 19631  
 19632  // PCMPESTRM performs "Packed Compare Explicit Length Strings, Return Mask".
 19633  //
 19634  // Mnemonic        : PCMPESTRM
 19635  // Supported forms : (2 forms)
 19636  //
 19637  //    * PCMPESTRM imm8, xmm, xmm     [SSE4.2]
 19638  //    * PCMPESTRM imm8, m128, xmm    [SSE4.2]
 19639  //
func (self *Program) PCMPESTRM(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Allocate the instruction, then try each supported operand form below;
    // every form that matches registers one candidate encoder via p.add.
    p := self.alloc("PCMPESTRM", 3, Operands { v0, v1, v2 })
    // PCMPESTRM imm8, xmm, xmm
    // Encoding: 66 [REX?] 0F 3A 60 /r ib (imm8 is the comparison-control byte, emitted last)
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE4_2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x60)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))   // ModRM: mod=11, reg=first xmm, rm=second xmm
            m.imm1(toImmAny(v[0]))
        })
    }
    // PCMPESTRM imm8, m128, xmm
    // Same opcode with a memory operand: ModRM/SIB/displacement come from mrsd.
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE4_2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x60)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No supported operand form matched.
    if p.len == 0 {
        panic("invalid operands for PCMPESTRM")
    }
    return p
}
 19675  
 19676  // PCMPGTB performs "Compare Packed Signed Byte Integers for Greater Than".
 19677  //
 19678  // Mnemonic        : PCMPGTB
 19679  // Supported forms : (4 forms)
 19680  //
 19681  //    * PCMPGTB mm, mm       [MMX]
 19682  //    * PCMPGTB m64, mm      [MMX]
 19683  //    * PCMPGTB xmm, xmm     [SSE2]
 19684  //    * PCMPGTB m128, xmm    [SSE2]
 19685  //
func (self *Program) PCMPGTB(v0 interface{}, v1 interface{}) *Instruction {
    // Allocate the instruction, then try each supported operand form below;
    // every form that matches registers one candidate encoder via p.add.
    p := self.alloc("PCMPGTB", 2, Operands { v0, v1 })
    // PCMPGTB mm, mm
    // MMX form, encoding: [REX?] 0F 64 /r
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x64)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // PCMPGTB m64, mm
    // MMX form with memory source: ModRM/SIB/displacement come from mrsd.
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x64)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PCMPGTB xmm, xmm
    // SSE2 form: same opcode with a 66 prefix, encoding: 66 [REX?] 0F 64 /r
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x64)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PCMPGTB m128, xmm
    // SSE2 form with memory source.
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x64)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No supported operand form matched.
    if p.len == 0 {
        panic("invalid operands for PCMPGTB")
    }
    return p
}
 19739  
 19740  // PCMPGTD performs "Compare Packed Signed Doubleword Integers for Greater Than".
 19741  //
 19742  // Mnemonic        : PCMPGTD
 19743  // Supported forms : (4 forms)
 19744  //
 19745  //    * PCMPGTD mm, mm       [MMX]
 19746  //    * PCMPGTD m64, mm      [MMX]
 19747  //    * PCMPGTD xmm, xmm     [SSE2]
 19748  //    * PCMPGTD m128, xmm    [SSE2]
 19749  //
func (self *Program) PCMPGTD(v0 interface{}, v1 interface{}) *Instruction {
    // Allocate the instruction, then try each supported operand form below;
    // every form that matches registers one candidate encoder via p.add.
    p := self.alloc("PCMPGTD", 2, Operands { v0, v1 })
    // PCMPGTD mm, mm
    // MMX form, encoding: [REX?] 0F 66 /r (0x66 here is the opcode byte, not a prefix)
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x66)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // PCMPGTD m64, mm
    // MMX form with memory source: ModRM/SIB/displacement come from mrsd.
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x66)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PCMPGTD xmm, xmm
    // SSE2 form: 66 prefix followed by the same 0F 66 opcode, i.e. 66 [REX?] 0F 66 /r
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x66)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PCMPGTD m128, xmm
    // SSE2 form with memory source.
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x66)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No supported operand form matched.
    if p.len == 0 {
        panic("invalid operands for PCMPGTD")
    }
    return p
}
 19803  
 19804  // PCMPGTQ performs "Compare Packed Data for Greater Than".
 19805  //
 19806  // Mnemonic        : PCMPGTQ
 19807  // Supported forms : (2 forms)
 19808  //
 19809  //    * PCMPGTQ xmm, xmm     [SSE4.2]
 19810  //    * PCMPGTQ m128, xmm    [SSE4.2]
 19811  //
func (self *Program) PCMPGTQ(v0 interface{}, v1 interface{}) *Instruction {
    // Allocate the instruction, then try each supported operand form below;
    // every form that matches registers one candidate encoder via p.add.
    p := self.alloc("PCMPGTQ", 2, Operands { v0, v1 })
    // PCMPGTQ xmm, xmm
    // SSE4.2 three-byte-opcode form, encoding: 66 [REX?] 0F 38 37 /r
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x37)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // PCMPGTQ m128, xmm
    // Same opcode with a memory source: ModRM/SIB/displacement come from mrsd.
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE4_2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x37)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No supported operand form matched.
    if p.len == 0 {
        panic("invalid operands for PCMPGTQ")
    }
    return p
}
 19845  
 19846  // PCMPGTW performs "Compare Packed Signed Word Integers for Greater Than".
 19847  //
 19848  // Mnemonic        : PCMPGTW
 19849  // Supported forms : (4 forms)
 19850  //
 19851  //    * PCMPGTW mm, mm       [MMX]
 19852  //    * PCMPGTW m64, mm      [MMX]
 19853  //    * PCMPGTW xmm, xmm     [SSE2]
 19854  //    * PCMPGTW m128, xmm    [SSE2]
 19855  //
func (self *Program) PCMPGTW(v0 interface{}, v1 interface{}) *Instruction {
    // Allocate the instruction, then try each supported operand form below;
    // every form that matches registers one candidate encoder via p.add.
    p := self.alloc("PCMPGTW", 2, Operands { v0, v1 })
    // PCMPGTW mm, mm
    // MMX form, encoding: [REX?] 0F 65 /r
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x65)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // PCMPGTW m64, mm
    // MMX form with memory source: ModRM/SIB/displacement come from mrsd.
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x65)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PCMPGTW xmm, xmm
    // SSE2 form: same opcode with a 66 prefix, encoding: 66 [REX?] 0F 65 /r
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x65)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PCMPGTW m128, xmm
    // SSE2 form with memory source.
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x65)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No supported operand form matched.
    if p.len == 0 {
        panic("invalid operands for PCMPGTW")
    }
    return p
}
 19909  
 19910  // PCMPISTRI performs "Packed Compare Implicit Length Strings, Return Index".
 19911  //
 19912  // Mnemonic        : PCMPISTRI
 19913  // Supported forms : (2 forms)
 19914  //
 19915  //    * PCMPISTRI imm8, xmm, xmm     [SSE4.2]
 19916  //    * PCMPISTRI imm8, m128, xmm    [SSE4.2]
 19917  //
func (self *Program) PCMPISTRI(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Allocate the instruction, then try each supported operand form below;
    // every form that matches registers one candidate encoder via p.add.
    p := self.alloc("PCMPISTRI", 3, Operands { v0, v1, v2 })
    // PCMPISTRI imm8, xmm, xmm
    // Encoding: 66 [REX?] 0F 3A 63 /r ib (imm8 is the comparison-control byte, emitted last)
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE4_2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x63)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))   // ModRM: mod=11, reg=first xmm, rm=second xmm
            m.imm1(toImmAny(v[0]))
        })
    }
    // PCMPISTRI imm8, m128, xmm
    // Same opcode with a memory operand: ModRM/SIB/displacement come from mrsd.
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE4_2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x63)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No supported operand form matched.
    if p.len == 0 {
        panic("invalid operands for PCMPISTRI")
    }
    return p
}
 19953  
 19954  // PCMPISTRM performs "Packed Compare Implicit Length Strings, Return Mask".
 19955  //
 19956  // Mnemonic        : PCMPISTRM
 19957  // Supported forms : (2 forms)
 19958  //
 19959  //    * PCMPISTRM imm8, xmm, xmm     [SSE4.2]
 19960  //    * PCMPISTRM imm8, m128, xmm    [SSE4.2]
 19961  //
func (self *Program) PCMPISTRM(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Allocate the instruction, then try each supported operand form below;
    // every form that matches registers one candidate encoder via p.add.
    p := self.alloc("PCMPISTRM", 3, Operands { v0, v1, v2 })
    // PCMPISTRM imm8, xmm, xmm
    // Encoding: 66 [REX?] 0F 3A 62 /r ib (imm8 is the comparison-control byte, emitted last)
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE4_2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x62)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))   // ModRM: mod=11, reg=first xmm, rm=second xmm
            m.imm1(toImmAny(v[0]))
        })
    }
    // PCMPISTRM imm8, m128, xmm
    // Same opcode with a memory operand: ModRM/SIB/displacement come from mrsd.
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE4_2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x62)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No supported operand form matched.
    if p.len == 0 {
        panic("invalid operands for PCMPISTRM")
    }
    return p
}
 19997  
 19998  // PDEP performs "Parallel Bits Deposit".
 19999  //
 20000  // Mnemonic        : PDEP
 20001  // Supported forms : (4 forms)
 20002  //
 20003  //    * PDEP r32, r32, r32    [BMI2]
 20004  //    * PDEP m32, r32, r32    [BMI2]
 20005  //    * PDEP r64, r64, r64    [BMI2]
 20006  //    * PDEP m64, r64, r64    [BMI2]
 20007  //
func (self *Program) PDEP(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Allocate the instruction, then try each supported operand form below;
    // every form that matches registers one candidate encoder via p.add.
    // All forms are VEX-encoded (BMI2), opcode F5, destination in ModRM.reg,
    // second source (v1) carried in the VEX.vvvv field.
    p := self.alloc("PDEP", 3, Operands { v0, v1, v2 })
    // PDEP r32, r32, r32
    // Register-only form: the 3-byte VEX prefix (C4 ...) is built inline.
    if isReg32(v0) && isReg32(v1) && isReg32(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))   // VEX byte 2: R/B extension bits folded in
            m.emit(0x7b ^ (hlcode(v[1]) << 3))                       // VEX byte 3: vvvv = v[1]
            m.emit(0xf5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))            // ModRM: mod=11, reg=dst, rm=src1
        })
    }
    // PDEP m32, r32, r32
    // Memory form: prefix built by vex3, addressing bytes by mrsd.
    if isM32(v0) && isReg32(v1) && isReg32(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x03, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xf5)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // PDEP r64, r64, r64
    // 64-bit register form: third VEX byte is 0xfb instead of 0x7b (W bit set).
    if isReg64(v0) && isReg64(v1) && isReg64(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfb ^ (hlcode(v[1]) << 3))
            m.emit(0xf5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // PDEP m64, r64, r64
    // 64-bit memory form: vex3 opcode byte 0x83 instead of 0x03 (W bit set).
    if isM64(v0) && isReg64(v1) && isReg64(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x83, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xf5)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No supported operand form matched.
    if p.len == 0 {
        panic("invalid operands for PDEP")
    }
    return p
}
 20059  
 20060  // PEXT performs "Parallel Bits Extract".
 20061  //
 20062  // Mnemonic        : PEXT
 20063  // Supported forms : (4 forms)
 20064  //
 20065  //    * PEXT r32, r32, r32    [BMI2]
 20066  //    * PEXT m32, r32, r32    [BMI2]
 20067  //    * PEXT r64, r64, r64    [BMI2]
 20068  //    * PEXT m64, r64, r64    [BMI2]
 20069  //
func (self *Program) PEXT(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Allocate the instruction, then try each supported operand form below;
    // every form that matches registers one candidate encoder via p.add.
    // All forms are VEX-encoded (BMI2), opcode F5, destination in ModRM.reg,
    // second source (v1) carried in the VEX.vvvv field. Differs from PDEP
    // only in the VEX prefix byte (0x7a/0xfa vs 0x7b/0xfb).
    p := self.alloc("PEXT", 3, Operands { v0, v1, v2 })
    // PEXT r32, r32, r32
    // Register-only form: the 3-byte VEX prefix (C4 ...) is built inline.
    if isReg32(v0) && isReg32(v1) && isReg32(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))   // VEX byte 2: R/B extension bits folded in
            m.emit(0x7a ^ (hlcode(v[1]) << 3))                       // VEX byte 3: vvvv = v[1]
            m.emit(0xf5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))            // ModRM: mod=11, reg=dst, rm=src1
        })
    }
    // PEXT m32, r32, r32
    // Memory form: prefix built by vex3, addressing bytes by mrsd.
    if isM32(v0) && isReg32(v1) && isReg32(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x02, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xf5)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // PEXT r64, r64, r64
    // 64-bit register form: third VEX byte is 0xfa instead of 0x7a (W bit set).
    if isReg64(v0) && isReg64(v1) && isReg64(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfa ^ (hlcode(v[1]) << 3))
            m.emit(0xf5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // PEXT m64, r64, r64
    // 64-bit memory form: vex3 opcode byte 0x82 instead of 0x02 (W bit set).
    if isM64(v0) && isReg64(v1) && isReg64(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x82, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xf5)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No supported operand form matched.
    if p.len == 0 {
        panic("invalid operands for PEXT")
    }
    return p
}
 20121  
// PEXTRB performs "Extract Byte".
//
// Mnemonic        : PEXTRB
// Supported forms : (2 forms)
//
//    * PEXTRB imm8, xmm, r32    [SSE4.1]
//    * PEXTRB imm8, xmm, m8     [SSE4.1]
//
func (self *Program) PEXTRB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("PEXTRB", 3, Operands { v0, v1, v2 })
    // PEXTRB imm8, xmm, r32
    if isImm8(v0) && isXMM(v1) && isReg32(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)  // mandatory operand-size prefix for this encoding
            m.rexo(hcode(v[1]), v[2], false)  // optional REX for high register numbers
            m.emit(0x0f)  // escape bytes: 0F 3A
            m.emit(0x3a)
            m.emit(0x14)  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))  // ModRM: register-direct, reg = xmm source, rm = r32 destination
            m.imm1(toImmAny(v[0]))  // imm8 lane selector
        })
    }
    // PEXTRB imm8, xmm, m8
    if isImm8(v0) && isXMM(v1) && isM8(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)  // mandatory operand-size prefix
            m.rexo(hcode(v[1]), addr(v[2]), false)  // optional REX for the memory operand
            m.emit(0x0f)  // escape bytes: 0F 3A
            m.emit(0x3a)
            m.emit(0x14)  // opcode
            m.mrsd(lcode(v[1]), addr(v[2]), 1)  // ModRM/SIB/disp for the m8 destination
            m.imm1(toImmAny(v[0]))  // imm8 lane selector
        })
    }
    if p.len == 0 {
        panic("invalid operands for PEXTRB")
    }
    return p
}
 20165  
// PEXTRD performs "Extract Doubleword".
//
// Mnemonic        : PEXTRD
// Supported forms : (2 forms)
//
//    * PEXTRD imm8, xmm, r32    [SSE4.1]
//    * PEXTRD imm8, xmm, m32    [SSE4.1]
//
func (self *Program) PEXTRD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("PEXTRD", 3, Operands { v0, v1, v2 })
    // PEXTRD imm8, xmm, r32
    if isImm8(v0) && isXMM(v1) && isReg32(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)  // mandatory operand-size prefix for this encoding
            m.rexo(hcode(v[1]), v[2], false)  // optional REX for high register numbers (no REX.W: 32-bit form)
            m.emit(0x0f)  // escape bytes: 0F 3A
            m.emit(0x3a)
            m.emit(0x16)  // opcode (shared with PEXTRQ; distinguished by REX.W)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))  // ModRM: register-direct, reg = xmm source, rm = r32 destination
            m.imm1(toImmAny(v[0]))  // imm8 lane selector
        })
    }
    // PEXTRD imm8, xmm, m32
    if isImm8(v0) && isXMM(v1) && isM32(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)  // mandatory operand-size prefix
            m.rexo(hcode(v[1]), addr(v[2]), false)  // optional REX for the memory operand
            m.emit(0x0f)  // escape bytes: 0F 3A
            m.emit(0x3a)
            m.emit(0x16)  // opcode
            m.mrsd(lcode(v[1]), addr(v[2]), 1)  // ModRM/SIB/disp for the m32 destination
            m.imm1(toImmAny(v[0]))  // imm8 lane selector
        })
    }
    if p.len == 0 {
        panic("invalid operands for PEXTRD")
    }
    return p
}
 20209  
// PEXTRQ performs "Extract Quadword".
//
// Mnemonic        : PEXTRQ
// Supported forms : (2 forms)
//
//    * PEXTRQ imm8, xmm, r64    [SSE4.1]
//    * PEXTRQ imm8, xmm, m64    [SSE4.1]
//
func (self *Program) PEXTRQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("PEXTRQ", 3, Operands { v0, v1, v2 })
    // PEXTRQ imm8, xmm, r64
    if isImm8(v0) && isXMM(v1) && isReg64(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)  // mandatory operand-size prefix for this encoding
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[2]))  // REX.W is mandatory (0x48) to select the 64-bit form; R/B from high register bits
            m.emit(0x0f)  // escape bytes: 0F 3A
            m.emit(0x3a)
            m.emit(0x16)  // opcode (shared with PEXTRD; REX.W selects quadword)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))  // ModRM: register-direct, reg = xmm source, rm = r64 destination
            m.imm1(toImmAny(v[0]))  // imm8 lane selector
        })
    }
    // PEXTRQ imm8, xmm, m64
    if isImm8(v0) && isXMM(v1) && isM64(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)  // mandatory operand-size prefix
            m.rexm(1, hcode(v[1]), addr(v[2]))  // mandatory REX with W=1 for the 64-bit memory form
            m.emit(0x0f)  // escape bytes: 0F 3A
            m.emit(0x3a)
            m.emit(0x16)  // opcode
            m.mrsd(lcode(v[1]), addr(v[2]), 1)  // ModRM/SIB/disp for the m64 destination
            m.imm1(toImmAny(v[0]))  // imm8 lane selector
        })
    }
    if p.len == 0 {
        panic("invalid operands for PEXTRQ")
    }
    return p
}
 20253  
// PEXTRW performs "Extract Word".
//
// Mnemonic        : PEXTRW
// Supported forms : (3 forms)
//
//    * PEXTRW imm8, mm, r32     [MMX+]
//    * PEXTRW imm8, xmm, r32    [SSE4.1]
//    * PEXTRW imm8, xmm, m16    [SSE4.1]
//
func (self *Program) PEXTRW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("PEXTRW", 3, Operands { v0, v1, v2 })
    // PEXTRW imm8, mm, r32
    if isImm8(v0) && isMM(v1) && isReg32(v2) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[2]), v[1], false)  // optional REX (no 0x66 prefix: MMX form)
            m.emit(0x0f)  // escape byte
            m.emit(0xc5)  // opcode (legacy register-only encoding)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))  // ModRM: reg = r32 destination, rm = mm source
            m.imm1(toImmAny(v[0]))  // imm8 lane selector
        })
    }
    // PEXTRW imm8, xmm, r32
    if isImm8(v0) && isXMM(v1) && isReg32(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // Two equivalent encodings are registered; the encoder may pick either.
        p.add(0, func(m *_Encoding, v []interface{}) {
            // SSE4.1 encoding: 66 0F 3A 15 /r ib (operand roles: reg = xmm source).
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[2], false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Shorter legacy encoding: 66 0F C5 /r ib (operand roles swapped: reg = r32 destination).
            m.emit(0x66)
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0xc5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // PEXTRW imm8, xmm, m16
    if isImm8(v0) && isXMM(v1) && isM16(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory destination only exists in the SSE4.1 encoding.
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[2]), false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x15)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)  // ModRM/SIB/disp for the m16 destination
            m.imm1(toImmAny(v[0]))  // imm8 lane selector
        })
    }
    if p.len == 0 {
        panic("invalid operands for PEXTRW")
    }
    return p
}
 20318  
// PF2ID performs "Packed Floating-Point to Integer Doubleword Conversion".
//
// Mnemonic        : PF2ID
// Supported forms : (2 forms)
//
//    * PF2ID mm, mm     [3dnow!]
//    * PF2ID m64, mm    [3dnow!]
//
func (self *Program) PF2ID(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PF2ID", 2, Operands { v0, v1 })
    // PF2ID mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)  // optional REX prefix
            m.emit(0x0f)  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: register-direct, reg = dst mm, rm = src mm
            m.emit(0x1d)  // 3DNow! opcode suffix for PF2ID (follows the operands)
        })
    }
    // PF2ID m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)  // optional REX for the memory operand
            m.emit(0x0f)  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM/SIB/disp for the m64 source
            m.emit(0x1d)  // 3DNow! opcode suffix for PF2ID
        })
    }
    if p.len == 0 {
        panic("invalid operands for PF2ID")
    }
    return p
}
 20358  
// PF2IW performs "Packed Floating-Point to Integer Word Conversion".
//
// Mnemonic        : PF2IW
// Supported forms : (2 forms)
//
//    * PF2IW mm, mm     [3dnow!+]
//    * PF2IW m64, mm    [3dnow!+]
//
func (self *Program) PF2IW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PF2IW", 2, Operands { v0, v1 })
    // PF2IW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW_PLUS)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)  // optional REX prefix
            m.emit(0x0f)  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: register-direct, reg = dst mm, rm = src mm
            m.emit(0x1c)  // 3DNow! opcode suffix for PF2IW (follows the operands)
        })
    }
    // PF2IW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW_PLUS)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)  // optional REX for the memory operand
            m.emit(0x0f)  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM/SIB/disp for the m64 source
            m.emit(0x1c)  // 3DNow! opcode suffix for PF2IW
        })
    }
    if p.len == 0 {
        panic("invalid operands for PF2IW")
    }
    return p
}
 20398  
// PFACC performs "Packed Floating-Point Accumulate".
//
// Mnemonic        : PFACC
// Supported forms : (2 forms)
//
//    * PFACC mm, mm     [3dnow!]
//    * PFACC m64, mm    [3dnow!]
//
func (self *Program) PFACC(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PFACC", 2, Operands { v0, v1 })
    // PFACC mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)  // optional REX prefix
            m.emit(0x0f)  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: register-direct, reg = dst mm, rm = src mm
            m.emit(0xae)  // 3DNow! opcode suffix for PFACC (follows the operands)
        })
    }
    // PFACC m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)  // optional REX for the memory operand
            m.emit(0x0f)  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM/SIB/disp for the m64 source
            m.emit(0xae)  // 3DNow! opcode suffix for PFACC
        })
    }
    if p.len == 0 {
        panic("invalid operands for PFACC")
    }
    return p
}
 20438  
// PFADD performs "Packed Floating-Point Add".
//
// Mnemonic        : PFADD
// Supported forms : (2 forms)
//
//    * PFADD mm, mm     [3dnow!]
//    * PFADD m64, mm    [3dnow!]
//
func (self *Program) PFADD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PFADD", 2, Operands { v0, v1 })
    // PFADD mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)  // optional REX prefix
            m.emit(0x0f)  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: register-direct, reg = dst mm, rm = src mm
            m.emit(0x9e)  // 3DNow! opcode suffix for PFADD (follows the operands)
        })
    }
    // PFADD m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)  // optional REX for the memory operand
            m.emit(0x0f)  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM/SIB/disp for the m64 source
            m.emit(0x9e)  // 3DNow! opcode suffix for PFADD
        })
    }
    if p.len == 0 {
        panic("invalid operands for PFADD")
    }
    return p
}
 20478  
// PFCMPEQ performs "Packed Floating-Point Compare for Equal".
//
// Mnemonic        : PFCMPEQ
// Supported forms : (2 forms)
//
//    * PFCMPEQ mm, mm     [3dnow!]
//    * PFCMPEQ m64, mm    [3dnow!]
//
func (self *Program) PFCMPEQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PFCMPEQ", 2, Operands { v0, v1 })
    // PFCMPEQ mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)  // optional REX prefix
            m.emit(0x0f)  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: register-direct, reg = dst mm, rm = src mm
            m.emit(0xb0)  // 3DNow! opcode suffix for PFCMPEQ (follows the operands)
        })
    }
    // PFCMPEQ m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)  // optional REX for the memory operand
            m.emit(0x0f)  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM/SIB/disp for the m64 source
            m.emit(0xb0)  // 3DNow! opcode suffix for PFCMPEQ
        })
    }
    if p.len == 0 {
        panic("invalid operands for PFCMPEQ")
    }
    return p
}
 20518  
// PFCMPGE performs "Packed Floating-Point Compare for Greater or Equal".
//
// Mnemonic        : PFCMPGE
// Supported forms : (2 forms)
//
//    * PFCMPGE mm, mm     [3dnow!]
//    * PFCMPGE m64, mm    [3dnow!]
//
func (self *Program) PFCMPGE(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PFCMPGE", 2, Operands { v0, v1 })
    // PFCMPGE mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)  // optional REX prefix
            m.emit(0x0f)  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: register-direct, reg = dst mm, rm = src mm
            m.emit(0x90)  // 3DNow! opcode suffix for PFCMPGE (follows the operands)
        })
    }
    // PFCMPGE m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)  // optional REX for the memory operand
            m.emit(0x0f)  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM/SIB/disp for the m64 source
            m.emit(0x90)  // 3DNow! opcode suffix for PFCMPGE
        })
    }
    if p.len == 0 {
        panic("invalid operands for PFCMPGE")
    }
    return p
}
 20558  
// PFCMPGT performs "Packed Floating-Point Compare for Greater Than".
//
// Mnemonic        : PFCMPGT
// Supported forms : (2 forms)
//
//    * PFCMPGT mm, mm     [3dnow!]
//    * PFCMPGT m64, mm    [3dnow!]
//
func (self *Program) PFCMPGT(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PFCMPGT", 2, Operands { v0, v1 })
    // PFCMPGT mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)  // optional REX prefix
            m.emit(0x0f)  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: register-direct, reg = dst mm, rm = src mm
            m.emit(0xa0)  // 3DNow! opcode suffix for PFCMPGT (follows the operands)
        })
    }
    // PFCMPGT m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)  // optional REX for the memory operand
            m.emit(0x0f)  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM/SIB/disp for the m64 source
            m.emit(0xa0)  // 3DNow! opcode suffix for PFCMPGT
        })
    }
    if p.len == 0 {
        panic("invalid operands for PFCMPGT")
    }
    return p
}
 20598  
// PFMAX performs "Packed Floating-Point Maximum".
//
// Mnemonic        : PFMAX
// Supported forms : (2 forms)
//
//    * PFMAX mm, mm     [3dnow!]
//    * PFMAX m64, mm    [3dnow!]
//
func (self *Program) PFMAX(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PFMAX", 2, Operands { v0, v1 })
    // PFMAX mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)  // optional REX prefix
            m.emit(0x0f)  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: register-direct, reg = dst mm, rm = src mm
            m.emit(0xa4)  // 3DNow! opcode suffix for PFMAX (follows the operands)
        })
    }
    // PFMAX m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)  // optional REX for the memory operand
            m.emit(0x0f)  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM/SIB/disp for the m64 source
            m.emit(0xa4)  // 3DNow! opcode suffix for PFMAX
        })
    }
    if p.len == 0 {
        panic("invalid operands for PFMAX")
    }
    return p
}
 20638  
// PFMIN performs "Packed Floating-Point Minimum".
//
// Mnemonic        : PFMIN
// Supported forms : (2 forms)
//
//    * PFMIN mm, mm     [3dnow!]
//    * PFMIN m64, mm    [3dnow!]
//
func (self *Program) PFMIN(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PFMIN", 2, Operands { v0, v1 })
    // PFMIN mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)  // optional REX prefix
            m.emit(0x0f)  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: register-direct, reg = dst mm, rm = src mm
            m.emit(0x94)  // 3DNow! opcode suffix for PFMIN (follows the operands)
        })
    }
    // PFMIN m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)  // optional REX for the memory operand
            m.emit(0x0f)  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM/SIB/disp for the m64 source
            m.emit(0x94)  // 3DNow! opcode suffix for PFMIN
        })
    }
    if p.len == 0 {
        panic("invalid operands for PFMIN")
    }
    return p
}
 20678  
// PFMUL performs "Packed Floating-Point Multiply".
//
// Mnemonic        : PFMUL
// Supported forms : (2 forms)
//
//    * PFMUL mm, mm     [3dnow!]
//    * PFMUL m64, mm    [3dnow!]
//
func (self *Program) PFMUL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PFMUL", 2, Operands { v0, v1 })
    // PFMUL mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)  // optional REX prefix
            m.emit(0x0f)  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: register-direct, reg = dst mm, rm = src mm
            m.emit(0xb4)  // 3DNow! opcode suffix for PFMUL (follows the operands)
        })
    }
    // PFMUL m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)  // optional REX for the memory operand
            m.emit(0x0f)  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM/SIB/disp for the m64 source
            m.emit(0xb4)  // 3DNow! opcode suffix for PFMUL
        })
    }
    if p.len == 0 {
        panic("invalid operands for PFMUL")
    }
    return p
}
 20718  
// PFNACC performs "Packed Floating-Point Negative Accumulate".
//
// Mnemonic        : PFNACC
// Supported forms : (2 forms)
//
//    * PFNACC mm, mm     [3dnow!+]
//    * PFNACC m64, mm    [3dnow!+]
//
func (self *Program) PFNACC(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PFNACC", 2, Operands { v0, v1 })
    // PFNACC mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW_PLUS)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)  // optional REX prefix
            m.emit(0x0f)  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: register-direct, reg = dst mm, rm = src mm
            m.emit(0x8a)  // 3DNow! opcode suffix for PFNACC (follows the operands)
        })
    }
    // PFNACC m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW_PLUS)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)  // optional REX for the memory operand
            m.emit(0x0f)  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM/SIB/disp for the m64 source
            m.emit(0x8a)  // 3DNow! opcode suffix for PFNACC
        })
    }
    if p.len == 0 {
        panic("invalid operands for PFNACC")
    }
    return p
}
 20758  
// PFPNACC performs "Packed Floating-Point Positive-Negative Accumulate".
//
// Mnemonic        : PFPNACC
// Supported forms : (2 forms)
//
//    * PFPNACC mm, mm     [3dnow!+]
//    * PFPNACC m64, mm    [3dnow!+]
//
func (self *Program) PFPNACC(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PFPNACC", 2, Operands { v0, v1 })
    // PFPNACC mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW_PLUS)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)  // optional REX prefix
            m.emit(0x0f)  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: register-direct, reg = dst mm, rm = src mm
            m.emit(0x8e)  // 3DNow! opcode suffix for PFPNACC (follows the operands)
        })
    }
    // PFPNACC m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW_PLUS)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)  // optional REX for the memory operand
            m.emit(0x0f)  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM/SIB/disp for the m64 source
            m.emit(0x8e)  // 3DNow! opcode suffix for PFPNACC
        })
    }
    if p.len == 0 {
        panic("invalid operands for PFPNACC")
    }
    return p
}
 20798  
// PFRCP performs "Packed Floating-Point Reciprocal Approximation".
//
// Mnemonic        : PFRCP
// Supported forms : (2 forms)
//
//    * PFRCP mm, mm     [3dnow!]
//    * PFRCP m64, mm    [3dnow!]
//
func (self *Program) PFRCP(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PFRCP", 2, Operands { v0, v1 })
    // PFRCP mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)  // optional REX prefix
            m.emit(0x0f)  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: register-direct, reg = dst mm, rm = src mm
            m.emit(0x96)  // 3DNow! opcode suffix for PFRCP (follows the operands)
        })
    }
    // PFRCP m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)  // optional REX for the memory operand
            m.emit(0x0f)  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM/SIB/disp for the m64 source
            m.emit(0x96)  // 3DNow! opcode suffix for PFRCP
        })
    }
    if p.len == 0 {
        panic("invalid operands for PFRCP")
    }
    return p
}
 20838  
// PFRCPIT1 performs "Packed Floating-Point Reciprocal Iteration 1".
//
// Mnemonic        : PFRCPIT1
// Supported forms : (2 forms)
//
//    * PFRCPIT1 mm, mm     [3dnow!]
//    * PFRCPIT1 m64, mm    [3dnow!]
//
func (self *Program) PFRCPIT1(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PFRCPIT1", 2, Operands { v0, v1 })
    // PFRCPIT1 mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)  // optional REX prefix
            m.emit(0x0f)  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: register-direct, reg = dst mm, rm = src mm
            m.emit(0xa6)  // 3DNow! opcode suffix for PFRCPIT1 (follows the operands)
        })
    }
    // PFRCPIT1 m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)  // optional REX for the memory operand
            m.emit(0x0f)  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM/SIB/disp for the m64 source
            m.emit(0xa6)  // 3DNow! opcode suffix for PFRCPIT1
        })
    }
    if p.len == 0 {
        panic("invalid operands for PFRCPIT1")
    }
    return p
}
 20878  
// PFRCPIT2 performs "Packed Floating-Point Reciprocal Iteration 2".
//
// Mnemonic        : PFRCPIT2
// Supported forms : (2 forms)
//
//    * PFRCPIT2 mm, mm     [3dnow!]
//    * PFRCPIT2 m64, mm    [3dnow!]
//
func (self *Program) PFRCPIT2(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PFRCPIT2", 2, Operands { v0, v1 })
    // PFRCPIT2 mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)  // optional REX prefix
            m.emit(0x0f)  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: register-direct, reg = dst mm, rm = src mm
            m.emit(0xb6)  // 3DNow! opcode suffix for PFRCPIT2 (follows the operands)
        })
    }
    // PFRCPIT2 m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)  // optional REX for the memory operand
            m.emit(0x0f)  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM/SIB/disp for the m64 source
            m.emit(0xb6)  // 3DNow! opcode suffix for PFRCPIT2
        })
    }
    if p.len == 0 {
        panic("invalid operands for PFRCPIT2")
    }
    return p
}
 20918  
// PFRSQIT1 performs "Packed Floating-Point Reciprocal Square Root Iteration 1".
//
// Mnemonic        : PFRSQIT1
// Supported forms : (2 forms)
//
//    * PFRSQIT1 mm, mm     [3dnow!]
//    * PFRSQIT1 m64, mm    [3dnow!]
//
func (self *Program) PFRSQIT1(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PFRSQIT1", 2, Operands { v0, v1 })
    // PFRSQIT1 mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)  // optional REX prefix
            m.emit(0x0f)  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: register-direct, reg = dst mm, rm = src mm
            m.emit(0xa7)  // 3DNow! opcode suffix for PFRSQIT1 (follows the operands)
        })
    }
    // PFRSQIT1 m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)  // optional REX for the memory operand
            m.emit(0x0f)  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM/SIB/disp for the m64 source
            m.emit(0xa7)  // 3DNow! opcode suffix for PFRSQIT1
        })
    }
    if p.len == 0 {
        panic("invalid operands for PFRSQIT1")
    }
    return p
}
 20958  
// PFRSQRT performs "Packed Floating-Point Reciprocal Square Root Approximation".
//
// Mnemonic        : PFRSQRT
// Supported forms : (2 forms)
//
//    * PFRSQRT mm, mm     [3dnow!]
//    * PFRSQRT m64, mm    [3dnow!]
//
func (self *Program) PFRSQRT(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PFRSQRT", 2, Operands { v0, v1 })
    // PFRSQRT mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)  // optional REX prefix
            m.emit(0x0f)  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: register-direct, reg = dst mm, rm = src mm
            m.emit(0x97)  // 3DNow! opcode suffix for PFRSQRT (follows the operands)
        })
    }
    // PFRSQRT m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)  // optional REX for the memory operand
            m.emit(0x0f)  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM/SIB/disp for the m64 source
            m.emit(0x97)  // 3DNow! opcode suffix for PFRSQRT
        })
    }
    if p.len == 0 {
        panic("invalid operands for PFRSQRT")
    }
    return p
}
 20998  
 20999  // PFSUB performs "Packed Floating-Point Subtract".
 21000  //
 21001  // Mnemonic        : PFSUB
 21002  // Supported forms : (2 forms)
 21003  //
 21004  //    * PFSUB mm, mm     [3dnow!]
 21005  //    * PFSUB m64, mm    [3dnow!]
 21006  //
func (self *Program) PFSUB(v0 interface{}, v1 interface{}) *Instruction {
    // Allocate the instruction; one encoder is registered per matching operand form.
    p := self.alloc("PFSUB", 2, Operands { v0, v1 })
    // PFSUB mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Optional REX, 0F 0F escape, ModRM (reg = dst, rm = src),
            // then the 3DNow! sub-opcode byte (0x9A) AFTER the ModRM byte.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
            m.emit(0x9a)
        })
    }
    // PFSUB m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: same escape; ModRM/SIB/disp via mrsd, suffix opcode last.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
            m.emit(0x9a)
        })
    }
    // No encoder matched: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for PFSUB")
    }
    return p
}
 21038  
 21039  // PFSUBR performs "Packed Floating-Point Subtract Reverse".
 21040  //
 21041  // Mnemonic        : PFSUBR
 21042  // Supported forms : (2 forms)
 21043  //
 21044  //    * PFSUBR mm, mm     [3dnow!]
 21045  //    * PFSUBR m64, mm    [3dnow!]
 21046  //
func (self *Program) PFSUBR(v0 interface{}, v1 interface{}) *Instruction {
    // Allocate the instruction; one encoder is registered per matching operand form.
    p := self.alloc("PFSUBR", 2, Operands { v0, v1 })
    // PFSUBR mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Optional REX, 0F 0F escape, ModRM (reg = dst, rm = src),
            // then the 3DNow! sub-opcode byte (0xAA) AFTER the ModRM byte.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
            m.emit(0xaa)
        })
    }
    // PFSUBR m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: same escape; ModRM/SIB/disp via mrsd, suffix opcode last.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
            m.emit(0xaa)
        })
    }
    // No encoder matched: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for PFSUBR")
    }
    return p
}
 21078  
 21079  // PHADDD performs "Packed Horizontal Add Doubleword Integer".
 21080  //
 21081  // Mnemonic        : PHADDD
 21082  // Supported forms : (4 forms)
 21083  //
 21084  //    * PHADDD mm, mm       [SSSE3]
 21085  //    * PHADDD m64, mm      [SSSE3]
 21086  //    * PHADDD xmm, xmm     [SSSE3]
 21087  //    * PHADDD m128, xmm    [SSSE3]
 21088  //
func (self *Program) PHADDD(v0 interface{}, v1 interface{}) *Instruction {
    // Allocate the instruction; one encoder is registered per matching operand form.
    p := self.alloc("PHADDD", 2, Operands { v0, v1 })
    // PHADDD mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // MMX form: optional REX, three-byte opcode 0F 38 02, then ModRM.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x02)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PHADDD m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory operand: ModRM/SIB/disp emitted via mrsd.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x02)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PHADDD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // The 0x66 prefix selects the 128-bit XMM form: 66 0F 38 02 /r.
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x02)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PHADDD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x02)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoder matched: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for PHADDD")
    }
    return p
}
 21146  
 21147  // PHADDSW performs "Packed Horizontal Add Signed Word Integers with Signed Saturation".
 21148  //
 21149  // Mnemonic        : PHADDSW
 21150  // Supported forms : (4 forms)
 21151  //
 21152  //    * PHADDSW mm, mm       [SSSE3]
 21153  //    * PHADDSW m64, mm      [SSSE3]
 21154  //    * PHADDSW xmm, xmm     [SSSE3]
 21155  //    * PHADDSW m128, xmm    [SSSE3]
 21156  //
func (self *Program) PHADDSW(v0 interface{}, v1 interface{}) *Instruction {
    // Allocate the instruction; one encoder is registered per matching operand form.
    p := self.alloc("PHADDSW", 2, Operands { v0, v1 })
    // PHADDSW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // MMX form: optional REX, three-byte opcode 0F 38 03, then ModRM.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x03)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PHADDSW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory operand: ModRM/SIB/disp emitted via mrsd.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x03)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PHADDSW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // The 0x66 prefix selects the 128-bit XMM form: 66 0F 38 03 /r.
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x03)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PHADDSW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x03)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoder matched: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for PHADDSW")
    }
    return p
}
 21214  
 21215  // PHADDW performs "Packed Horizontal Add Word Integers".
 21216  //
 21217  // Mnemonic        : PHADDW
 21218  // Supported forms : (4 forms)
 21219  //
 21220  //    * PHADDW mm, mm       [SSSE3]
 21221  //    * PHADDW m64, mm      [SSSE3]
 21222  //    * PHADDW xmm, xmm     [SSSE3]
 21223  //    * PHADDW m128, xmm    [SSSE3]
 21224  //
func (self *Program) PHADDW(v0 interface{}, v1 interface{}) *Instruction {
    // Allocate the instruction; one encoder is registered per matching operand form.
    p := self.alloc("PHADDW", 2, Operands { v0, v1 })
    // PHADDW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // MMX form: optional REX, three-byte opcode 0F 38 01, then ModRM.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x01)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PHADDW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory operand: ModRM/SIB/disp emitted via mrsd.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x01)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PHADDW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // The 0x66 prefix selects the 128-bit XMM form: 66 0F 38 01 /r.
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x01)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PHADDW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x01)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoder matched: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for PHADDW")
    }
    return p
}
 21282  
 21283  // PHMINPOSUW performs "Packed Horizontal Minimum of Unsigned Word Integers".
 21284  //
 21285  // Mnemonic        : PHMINPOSUW
 21286  // Supported forms : (2 forms)
 21287  //
 21288  //    * PHMINPOSUW xmm, xmm     [SSE4.1]
 21289  //    * PHMINPOSUW m128, xmm    [SSE4.1]
 21290  //
func (self *Program) PHMINPOSUW(v0 interface{}, v1 interface{}) *Instruction {
    // Allocate the instruction; one encoder is registered per matching operand form.
    p := self.alloc("PHMINPOSUW", 2, Operands { v0, v1 })
    // PHMINPOSUW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // SSE4.1 encoding: 66 0F 38 41 /r (mandatory 0x66 prefix).
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x41)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PHMINPOSUW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory operand: ModRM/SIB/disp emitted via mrsd.
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x41)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoder matched: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for PHMINPOSUW")
    }
    return p
}
 21324  
 21325  // PHSUBD performs "Packed Horizontal Subtract Doubleword Integers".
 21326  //
 21327  // Mnemonic        : PHSUBD
 21328  // Supported forms : (4 forms)
 21329  //
 21330  //    * PHSUBD mm, mm       [SSSE3]
 21331  //    * PHSUBD m64, mm      [SSSE3]
 21332  //    * PHSUBD xmm, xmm     [SSSE3]
 21333  //    * PHSUBD m128, xmm    [SSSE3]
 21334  //
func (self *Program) PHSUBD(v0 interface{}, v1 interface{}) *Instruction {
    // Allocate the instruction; one encoder is registered per matching operand form.
    p := self.alloc("PHSUBD", 2, Operands { v0, v1 })
    // PHSUBD mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // MMX form: optional REX, three-byte opcode 0F 38 06, then ModRM.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x06)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PHSUBD m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory operand: ModRM/SIB/disp emitted via mrsd.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x06)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PHSUBD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // The 0x66 prefix selects the 128-bit XMM form: 66 0F 38 06 /r.
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x06)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PHSUBD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x06)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoder matched: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for PHSUBD")
    }
    return p
}
 21392  
 21393  // PHSUBSW performs "Packed Horizontal Subtract Signed Word Integers with Signed Saturation".
 21394  //
 21395  // Mnemonic        : PHSUBSW
 21396  // Supported forms : (4 forms)
 21397  //
 21398  //    * PHSUBSW mm, mm       [SSSE3]
 21399  //    * PHSUBSW m64, mm      [SSSE3]
 21400  //    * PHSUBSW xmm, xmm     [SSSE3]
 21401  //    * PHSUBSW m128, xmm    [SSSE3]
 21402  //
func (self *Program) PHSUBSW(v0 interface{}, v1 interface{}) *Instruction {
    // Allocate the instruction; one encoder is registered per matching operand form.
    p := self.alloc("PHSUBSW", 2, Operands { v0, v1 })
    // PHSUBSW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // MMX form: optional REX, three-byte opcode 0F 38 07, then ModRM.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x07)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PHSUBSW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory operand: ModRM/SIB/disp emitted via mrsd.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x07)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PHSUBSW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // The 0x66 prefix selects the 128-bit XMM form: 66 0F 38 07 /r.
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x07)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PHSUBSW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x07)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoder matched: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for PHSUBSW")
    }
    return p
}
 21460  
 21461  // PHSUBW performs "Packed Horizontal Subtract Word Integers".
 21462  //
 21463  // Mnemonic        : PHSUBW
 21464  // Supported forms : (4 forms)
 21465  //
 21466  //    * PHSUBW mm, mm       [SSSE3]
 21467  //    * PHSUBW m64, mm      [SSSE3]
 21468  //    * PHSUBW xmm, xmm     [SSSE3]
 21469  //    * PHSUBW m128, xmm    [SSSE3]
 21470  //
func (self *Program) PHSUBW(v0 interface{}, v1 interface{}) *Instruction {
    // Allocate the instruction; one encoder is registered per matching operand form.
    p := self.alloc("PHSUBW", 2, Operands { v0, v1 })
    // PHSUBW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // MMX form: optional REX, three-byte opcode 0F 38 05, then ModRM.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x05)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PHSUBW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory operand: ModRM/SIB/disp emitted via mrsd.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x05)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PHSUBW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // The 0x66 prefix selects the 128-bit XMM form: 66 0F 38 05 /r.
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x05)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PHSUBW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x05)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoder matched: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for PHSUBW")
    }
    return p
}
 21528  
 21529  // PI2FD performs "Packed Integer to Floating-Point Doubleword Conversion".
 21530  //
 21531  // Mnemonic        : PI2FD
 21532  // Supported forms : (2 forms)
 21533  //
 21534  //    * PI2FD mm, mm     [3dnow!]
 21535  //    * PI2FD m64, mm    [3dnow!]
 21536  //
func (self *Program) PI2FD(v0 interface{}, v1 interface{}) *Instruction {
    // Allocate the instruction; one encoder is registered per matching operand form.
    p := self.alloc("PI2FD", 2, Operands { v0, v1 })
    // PI2FD mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Optional REX, 0F 0F escape, ModRM (reg = dst, rm = src),
            // then the 3DNow! sub-opcode byte (0x0D) AFTER the ModRM byte.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
            m.emit(0x0d)
        })
    }
    // PI2FD m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: same escape; ModRM/SIB/disp via mrsd, suffix opcode last.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
            m.emit(0x0d)
        })
    }
    // No encoder matched: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for PI2FD")
    }
    return p
}
 21568  
 21569  // PI2FW performs "Packed Integer to Floating-Point Word Conversion".
 21570  //
 21571  // Mnemonic        : PI2FW
 21572  // Supported forms : (2 forms)
 21573  //
 21574  //    * PI2FW mm, mm     [3dnow!+]
 21575  //    * PI2FW m64, mm    [3dnow!+]
 21576  //
func (self *Program) PI2FW(v0 interface{}, v1 interface{}) *Instruction {
    // Allocate the instruction; one encoder is registered per matching operand form.
    p := self.alloc("PI2FW", 2, Operands { v0, v1 })
    // PI2FW mm, mm
    if isMM(v0) && isMM(v1) {
        // PI2FW requires the extended 3DNow! instruction set.
        self.require(ISA_3DNOW_PLUS)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Optional REX, 0F 0F escape, ModRM (reg = dst, rm = src),
            // then the 3DNow! sub-opcode byte (0x0C) AFTER the ModRM byte.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
            m.emit(0x0c)
        })
    }
    // PI2FW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW_PLUS)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: same escape; ModRM/SIB/disp via mrsd, suffix opcode last.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
            m.emit(0x0c)
        })
    }
    // No encoder matched: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for PI2FW")
    }
    return p
}
 21608  
 21609  // PINSRB performs "Insert Byte".
 21610  //
 21611  // Mnemonic        : PINSRB
 21612  // Supported forms : (2 forms)
 21613  //
 21614  //    * PINSRB imm8, r32, xmm    [SSE4.1]
 21615  //    * PINSRB imm8, m8, xmm     [SSE4.1]
 21616  //
func (self *Program) PINSRB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Allocate the instruction; one encoder is registered per matching operand form.
    p := self.alloc("PINSRB", 3, Operands { v0, v1, v2 })
    // PINSRB imm8, r32, xmm
    if isImm8(v0) && isReg32(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // SSE4.1 encoding: 66 0F 3A 20 /r ib; the imm8 selector is emitted last.
            m.emit(0x66)
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x20)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // PINSRB imm8, m8, xmm
    if isImm8(v0) && isM8(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory operand: ModRM/SIB/disp via mrsd, then the trailing imm8.
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x20)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoder matched: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for PINSRB")
    }
    return p
}
 21652  
 21653  // PINSRD performs "Insert Doubleword".
 21654  //
 21655  // Mnemonic        : PINSRD
 21656  // Supported forms : (2 forms)
 21657  //
 21658  //    * PINSRD imm8, r32, xmm    [SSE4.1]
 21659  //    * PINSRD imm8, m32, xmm    [SSE4.1]
 21660  //
func (self *Program) PINSRD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Allocate the instruction; one encoder is registered per matching operand form.
    p := self.alloc("PINSRD", 3, Operands { v0, v1, v2 })
    // PINSRD imm8, r32, xmm
    if isImm8(v0) && isReg32(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // SSE4.1 encoding: 66 0F 3A 22 /r ib; the imm8 selector is emitted last.
            m.emit(0x66)
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x22)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // PINSRD imm8, m32, xmm
    if isImm8(v0) && isM32(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory operand: ModRM/SIB/disp via mrsd, then the trailing imm8.
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x22)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoder matched: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for PINSRD")
    }
    return p
}
 21696  
 21697  // PINSRQ performs "Insert Quadword".
 21698  //
 21699  // Mnemonic        : PINSRQ
 21700  // Supported forms : (2 forms)
 21701  //
 21702  //    * PINSRQ imm8, r64, xmm    [SSE4.1]
 21703  //    * PINSRQ imm8, m64, xmm    [SSE4.1]
 21704  //
func (self *Program) PINSRQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Allocate the instruction; one encoder is registered per matching operand form.
    p := self.alloc("PINSRQ", 3, Operands { v0, v1, v2 })
    // PINSRQ imm8, r64, xmm
    if isImm8(v0) && isReg64(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            // Mandatory REX.W prefix (base 0x48) for 64-bit operand size, with the
            // R extension bit for the XMM register and B for the GP register.
            m.emit(0x48 | hcode(v[2]) << 2 | hcode(v[1]))
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x22)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // PINSRQ imm8, m64, xmm
    if isImm8(v0) && isM64(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            // rexm emits REX with W=1 for the memory form.
            m.rexm(1, hcode(v[2]), addr(v[1]))
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x22)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoder matched: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for PINSRQ")
    }
    return p
}
 21740  
 21741  // PINSRW performs "Insert Word".
 21742  //
 21743  // Mnemonic        : PINSRW
 21744  // Supported forms : (4 forms)
 21745  //
 21746  //    * PINSRW imm8, r32, mm     [MMX+]
 21747  //    * PINSRW imm8, m16, mm     [MMX+]
 21748  //    * PINSRW imm8, r32, xmm    [SSE2]
 21749  //    * PINSRW imm8, m16, xmm    [SSE2]
 21750  //
func (self *Program) PINSRW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Allocate the instruction; one encoder is registered per matching operand form.
    p := self.alloc("PINSRW", 3, Operands { v0, v1, v2 })
    // PINSRW imm8, r32, mm
    if isImm8(v0) && isReg32(v1) && isMM(v2) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // MMX form: 0F C4 /r ib (no 0x66 prefix).
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0xc4)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // PINSRW imm8, m16, mm
    if isImm8(v0) && isM16(v1) && isMM(v2) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory operand: ModRM/SIB/disp via mrsd, then the trailing imm8.
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xc4)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // PINSRW imm8, r32, xmm
    if isImm8(v0) && isReg32(v1) && isXMM(v2) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // The 0x66 prefix selects the SSE2 XMM form: 66 0F C4 /r ib.
            m.emit(0x66)
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0xc4)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // PINSRW imm8, m16, xmm
    if isImm8(v0) && isM16(v1) && isXMM(v2) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xc4)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoder matched: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for PINSRW")
    }
    return p
}
 21808  
// PMADDUBSW performs "Multiply and Add Packed Signed and Unsigned Byte Integers".
//
// Mnemonic        : PMADDUBSW
// Supported forms : (4 forms)
//
//    * PMADDUBSW mm, mm       [SSSE3]
//    * PMADDUBSW m64, mm      [SSSE3]
//    * PMADDUBSW xmm, xmm     [SSSE3]
//    * PMADDUBSW m128, xmm    [SSSE3]
//
func (self *Program) PMADDUBSW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMADDUBSW", 2, Operands { v0, v1 })
    // PMADDUBSW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        // encoding: [REX] 0F 38 04 /r — register-direct ModRM (mod = 11)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x04)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMADDUBSW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        // encoding: [REX] 0F 38 04 /r — memory operand emitted via mrsd (ModRM/SIB/disp)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x04)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PMADDUBSW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        // encoding: 66 [REX] 0F 38 04 /r — register-direct ModRM (mod = 11)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x04)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMADDUBSW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        // encoding: 66 [REX] 0F 38 04 /r — memory operand emitted via mrsd (ModRM/SIB/disp)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x04)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no form matched the given operand types
    if p.len == 0 {
        panic("invalid operands for PMADDUBSW")
    }
    return p
}
 21876  
// PMADDWD performs "Multiply and Add Packed Signed Word Integers".
//
// Mnemonic        : PMADDWD
// Supported forms : (4 forms)
//
//    * PMADDWD mm, mm       [MMX]
//    * PMADDWD m64, mm      [MMX]
//    * PMADDWD xmm, xmm     [SSE2]
//    * PMADDWD m128, xmm    [SSE2]
//
func (self *Program) PMADDWD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMADDWD", 2, Operands { v0, v1 })
    // PMADDWD mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        // encoding: [REX] 0F F5 /r — register-direct ModRM (mod = 11)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xf5)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMADDWD m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        // encoding: [REX] 0F F5 /r — memory operand emitted via mrsd (ModRM/SIB/disp)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xf5)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PMADDWD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // encoding: 66 [REX] 0F F5 /r — register-direct ModRM (mod = 11)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xf5)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMADDWD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // encoding: 66 [REX] 0F F5 /r — memory operand emitted via mrsd (ModRM/SIB/disp)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xf5)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no form matched the given operand types
    if p.len == 0 {
        panic("invalid operands for PMADDWD")
    }
    return p
}
 21940  
// PMAXSB performs "Maximum of Packed Signed Byte Integers".
//
// Mnemonic        : PMAXSB
// Supported forms : (2 forms)
//
//    * PMAXSB xmm, xmm     [SSE4.1]
//    * PMAXSB m128, xmm    [SSE4.1]
//
func (self *Program) PMAXSB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMAXSB", 2, Operands { v0, v1 })
    // PMAXSB xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 [REX] 0F 38 3C /r — register-direct ModRM (mod = 11)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x3c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMAXSB m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 [REX] 0F 38 3C /r — memory operand emitted via mrsd (ModRM/SIB/disp)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x3c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no form matched the given operand types
    if p.len == 0 {
        panic("invalid operands for PMAXSB")
    }
    return p
}
 21982  
// PMAXSD performs "Maximum of Packed Signed Doubleword Integers".
//
// Mnemonic        : PMAXSD
// Supported forms : (2 forms)
//
//    * PMAXSD xmm, xmm     [SSE4.1]
//    * PMAXSD m128, xmm    [SSE4.1]
//
func (self *Program) PMAXSD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMAXSD", 2, Operands { v0, v1 })
    // PMAXSD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 [REX] 0F 38 3D /r — register-direct ModRM (mod = 11)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x3d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMAXSD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 [REX] 0F 38 3D /r — memory operand emitted via mrsd (ModRM/SIB/disp)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x3d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no form matched the given operand types
    if p.len == 0 {
        panic("invalid operands for PMAXSD")
    }
    return p
}
 22024  
// PMAXSW performs "Maximum of Packed Signed Word Integers".
//
// Mnemonic        : PMAXSW
// Supported forms : (4 forms)
//
//    * PMAXSW mm, mm       [MMX+]
//    * PMAXSW m64, mm      [MMX+]
//    * PMAXSW xmm, xmm     [SSE2]
//    * PMAXSW m128, xmm    [SSE2]
//
func (self *Program) PMAXSW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMAXSW", 2, Operands { v0, v1 })
    // PMAXSW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        // encoding: [REX] 0F EE /r — register-direct ModRM (mod = 11)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xee)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMAXSW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        // encoding: [REX] 0F EE /r — memory operand emitted via mrsd (ModRM/SIB/disp)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xee)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PMAXSW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // encoding: 66 [REX] 0F EE /r — register-direct ModRM (mod = 11)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xee)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMAXSW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // encoding: 66 [REX] 0F EE /r — memory operand emitted via mrsd (ModRM/SIB/disp)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xee)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no form matched the given operand types
    if p.len == 0 {
        panic("invalid operands for PMAXSW")
    }
    return p
}
 22088  
// PMAXUB performs "Maximum of Packed Unsigned Byte Integers".
//
// Mnemonic        : PMAXUB
// Supported forms : (4 forms)
//
//    * PMAXUB mm, mm       [MMX+]
//    * PMAXUB m64, mm      [MMX+]
//    * PMAXUB xmm, xmm     [SSE2]
//    * PMAXUB m128, xmm    [SSE2]
//
func (self *Program) PMAXUB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMAXUB", 2, Operands { v0, v1 })
    // PMAXUB mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        // encoding: [REX] 0F DE /r — register-direct ModRM (mod = 11)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xde)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMAXUB m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        // encoding: [REX] 0F DE /r — memory operand emitted via mrsd (ModRM/SIB/disp)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xde)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PMAXUB xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // encoding: 66 [REX] 0F DE /r — register-direct ModRM (mod = 11)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xde)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMAXUB m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // encoding: 66 [REX] 0F DE /r — memory operand emitted via mrsd (ModRM/SIB/disp)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xde)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no form matched the given operand types
    if p.len == 0 {
        panic("invalid operands for PMAXUB")
    }
    return p
}
 22152  
// PMAXUD performs "Maximum of Packed Unsigned Doubleword Integers".
//
// Mnemonic        : PMAXUD
// Supported forms : (2 forms)
//
//    * PMAXUD xmm, xmm     [SSE4.1]
//    * PMAXUD m128, xmm    [SSE4.1]
//
func (self *Program) PMAXUD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMAXUD", 2, Operands { v0, v1 })
    // PMAXUD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 [REX] 0F 38 3F /r — register-direct ModRM (mod = 11)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x3f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMAXUD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 [REX] 0F 38 3F /r — memory operand emitted via mrsd (ModRM/SIB/disp)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x3f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no form matched the given operand types
    if p.len == 0 {
        panic("invalid operands for PMAXUD")
    }
    return p
}
 22194  
// PMAXUW performs "Maximum of Packed Unsigned Word Integers".
//
// Mnemonic        : PMAXUW
// Supported forms : (2 forms)
//
//    * PMAXUW xmm, xmm     [SSE4.1]
//    * PMAXUW m128, xmm    [SSE4.1]
//
func (self *Program) PMAXUW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMAXUW", 2, Operands { v0, v1 })
    // PMAXUW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 [REX] 0F 38 3E /r — register-direct ModRM (mod = 11)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x3e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMAXUW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 [REX] 0F 38 3E /r — memory operand emitted via mrsd (ModRM/SIB/disp)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x3e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no form matched the given operand types
    if p.len == 0 {
        panic("invalid operands for PMAXUW")
    }
    return p
}
 22236  
// PMINSB performs "Minimum of Packed Signed Byte Integers".
//
// Mnemonic        : PMINSB
// Supported forms : (2 forms)
//
//    * PMINSB xmm, xmm     [SSE4.1]
//    * PMINSB m128, xmm    [SSE4.1]
//
func (self *Program) PMINSB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMINSB", 2, Operands { v0, v1 })
    // PMINSB xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 [REX] 0F 38 38 /r — register-direct ModRM (mod = 11)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x38)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMINSB m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 [REX] 0F 38 38 /r — memory operand emitted via mrsd (ModRM/SIB/disp)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x38)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no form matched the given operand types
    if p.len == 0 {
        panic("invalid operands for PMINSB")
    }
    return p
}
 22278  
// PMINSD performs "Minimum of Packed Signed Doubleword Integers".
//
// Mnemonic        : PMINSD
// Supported forms : (2 forms)
//
//    * PMINSD xmm, xmm     [SSE4.1]
//    * PMINSD m128, xmm    [SSE4.1]
//
func (self *Program) PMINSD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMINSD", 2, Operands { v0, v1 })
    // PMINSD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 [REX] 0F 38 39 /r — register-direct ModRM (mod = 11)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x39)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMINSD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 [REX] 0F 38 39 /r — memory operand emitted via mrsd (ModRM/SIB/disp)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x39)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no form matched the given operand types
    if p.len == 0 {
        panic("invalid operands for PMINSD")
    }
    return p
}
 22320  
// PMINSW performs "Minimum of Packed Signed Word Integers".
//
// Mnemonic        : PMINSW
// Supported forms : (4 forms)
//
//    * PMINSW mm, mm       [MMX+]
//    * PMINSW m64, mm      [MMX+]
//    * PMINSW xmm, xmm     [SSE2]
//    * PMINSW m128, xmm    [SSE2]
//
func (self *Program) PMINSW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMINSW", 2, Operands { v0, v1 })
    // PMINSW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        // encoding: [REX] 0F EA /r — register-direct ModRM (mod = 11)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xea)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMINSW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        // encoding: [REX] 0F EA /r — memory operand emitted via mrsd (ModRM/SIB/disp)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xea)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PMINSW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // encoding: 66 [REX] 0F EA /r — register-direct ModRM (mod = 11)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xea)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMINSW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // encoding: 66 [REX] 0F EA /r — memory operand emitted via mrsd (ModRM/SIB/disp)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xea)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no form matched the given operand types
    if p.len == 0 {
        panic("invalid operands for PMINSW")
    }
    return p
}
 22384  
// PMINUB performs "Minimum of Packed Unsigned Byte Integers".
//
// Mnemonic        : PMINUB
// Supported forms : (4 forms)
//
//    * PMINUB mm, mm       [MMX+]
//    * PMINUB m64, mm      [MMX+]
//    * PMINUB xmm, xmm     [SSE2]
//    * PMINUB m128, xmm    [SSE2]
//
func (self *Program) PMINUB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMINUB", 2, Operands { v0, v1 })
    // PMINUB mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        // encoding: [REX] 0F DA /r — register-direct ModRM (mod = 11)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xda)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMINUB m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        // encoding: [REX] 0F DA /r — memory operand emitted via mrsd (ModRM/SIB/disp)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xda)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PMINUB xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // encoding: 66 [REX] 0F DA /r — register-direct ModRM (mod = 11)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xda)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMINUB m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // encoding: 66 [REX] 0F DA /r — memory operand emitted via mrsd (ModRM/SIB/disp)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xda)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no form matched the given operand types
    if p.len == 0 {
        panic("invalid operands for PMINUB")
    }
    return p
}
 22448  
// PMINUD performs "Minimum of Packed Unsigned Doubleword Integers".
//
// Mnemonic        : PMINUD
// Supported forms : (2 forms)
//
//    * PMINUD xmm, xmm     [SSE4.1]
//    * PMINUD m128, xmm    [SSE4.1]
//
func (self *Program) PMINUD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMINUD", 2, Operands { v0, v1 })
    // PMINUD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 [REX] 0F 38 3B /r — register-direct ModRM (mod = 11)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x3b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMINUD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 [REX] 0F 38 3B /r — memory operand emitted via mrsd (ModRM/SIB/disp)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x3b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no form matched the given operand types
    if p.len == 0 {
        panic("invalid operands for PMINUD")
    }
    return p
}
 22490  
// PMINUW performs "Minimum of Packed Unsigned Word Integers".
//
// Mnemonic        : PMINUW
// Supported forms : (2 forms)
//
//    * PMINUW xmm, xmm     [SSE4.1]
//    * PMINUW m128, xmm    [SSE4.1]
//
func (self *Program) PMINUW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMINUW", 2, Operands { v0, v1 })
    // PMINUW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 [REX] 0F 38 3A /r — register-direct ModRM (mod = 11)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x3a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMINUW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 [REX] 0F 38 3A /r — memory operand emitted via mrsd (ModRM/SIB/disp)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x3a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no form matched the given operand types
    if p.len == 0 {
        panic("invalid operands for PMINUW")
    }
    return p
}
 22532  
// PMOVMSKB performs "Move Byte Mask".
//
// Mnemonic        : PMOVMSKB
// Supported forms : (2 forms)
//
//    * PMOVMSKB mm, r32     [MMX+]
//    * PMOVMSKB xmm, r32    [SSE2]
//
func (self *Program) PMOVMSKB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMOVMSKB", 2, Operands { v0, v1 })
    // PMOVMSKB mm, r32
    if isMM(v0) && isReg32(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        // encoding: [REX] 0F D7 /r — both operands are registers (mod = 11);
        // v[1] (r32) goes in ModRM.reg, v[0] (mm) in ModRM.rm
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xd7)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMOVMSKB xmm, r32
    if isXMM(v0) && isReg32(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // encoding: 66 [REX] 0F D7 /r — register-direct ModRM (mod = 11)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xd7)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // no form matched the given operand types
    if p.len == 0 {
        panic("invalid operands for PMOVMSKB")
    }
    return p
}
 22571  
// PMOVSXBD performs "Move Packed Byte Integers to Doubleword Integers with Sign Extension".
//
// Mnemonic        : PMOVSXBD
// Supported forms : (2 forms)
//
//    * PMOVSXBD xmm, xmm    [SSE4.1]
//    * PMOVSXBD m32, xmm    [SSE4.1]
//
func (self *Program) PMOVSXBD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMOVSXBD", 2, Operands { v0, v1 })
    // PMOVSXBD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 [REX] 0F 38 21 /r — register-direct ModRM (mod = 11)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x21)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMOVSXBD m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 [REX] 0F 38 21 /r — memory operand emitted via mrsd (ModRM/SIB/disp)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x21)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no form matched the given operand types
    if p.len == 0 {
        panic("invalid operands for PMOVSXBD")
    }
    return p
}
 22613  
// PMOVSXBQ performs "Move Packed Byte Integers to Quadword Integers with Sign Extension".
//
// Mnemonic        : PMOVSXBQ
// Supported forms : (2 forms)
//
//    * PMOVSXBQ xmm, xmm    [SSE4.1]
//    * PMOVSXBQ m16, xmm    [SSE4.1]
//
func (self *Program) PMOVSXBQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMOVSXBQ", 2, Operands { v0, v1 })
    // PMOVSXBQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 [REX] 0F 38 22 /r — register-direct ModRM (mod = 11)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x22)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMOVSXBQ m16, xmm
    if isM16(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 [REX] 0F 38 22 /r — memory operand emitted via mrsd (ModRM/SIB/disp)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x22)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no form matched the given operand types
    if p.len == 0 {
        panic("invalid operands for PMOVSXBQ")
    }
    return p
}
 22655  
// PMOVSXBW performs "Move Packed Byte Integers to Word Integers with Sign Extension".
//
// Mnemonic        : PMOVSXBW
// Supported forms : (2 forms)
//
//    * PMOVSXBW xmm, xmm    [SSE4.1]
//    * PMOVSXBW m64, xmm    [SSE4.1]
//
func (self *Program) PMOVSXBW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMOVSXBW", 2, Operands { v0, v1 })
    // PMOVSXBW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 [REX] 0F 38 20 /r — register-direct ModRM (mod = 11)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x20)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMOVSXBW m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 [REX] 0F 38 20 /r — memory operand emitted via mrsd (ModRM/SIB/disp)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x20)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no form matched the given operand types
    if p.len == 0 {
        panic("invalid operands for PMOVSXBW")
    }
    return p
}
 22697  
// PMOVSXDQ performs "Move Packed Doubleword Integers to Quadword Integers with Sign Extension".
//
// Mnemonic        : PMOVSXDQ
// Supported forms : (2 forms)
//
//    * PMOVSXDQ xmm, xmm    [SSE4.1]
//    * PMOVSXDQ m64, xmm    [SSE4.1]
//
func (self *Program) PMOVSXDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMOVSXDQ", 2, Operands { v0, v1 })
    // PMOVSXDQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // mandatory 66 prefix of this SSE4.1 encoding
            m.rexo(hcode(v[1]), v[0], false)                // optional REX carrying the high bits of the register codes
            m.emit(0x0f)                                    // opcode escape byte
            m.emit(0x38)                                    // selects the 0F 38 three-byte opcode map
            m.emit(0x25)                                    // opcode byte for PMOVSXDQ
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11 (reg-reg), reg=dst, rm=src
        })
    }
    // PMOVSXDQ m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)          // REX also covers index/base extension bits of the address
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x25)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for PMOVSXDQ")
    }
    return p
}
 22739  
// PMOVSXWD performs "Move Packed Word Integers to Doubleword Integers with Sign Extension".
//
// Mnemonic        : PMOVSXWD
// Supported forms : (2 forms)
//
//    * PMOVSXWD xmm, xmm    [SSE4.1]
//    * PMOVSXWD m64, xmm    [SSE4.1]
//
func (self *Program) PMOVSXWD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMOVSXWD", 2, Operands { v0, v1 })
    // PMOVSXWD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // mandatory 66 prefix of this SSE4.1 encoding
            m.rexo(hcode(v[1]), v[0], false)                // optional REX carrying the high bits of the register codes
            m.emit(0x0f)                                    // opcode escape byte
            m.emit(0x38)                                    // selects the 0F 38 three-byte opcode map
            m.emit(0x23)                                    // opcode byte for PMOVSXWD
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11 (reg-reg), reg=dst, rm=src
        })
    }
    // PMOVSXWD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)          // REX also covers index/base extension bits of the address
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x23)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for PMOVSXWD")
    }
    return p
}
 22781  
// PMOVSXWQ performs "Move Packed Word Integers to Quadword Integers with Sign Extension".
//
// Mnemonic        : PMOVSXWQ
// Supported forms : (2 forms)
//
//    * PMOVSXWQ xmm, xmm    [SSE4.1]
//    * PMOVSXWQ m32, xmm    [SSE4.1]
//
func (self *Program) PMOVSXWQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMOVSXWQ", 2, Operands { v0, v1 })
    // PMOVSXWQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // mandatory 66 prefix of this SSE4.1 encoding
            m.rexo(hcode(v[1]), v[0], false)                // optional REX carrying the high bits of the register codes
            m.emit(0x0f)                                    // opcode escape byte
            m.emit(0x38)                                    // selects the 0F 38 three-byte opcode map
            m.emit(0x24)                                    // opcode byte for PMOVSXWQ
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11 (reg-reg), reg=dst, rm=src
        })
    }
    // PMOVSXWQ m32, xmm — only 32 bits (two words) are read from memory
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)          // REX also covers index/base extension bits of the address
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x24)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for PMOVSXWQ")
    }
    return p
}
 22823  
// PMOVZXBD performs "Move Packed Byte Integers to Doubleword Integers with Zero Extension".
//
// Mnemonic        : PMOVZXBD
// Supported forms : (2 forms)
//
//    * PMOVZXBD xmm, xmm    [SSE4.1]
//    * PMOVZXBD m32, xmm    [SSE4.1]
//
func (self *Program) PMOVZXBD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMOVZXBD", 2, Operands { v0, v1 })
    // PMOVZXBD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // mandatory 66 prefix of this SSE4.1 encoding
            m.rexo(hcode(v[1]), v[0], false)                // optional REX carrying the high bits of the register codes
            m.emit(0x0f)                                    // opcode escape byte
            m.emit(0x38)                                    // selects the 0F 38 three-byte opcode map
            m.emit(0x31)                                    // opcode byte for PMOVZXBD
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11 (reg-reg), reg=dst, rm=src
        })
    }
    // PMOVZXBD m32, xmm — only 32 bits (four bytes) are read from memory
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)          // REX also covers index/base extension bits of the address
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x31)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for PMOVZXBD")
    }
    return p
}
 22865  
// PMOVZXBQ performs "Move Packed Byte Integers to Quadword Integers with Zero Extension".
//
// Mnemonic        : PMOVZXBQ
// Supported forms : (2 forms)
//
//    * PMOVZXBQ xmm, xmm    [SSE4.1]
//    * PMOVZXBQ m16, xmm    [SSE4.1]
//
func (self *Program) PMOVZXBQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMOVZXBQ", 2, Operands { v0, v1 })
    // PMOVZXBQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // mandatory 66 prefix of this SSE4.1 encoding
            m.rexo(hcode(v[1]), v[0], false)                // optional REX carrying the high bits of the register codes
            m.emit(0x0f)                                    // opcode escape byte
            m.emit(0x38)                                    // selects the 0F 38 three-byte opcode map
            m.emit(0x32)                                    // opcode byte for PMOVZXBQ
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11 (reg-reg), reg=dst, rm=src
        })
    }
    // PMOVZXBQ m16, xmm — only 16 bits (two bytes) are read from memory
    if isM16(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)          // REX also covers index/base extension bits of the address
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x32)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for PMOVZXBQ")
    }
    return p
}
 22907  
// PMOVZXBW performs "Move Packed Byte Integers to Word Integers with Zero Extension".
//
// Mnemonic        : PMOVZXBW
// Supported forms : (2 forms)
//
//    * PMOVZXBW xmm, xmm    [SSE4.1]
//    * PMOVZXBW m64, xmm    [SSE4.1]
//
func (self *Program) PMOVZXBW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMOVZXBW", 2, Operands { v0, v1 })
    // PMOVZXBW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // mandatory 66 prefix of this SSE4.1 encoding
            m.rexo(hcode(v[1]), v[0], false)                // optional REX carrying the high bits of the register codes
            m.emit(0x0f)                                    // opcode escape byte
            m.emit(0x38)                                    // selects the 0F 38 three-byte opcode map
            m.emit(0x30)                                    // opcode byte for PMOVZXBW
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11 (reg-reg), reg=dst, rm=src
        })
    }
    // PMOVZXBW m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)          // REX also covers index/base extension bits of the address
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x30)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for PMOVZXBW")
    }
    return p
}
 22949  
// PMOVZXDQ performs "Move Packed Doubleword Integers to Quadword Integers with Zero Extension".
//
// Mnemonic        : PMOVZXDQ
// Supported forms : (2 forms)
//
//    * PMOVZXDQ xmm, xmm    [SSE4.1]
//    * PMOVZXDQ m64, xmm    [SSE4.1]
//
func (self *Program) PMOVZXDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMOVZXDQ", 2, Operands { v0, v1 })
    // PMOVZXDQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // mandatory 66 prefix of this SSE4.1 encoding
            m.rexo(hcode(v[1]), v[0], false)                // optional REX carrying the high bits of the register codes
            m.emit(0x0f)                                    // opcode escape byte
            m.emit(0x38)                                    // selects the 0F 38 three-byte opcode map
            m.emit(0x35)                                    // opcode byte for PMOVZXDQ
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11 (reg-reg), reg=dst, rm=src
        })
    }
    // PMOVZXDQ m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)          // REX also covers index/base extension bits of the address
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x35)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for PMOVZXDQ")
    }
    return p
}
 22991  
// PMOVZXWD performs "Move Packed Word Integers to Doubleword Integers with Zero Extension".
//
// Mnemonic        : PMOVZXWD
// Supported forms : (2 forms)
//
//    * PMOVZXWD xmm, xmm    [SSE4.1]
//    * PMOVZXWD m64, xmm    [SSE4.1]
//
func (self *Program) PMOVZXWD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMOVZXWD", 2, Operands { v0, v1 })
    // PMOVZXWD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // mandatory 66 prefix of this SSE4.1 encoding
            m.rexo(hcode(v[1]), v[0], false)                // optional REX carrying the high bits of the register codes
            m.emit(0x0f)                                    // opcode escape byte
            m.emit(0x38)                                    // selects the 0F 38 three-byte opcode map
            m.emit(0x33)                                    // opcode byte for PMOVZXWD
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11 (reg-reg), reg=dst, rm=src
        })
    }
    // PMOVZXWD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)          // REX also covers index/base extension bits of the address
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x33)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for PMOVZXWD")
    }
    return p
}
 23033  
// PMOVZXWQ performs "Move Packed Word Integers to Quadword Integers with Zero Extension".
//
// Mnemonic        : PMOVZXWQ
// Supported forms : (2 forms)
//
//    * PMOVZXWQ xmm, xmm    [SSE4.1]
//    * PMOVZXWQ m32, xmm    [SSE4.1]
//
func (self *Program) PMOVZXWQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMOVZXWQ", 2, Operands { v0, v1 })
    // PMOVZXWQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // mandatory 66 prefix of this SSE4.1 encoding
            m.rexo(hcode(v[1]), v[0], false)                // optional REX carrying the high bits of the register codes
            m.emit(0x0f)                                    // opcode escape byte
            m.emit(0x38)                                    // selects the 0F 38 three-byte opcode map
            m.emit(0x34)                                    // opcode byte for PMOVZXWQ
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11 (reg-reg), reg=dst, rm=src
        })
    }
    // PMOVZXWQ m32, xmm — only 32 bits (two words) are read from memory
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)          // REX also covers index/base extension bits of the address
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x34)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for PMOVZXWQ")
    }
    return p
}
 23075  
// PMULDQ performs "Multiply Packed Signed Doubleword Integers and Store Quadword Result".
//
// Mnemonic        : PMULDQ
// Supported forms : (2 forms)
//
//    * PMULDQ xmm, xmm     [SSE4.1]
//    * PMULDQ m128, xmm    [SSE4.1]
//
func (self *Program) PMULDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMULDQ", 2, Operands { v0, v1 })
    // PMULDQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // mandatory 66 prefix of this SSE4.1 encoding
            m.rexo(hcode(v[1]), v[0], false)                // optional REX carrying the high bits of the register codes
            m.emit(0x0f)                                    // opcode escape byte
            m.emit(0x38)                                    // selects the 0F 38 three-byte opcode map
            m.emit(0x28)                                    // opcode byte for PMULDQ
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11 (reg-reg), reg=dst, rm=src
        })
    }
    // PMULDQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)          // REX also covers index/base extension bits of the address
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x28)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for PMULDQ")
    }
    return p
}
 23117  
// PMULHRSW performs "Packed Multiply Signed Word Integers and Store High Result with Round and Scale".
//
// Mnemonic        : PMULHRSW
// Supported forms : (4 forms)
//
//    * PMULHRSW mm, mm       [SSSE3]
//    * PMULHRSW m64, mm      [SSSE3]
//    * PMULHRSW xmm, xmm     [SSSE3]
//    * PMULHRSW m128, xmm    [SSSE3]
//
func (self *Program) PMULHRSW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMULHRSW", 2, Operands { v0, v1 })
    // PMULHRSW mm, mm — MMX form: no 66 prefix
    if isMM(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                // optional REX carrying the high bits of the register codes
            m.emit(0x0f)                                    // opcode escape byte
            m.emit(0x38)                                    // selects the 0F 38 three-byte opcode map
            m.emit(0x0b)                                    // opcode byte for PMULHRSW
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11 (reg-reg), reg=dst, rm=src
        })
    }
    // PMULHRSW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)          // REX also covers index/base extension bits of the address
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x0b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM + SIB + displacement for the memory operand
        })
    }
    // PMULHRSW xmm, xmm — SSE form: mandatory 66 prefix
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x0b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMULHRSW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x0b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for PMULHRSW")
    }
    return p
}
 23185  
// PMULHRW performs "Packed Multiply High Rounded Word".
//
// Mnemonic        : PMULHRW
// Supported forms : (2 forms)
//
//    * PMULHRW mm, mm     [3dnow!]
//    * PMULHRW m64, mm    [3dnow!]
//
func (self *Program) PMULHRW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMULHRW", 2, Operands { v0, v1 })
    // PMULHRW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                // optional REX carrying the high bits of the register codes
            m.emit(0x0f)                                    // 3DNow! instructions all share the 0F 0F escape ...
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11 (reg-reg), reg=dst, rm=src
            m.emit(0xb7)                                    // ... and are selected by a suffix byte AFTER the ModRM
        })
    }
    // PMULHRW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)          // REX also covers index/base extension bits of the address
            m.emit(0x0f)
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM + SIB + displacement for the memory operand
            m.emit(0xb7)                                    // 3DNow! opcode suffix emitted last
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for PMULHRW")
    }
    return p
}
 23225  
// PMULHUW performs "Multiply Packed Unsigned Word Integers and Store High Result".
//
// Mnemonic        : PMULHUW
// Supported forms : (4 forms)
//
//    * PMULHUW mm, mm       [MMX+]
//    * PMULHUW m64, mm      [MMX+]
//    * PMULHUW xmm, xmm     [SSE2]
//    * PMULHUW m128, xmm    [SSE2]
//
func (self *Program) PMULHUW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMULHUW", 2, Operands { v0, v1 })
    // PMULHUW mm, mm — MMX+ form: no 66 prefix
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                // optional REX carrying the high bits of the register codes
            m.emit(0x0f)                                    // opcode escape byte
            m.emit(0xe4)                                    // opcode byte for PMULHUW
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11 (reg-reg), reg=dst, rm=src
        })
    }
    // PMULHUW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)          // REX also covers index/base extension bits of the address
            m.emit(0x0f)
            m.emit(0xe4)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM + SIB + displacement for the memory operand
        })
    }
    // PMULHUW xmm, xmm — SSE2 form: mandatory 66 prefix
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xe4)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMULHUW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xe4)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for PMULHUW")
    }
    return p
}
 23289  
// PMULHW performs "Multiply Packed Signed Word Integers and Store High Result".
//
// Mnemonic        : PMULHW
// Supported forms : (4 forms)
//
//    * PMULHW mm, mm       [MMX]
//    * PMULHW m64, mm      [MMX]
//    * PMULHW xmm, xmm     [SSE2]
//    * PMULHW m128, xmm    [SSE2]
//
func (self *Program) PMULHW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMULHW", 2, Operands { v0, v1 })
    // PMULHW mm, mm — MMX form: no 66 prefix
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                // optional REX carrying the high bits of the register codes
            m.emit(0x0f)                                    // opcode escape byte
            m.emit(0xe5)                                    // opcode byte for PMULHW
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11 (reg-reg), reg=dst, rm=src
        })
    }
    // PMULHW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)          // REX also covers index/base extension bits of the address
            m.emit(0x0f)
            m.emit(0xe5)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM + SIB + displacement for the memory operand
        })
    }
    // PMULHW xmm, xmm — SSE2 form: mandatory 66 prefix
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xe5)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMULHW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xe5)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for PMULHW")
    }
    return p
}
 23353  
// PMULLD performs "Multiply Packed Signed Doubleword Integers and Store Low Result".
//
// Mnemonic        : PMULLD
// Supported forms : (2 forms)
//
//    * PMULLD xmm, xmm     [SSE4.1]
//    * PMULLD m128, xmm    [SSE4.1]
//
func (self *Program) PMULLD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMULLD", 2, Operands { v0, v1 })
    // PMULLD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // mandatory 66 prefix of this SSE4.1 encoding
            m.rexo(hcode(v[1]), v[0], false)                // optional REX carrying the high bits of the register codes
            m.emit(0x0f)                                    // opcode escape byte
            m.emit(0x38)                                    // selects the 0F 38 three-byte opcode map
            m.emit(0x40)                                    // opcode byte for PMULLD
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11 (reg-reg), reg=dst, rm=src
        })
    }
    // PMULLD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)          // REX also covers index/base extension bits of the address
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x40)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for PMULLD")
    }
    return p
}
 23395  
// PMULLW performs "Multiply Packed Signed Word Integers and Store Low Result".
//
// Mnemonic        : PMULLW
// Supported forms : (4 forms)
//
//    * PMULLW mm, mm       [MMX]
//    * PMULLW m64, mm      [MMX]
//    * PMULLW xmm, xmm     [SSE2]
//    * PMULLW m128, xmm    [SSE2]
//
func (self *Program) PMULLW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMULLW", 2, Operands { v0, v1 })
    // PMULLW mm, mm — MMX form: no 66 prefix
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                // optional REX carrying the high bits of the register codes
            m.emit(0x0f)                                    // opcode escape byte
            m.emit(0xd5)                                    // opcode byte for PMULLW
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11 (reg-reg), reg=dst, rm=src
        })
    }
    // PMULLW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)          // REX also covers index/base extension bits of the address
            m.emit(0x0f)
            m.emit(0xd5)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM + SIB + displacement for the memory operand
        })
    }
    // PMULLW xmm, xmm — SSE2 form: mandatory 66 prefix
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xd5)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMULLW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xd5)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for PMULLW")
    }
    return p
}
 23459  
// PMULUDQ performs "Multiply Packed Unsigned Doubleword Integers".
//
// Mnemonic        : PMULUDQ
// Supported forms : (4 forms)
//
//    * PMULUDQ mm, mm       [SSE2]
//    * PMULUDQ m64, mm      [SSE2]
//    * PMULUDQ xmm, xmm     [SSE2]
//    * PMULUDQ m128, xmm    [SSE2]
//
func (self *Program) PMULUDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMULUDQ", 2, Operands { v0, v1 })
    // PMULUDQ mm, mm — MMX-register form: no 66 prefix
    if isMM(v0) && isMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                // optional REX carrying the high bits of the register codes
            m.emit(0x0f)                                    // opcode escape byte
            m.emit(0xf4)                                    // opcode byte for PMULUDQ
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11 (reg-reg), reg=dst, rm=src
        })
    }
    // PMULUDQ m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)          // REX also covers index/base extension bits of the address
            m.emit(0x0f)
            m.emit(0xf4)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM + SIB + displacement for the memory operand
        })
    }
    // PMULUDQ xmm, xmm — XMM form: mandatory 66 prefix
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xf4)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMULUDQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xf4)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for PMULUDQ")
    }
    return p
}
 23523  
// POPCNTL performs "Count of Number of Bits Set to 1".
//
// Mnemonic        : POPCNT
// Supported forms : (2 forms)
//
//    * POPCNTL r32, r32    [POPCNT]
//    * POPCNTL m32, r32    [POPCNT]
//
func (self *Program) POPCNTL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("POPCNTL", 2, Operands { v0, v1 })
    // POPCNTL r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_POPCNT)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)                                    // mandatory F3 prefix distinguishing POPCNT from BSR-family encodings
            m.rexo(hcode(v[1]), v[0], false)                // optional REX carrying the high bits of the register codes
            m.emit(0x0f)                                    // opcode escape byte
            m.emit(0xb8)                                    // opcode byte for POPCNT
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11 (reg-reg), reg=dst, rm=src
        })
    }
    // POPCNTL m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_POPCNT)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)          // REX also covers index/base extension bits of the address
            m.emit(0x0f)
            m.emit(0xb8)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for POPCNTL")
    }
    return p
}
 23563  
// POPCNTQ performs "Count of Number of Bits Set to 1".
//
// Mnemonic        : POPCNT
// Supported forms : (2 forms)
//
//    * POPCNTQ r64, r64    [POPCNT]
//    * POPCNTQ m64, r64    [POPCNT]
//
// Each matched form registers one encoder closure on the instruction;
// the function panics if the operands fit none of the forms above.
func (self *Program) POPCNTQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("POPCNTQ", 2, Operands { v0, v1 })
    // POPCNTQ r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_POPCNT)
        p.domain = DomainGeneric
        // Encoding: F3 REX.W 0F B8 /r — the REX byte is built inline:
        // 0x48 sets W (64-bit operand size), bit 2 (R) extends the reg
        // field (v[1]), bit 0 (B) extends the rm field (v[0]).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
            m.emit(0x0f)
            m.emit(0xb8)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // POPCNTQ m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_POPCNT)
        p.domain = DomainGeneric
        // Encoding: F3 REX.W 0F B8 /r — rexm(1, ...) emits a REX prefix
        // with W set for the memory form; memory operand via ModRM/SIB.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexm(1, hcode(v[1]), addr(v[0]))
            m.emit(0x0f)
            m.emit(0xb8)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for POPCNTQ")
    }
    return p
}
 23603  
// POPCNTW performs "Count of Number of Bits Set to 1".
//
// Mnemonic        : POPCNT
// Supported forms : (2 forms)
//
//    * POPCNTW r16, r16    [POPCNT]
//    * POPCNTW m16, r16    [POPCNT]
//
// Each matched form registers one encoder closure on the instruction;
// the function panics if the operands fit none of the forms above.
func (self *Program) POPCNTW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("POPCNTW", 2, Operands { v0, v1 })
    // POPCNTW r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_POPCNT)
        p.domain = DomainGeneric
        // Encoding: 66 F3 [REX] 0F B8 /r — the 66 operand-size prefix
        // selects the 16-bit form; register-direct ModRM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xb8)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // POPCNTW m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_POPCNT)
        p.domain = DomainGeneric
        // Encoding: 66 F3 [REX] 0F B8 /r — memory operand via ModRM/SIB.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xb8)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for POPCNTW")
    }
    return p
}
 23645  
// POPQ performs "Pop a Value from the Stack".
//
// Mnemonic        : POP
// Supported forms : (2 forms)
//
//    * POPQ r64
//    * POPQ m64
//
// Each matched form registers one or more alternative encoder closures
// on the instruction; the function panics if the operand fits no form.
func (self *Program) POPQ(v0 interface{}) *Instruction {
    p := self.alloc("POPQ", 1, Operands { v0 })
    // POPQ r64
    if isReg64(v0) {
        p.domain = DomainGeneric
        // Short form: [REX] 58+rd — register number embedded in the opcode.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], false)
            m.emit(0x58 | lcode(v[0]))
        })
        // Alternative long form: [REX] 8F /0 with register-direct ModRM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], false)
            m.emit(0x8f)
            m.emit(0xc0 | lcode(v[0]))
        })
    }
    // POPQ m64
    if isM64(v0) {
        p.domain = DomainGeneric
        // Encoding: [REX] 8F /0 — memory operand via ModRM/SIB.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)
            m.emit(0x8f)
            m.mrsd(0, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for POPQ")
    }
    return p
}
 23683  
// POPW performs "Pop a Value from the Stack".
//
// Mnemonic        : POP
// Supported forms : (2 forms)
//
//    * POPW r16
//    * POPW m16
//
// Each matched form registers one or more alternative encoder closures
// on the instruction; the function panics if the operand fits no form.
func (self *Program) POPW(v0 interface{}) *Instruction {
    p := self.alloc("POPW", 1, Operands { v0 })
    // POPW r16
    if isReg16(v0) {
        p.domain = DomainGeneric
        // Short form: 66 [REX] 58+rd — the 66 operand-size prefix selects
        // the 16-bit form; register number embedded in the opcode.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[0], false)
            m.emit(0x58 | lcode(v[0]))
        })
        // Alternative long form: 66 [REX] 8F /0 with register-direct ModRM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[0], false)
            m.emit(0x8f)
            m.emit(0xc0 | lcode(v[0]))
        })
    }
    // POPW m16
    if isM16(v0) {
        p.domain = DomainGeneric
        // Encoding: 66 [REX] 8F /0 — memory operand via ModRM/SIB.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[0]), false)
            m.emit(0x8f)
            m.mrsd(0, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for POPW")
    }
    return p
}
 23724  
// POR performs "Packed Bitwise Logical OR".
//
// Mnemonic        : POR
// Supported forms : (4 forms)
//
//    * POR mm, mm       [MMX]
//    * POR m64, mm      [MMX]
//    * POR xmm, xmm     [SSE2]
//    * POR m128, xmm    [SSE2]
//
// Each matched form registers one encoder closure on the instruction;
// the function panics if the operands fit none of the forms above.
func (self *Program) POR(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("POR", 2, Operands { v0, v1 })
    // POR mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        // Encoding: [REX] 0F EB /r — register-direct ModRM,
        // reg field = v[1] (destination), rm field = v[0] (source).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xeb)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // POR m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        // Encoding: [REX] 0F EB /r — memory operand via ModRM/SIB.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xeb)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // POR xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 [REX] 0F EB /r — the 66 prefix selects the XMM form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xeb)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // POR m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 [REX] 0F EB /r — memory operand via ModRM/SIB.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xeb)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for POR")
    }
    return p
}
 23788  
// PREFETCH performs "Prefetch Data into Caches".
//
// Mnemonic        : PREFETCH
// Supported forms : (1 form)
//
//    * PREFETCH m8    [PREFETCH]
//
// The function panics if the operand is not an 8-bit memory location.
func (self *Program) PREFETCH(v0 interface{}) *Instruction {
    p := self.alloc("PREFETCH", 1, Operands { v0 })
    // PREFETCH m8
    if isM8(v0) {
        self.require(ISA_PREFETCH)
        p.domain = DomainGeneric
        // Encoding: [REX] 0F 0D /0 — opcode extension 0 in the ModRM reg
        // field; memory operand via ModRM/SIB.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x0d)
            m.mrsd(0, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PREFETCH")
    }
    return p
}
 23814  
// PREFETCHNTA performs "Prefetch Data Into Caches using NTA Hint".
//
// Mnemonic        : PREFETCHNTA
// Supported forms : (1 form)
//
//    * PREFETCHNTA m8    [MMX+]
//
// The function panics if the operand is not an 8-bit memory location.
func (self *Program) PREFETCHNTA(v0 interface{}) *Instruction {
    p := self.alloc("PREFETCHNTA", 1, Operands { v0 })
    // PREFETCHNTA m8
    if isM8(v0) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainGeneric
        // Encoding: [REX] 0F 18 /0 — opcode extension 0 selects the NTA
        // hint among the 0F 18 prefetch group; memory via ModRM/SIB.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x18)
            m.mrsd(0, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PREFETCHNTA")
    }
    return p
}
 23840  
// PREFETCHT0 performs "Prefetch Data Into Caches using T0 Hint".
//
// Mnemonic        : PREFETCHT0
// Supported forms : (1 form)
//
//    * PREFETCHT0 m8    [MMX+]
//
// The function panics if the operand is not an 8-bit memory location.
func (self *Program) PREFETCHT0(v0 interface{}) *Instruction {
    p := self.alloc("PREFETCHT0", 1, Operands { v0 })
    // PREFETCHT0 m8
    if isM8(v0) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainGeneric
        // Encoding: [REX] 0F 18 /1 — opcode extension 1 selects the T0
        // hint among the 0F 18 prefetch group; memory via ModRM/SIB.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x18)
            m.mrsd(1, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PREFETCHT0")
    }
    return p
}
 23866  
// PREFETCHT1 performs "Prefetch Data Into Caches using T1 Hint".
//
// Mnemonic        : PREFETCHT1
// Supported forms : (1 form)
//
//    * PREFETCHT1 m8    [MMX+]
//
// The function panics if the operand is not an 8-bit memory location.
func (self *Program) PREFETCHT1(v0 interface{}) *Instruction {
    p := self.alloc("PREFETCHT1", 1, Operands { v0 })
    // PREFETCHT1 m8
    if isM8(v0) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainGeneric
        // Encoding: [REX] 0F 18 /2 — opcode extension 2 selects the T1
        // hint among the 0F 18 prefetch group; memory via ModRM/SIB.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x18)
            m.mrsd(2, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PREFETCHT1")
    }
    return p
}
 23892  
// PREFETCHT2 performs "Prefetch Data Into Caches using T2 Hint".
//
// Mnemonic        : PREFETCHT2
// Supported forms : (1 form)
//
//    * PREFETCHT2 m8    [MMX+]
//
// The function panics if the operand is not an 8-bit memory location.
func (self *Program) PREFETCHT2(v0 interface{}) *Instruction {
    p := self.alloc("PREFETCHT2", 1, Operands { v0 })
    // PREFETCHT2 m8
    if isM8(v0) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainGeneric
        // Encoding: [REX] 0F 18 /3 — opcode extension 3 selects the T2
        // hint among the 0F 18 prefetch group; memory via ModRM/SIB.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x18)
            m.mrsd(3, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PREFETCHT2")
    }
    return p
}
 23918  
// PREFETCHW performs "Prefetch Data into Caches in Anticipation of a Write".
//
// Mnemonic        : PREFETCHW
// Supported forms : (1 form)
//
//    * PREFETCHW m8    [PREFETCHW]
//
// The function panics if the operand is not an 8-bit memory location.
func (self *Program) PREFETCHW(v0 interface{}) *Instruction {
    p := self.alloc("PREFETCHW", 1, Operands { v0 })
    // PREFETCHW m8
    if isM8(v0) {
        self.require(ISA_PREFETCHW)
        p.domain = DomainGeneric
        // Encoding: [REX] 0F 0D /1 — opcode extension 1 distinguishes
        // PREFETCHW from PREFETCH (/0); memory via ModRM/SIB.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x0d)
            m.mrsd(1, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PREFETCHW")
    }
    return p
}
 23944  
// PREFETCHWT1 performs "Prefetch Vector Data Into Caches with Intent to Write and T1 Hint".
//
// Mnemonic        : PREFETCHWT1
// Supported forms : (1 form)
//
//    * PREFETCHWT1 m8    [PREFETCHWT1]
//
// The function panics if the operand is not an 8-bit memory location.
func (self *Program) PREFETCHWT1(v0 interface{}) *Instruction {
    p := self.alloc("PREFETCHWT1", 1, Operands { v0 })
    // PREFETCHWT1 m8
    if isM8(v0) {
        self.require(ISA_PREFETCHWT1)
        p.domain = DomainGeneric
        // Encoding: [REX] 0F 0D /2 — opcode extension 2 distinguishes
        // PREFETCHWT1 from PREFETCH (/0) and PREFETCHW (/1).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x0d)
            m.mrsd(2, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PREFETCHWT1")
    }
    return p
}
 23970  
// PSADBW performs "Compute Sum of Absolute Differences".
//
// Mnemonic        : PSADBW
// Supported forms : (4 forms)
//
//    * PSADBW mm, mm       [MMX+]
//    * PSADBW m64, mm      [MMX+]
//    * PSADBW xmm, xmm     [SSE2]
//    * PSADBW m128, xmm    [SSE2]
//
// Each matched form registers one encoder closure on the instruction;
// the function panics if the operands fit none of the forms above.
func (self *Program) PSADBW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSADBW", 2, Operands { v0, v1 })
    // PSADBW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        // Encoding: [REX] 0F F6 /r — register-direct ModRM,
        // reg field = v[1] (destination), rm field = v[0] (source).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xf6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSADBW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        // Encoding: [REX] 0F F6 /r — memory operand via ModRM/SIB.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xf6)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PSADBW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 [REX] 0F F6 /r — the 66 prefix selects the XMM form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xf6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSADBW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 [REX] 0F F6 /r — memory operand via ModRM/SIB.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xf6)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PSADBW")
    }
    return p
}
 24034  
// PSHUFB performs "Packed Shuffle Bytes".
//
// Mnemonic        : PSHUFB
// Supported forms : (4 forms)
//
//    * PSHUFB mm, mm       [SSSE3]
//    * PSHUFB m64, mm      [SSSE3]
//    * PSHUFB xmm, xmm     [SSSE3]
//    * PSHUFB m128, xmm    [SSSE3]
//
// Each matched form registers one encoder closure on the instruction;
// the function panics if the operands fit none of the forms above.
func (self *Program) PSHUFB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSHUFB", 2, Operands { v0, v1 })
    // PSHUFB mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        // Encoding: [REX] 0F 38 00 /r — three-byte opcode (0F 38 map),
        // register-direct ModRM with reg = v[1], rm = v[0].
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x00)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSHUFB m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        // Encoding: [REX] 0F 38 00 /r — memory operand via ModRM/SIB.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x00)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PSHUFB xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        // Encoding: 66 [REX] 0F 38 00 /r — the 66 prefix selects the XMM form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x00)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSHUFB m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        // Encoding: 66 [REX] 0F 38 00 /r — memory operand via ModRM/SIB.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x00)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PSHUFB")
    }
    return p
}
 24102  
// PSHUFD performs "Shuffle Packed Doublewords".
//
// Mnemonic        : PSHUFD
// Supported forms : (2 forms)
//
//    * PSHUFD imm8, xmm, xmm     [SSE2]
//    * PSHUFD imm8, m128, xmm    [SSE2]
//
// v0 is the shuffle-control immediate, emitted as the trailing imm8 byte.
// The function panics if the operands fit none of the forms above.
func (self *Program) PSHUFD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("PSHUFD", 3, Operands { v0, v1, v2 })
    // PSHUFD imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 [REX] 0F 70 /r ib — register-direct ModRM with
        // reg = v[2] (destination), rm = v[1] (source), then the imm8.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // PSHUFD imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 [REX] 0F 70 /r ib — memory operand via ModRM/SIB.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x70)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for PSHUFD")
    }
    return p
}
 24144  
// PSHUFHW performs "Shuffle Packed High Words".
//
// Mnemonic        : PSHUFHW
// Supported forms : (2 forms)
//
//    * PSHUFHW imm8, xmm, xmm     [SSE2]
//    * PSHUFHW imm8, m128, xmm    [SSE2]
//
// v0 is the shuffle-control immediate, emitted as the trailing imm8 byte.
// The function panics if the operands fit none of the forms above.
func (self *Program) PSHUFHW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("PSHUFHW", 3, Operands { v0, v1, v2 })
    // PSHUFHW imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: F3 [REX] 0F 70 /r ib — the F3 prefix distinguishes
        // PSHUFHW from PSHUFD (66) and PSHUFLW (F2) on opcode 0F 70.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // PSHUFHW imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: F3 [REX] 0F 70 /r ib — memory operand via ModRM/SIB.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x70)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for PSHUFHW")
    }
    return p
}
 24186  
// PSHUFLW performs "Shuffle Packed Low Words".
//
// Mnemonic        : PSHUFLW
// Supported forms : (2 forms)
//
//    * PSHUFLW imm8, xmm, xmm     [SSE2]
//    * PSHUFLW imm8, m128, xmm    [SSE2]
//
// v0 is the shuffle-control immediate, emitted as the trailing imm8 byte.
// The function panics if the operands fit none of the forms above.
func (self *Program) PSHUFLW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("PSHUFLW", 3, Operands { v0, v1, v2 })
    // PSHUFLW imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: F2 [REX] 0F 70 /r ib — the F2 prefix distinguishes
        // PSHUFLW from PSHUFD (66) and PSHUFHW (F3) on opcode 0F 70.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // PSHUFLW imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: F2 [REX] 0F 70 /r ib — memory operand via ModRM/SIB.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x70)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for PSHUFLW")
    }
    return p
}
 24228  
// PSHUFW performs "Shuffle Packed Words".
//
// Mnemonic        : PSHUFW
// Supported forms : (2 forms)
//
//    * PSHUFW imm8, mm, mm     [MMX+]
//    * PSHUFW imm8, m64, mm    [MMX+]
//
// v0 is the shuffle-control immediate, emitted as the trailing imm8 byte.
// The function panics if the operands fit none of the forms above.
func (self *Program) PSHUFW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("PSHUFW", 3, Operands { v0, v1, v2 })
    // PSHUFW imm8, mm, mm
    if isImm8(v0) && isMM(v1) && isMM(v2) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        // Encoding: [REX] 0F 70 /r ib — no mandatory prefix for the MMX
        // form; register-direct ModRM with reg = v[2], rm = v[1].
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // PSHUFW imm8, m64, mm
    if isImm8(v0) && isM64(v1) && isMM(v2) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        // Encoding: [REX] 0F 70 /r ib — memory operand via ModRM/SIB.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x70)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for PSHUFW")
    }
    return p
}
 24268  
// PSIGNB performs "Packed Sign of Byte Integers".
//
// Mnemonic        : PSIGNB
// Supported forms : (4 forms)
//
//    * PSIGNB mm, mm       [SSSE3]
//    * PSIGNB m64, mm      [SSSE3]
//    * PSIGNB xmm, xmm     [SSSE3]
//    * PSIGNB m128, xmm    [SSSE3]
//
// Each matched form registers one encoder closure on the instruction;
// the function panics if the operands fit none of the forms above.
func (self *Program) PSIGNB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSIGNB", 2, Operands { v0, v1 })
    // PSIGNB mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        // Encoding: [REX] 0F 38 08 /r — three-byte opcode (0F 38 map),
        // register-direct ModRM with reg = v[1], rm = v[0].
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x08)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSIGNB m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        // Encoding: [REX] 0F 38 08 /r — memory operand via ModRM/SIB.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x08)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PSIGNB xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        // Encoding: 66 [REX] 0F 38 08 /r — the 66 prefix selects the XMM form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x08)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSIGNB m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        // Encoding: 66 [REX] 0F 38 08 /r — memory operand via ModRM/SIB.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x08)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PSIGNB")
    }
    return p
}
 24336  
// PSIGND performs "Packed Sign of Doubleword Integers".
//
// Mnemonic        : PSIGND
// Supported forms : (4 forms)
//
//    * PSIGND mm, mm       [SSSE3]
//    * PSIGND m64, mm      [SSSE3]
//    * PSIGND xmm, xmm     [SSSE3]
//    * PSIGND m128, xmm    [SSSE3]
//
// Each matched form registers one encoder closure on the instruction;
// the function panics if the operands fit none of the forms above.
func (self *Program) PSIGND(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSIGND", 2, Operands { v0, v1 })
    // PSIGND mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        // Encoding: [REX] 0F 38 0A /r — three-byte opcode (0F 38 map),
        // register-direct ModRM with reg = v[1], rm = v[0].
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x0a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSIGND m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        // Encoding: [REX] 0F 38 0A /r — memory operand via ModRM/SIB.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x0a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PSIGND xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        // Encoding: 66 [REX] 0F 38 0A /r — the 66 prefix selects the XMM form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x0a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSIGND m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        // Encoding: 66 [REX] 0F 38 0A /r — memory operand via ModRM/SIB.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x0a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PSIGND")
    }
    return p
}
 24404  
// PSIGNW performs "Packed Sign of Word Integers".
//
// Mnemonic        : PSIGNW
// Supported forms : (4 forms)
//
//    * PSIGNW mm, mm       [SSSE3]
//    * PSIGNW m64, mm      [SSSE3]
//    * PSIGNW xmm, xmm     [SSSE3]
//    * PSIGNW m128, xmm    [SSSE3]
//
func (self *Program) PSIGNW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSIGNW", 2, Operands { v0, v1 })
    // PSIGNW mm, mm — encoding: 0F 38 09 /r, register-direct ModRM (mod=11)
    if isMM(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x09)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: dst in reg field, src in r/m
        })
    }
    // PSIGNW m64, mm — encoding: 0F 38 09 /r with memory operand
    if isM64(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x09)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)    // ModRM/SIB/disp for the memory source
        })
    }
    // PSIGNW xmm, xmm — encoding: 66 0F 38 09 /r (66 prefix selects the XMM form)
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x09)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSIGNW m128, xmm — encoding: 66 0F 38 09 /r with memory operand
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x09)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoder matched the operand combination: reject at build time.
    if p.len == 0 {
        panic("invalid operands for PSIGNW")
    }
    return p
}
 24472  
// PSLLD performs "Shift Packed Doubleword Data Left Logical".
//
// Mnemonic        : PSLLD
// Supported forms : (6 forms)
//
//    * PSLLD imm8, mm     [MMX]
//    * PSLLD mm, mm       [MMX]
//    * PSLLD m64, mm      [MMX]
//    * PSLLD imm8, xmm    [SSE2]
//    * PSLLD xmm, xmm     [SSE2]
//    * PSLLD m128, xmm    [SSE2]
//
func (self *Program) PSLLD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSLLD", 2, Operands { v0, v1 })
    // PSLLD imm8, mm — encoding: 0F 72 /6 ib (0xf0 = mod 11, reg field 6)
    if isImm8(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0x0f)
            m.emit(0x72)
            m.emit(0xf0 | lcode(v[1]))    // ModRM: /6 opcode extension, destination in r/m
            m.imm1(toImmAny(v[0]))        // shift count as trailing imm8
        })
    }
    // PSLLD mm, mm — encoding: 0F F2 /r, shift count taken from the source register
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xf2)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: dst in reg field, src in r/m
        })
    }
    // PSLLD m64, mm — encoding: 0F F2 /r with memory operand
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xf2)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PSLLD imm8, xmm — encoding: 66 0F 72 /6 ib (66 prefix selects the XMM form)
    if isImm8(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0x0f)
            m.emit(0x72)
            m.emit(0xf0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // PSLLD xmm, xmm — encoding: 66 0F F2 /r
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xf2)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSLLD m128, xmm — encoding: 66 0F F2 /r with memory operand
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xf2)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoder matched the operand combination: reject at build time.
    if p.len == 0 {
        panic("invalid operands for PSLLD")
    }
    return p
}
 24563  
// PSLLDQ performs "Shift Packed Double Quadword Left Logical".
//
// Mnemonic        : PSLLDQ
// Supported forms : (1 form)
//
//    * PSLLDQ imm8, xmm    [SSE2]
//
func (self *Program) PSLLDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSLLDQ", 2, Operands { v0, v1 })
    // PSLLDQ imm8, xmm — encoding: 66 0F 73 /7 ib (0xf8 = mod 11, reg field 7);
    // XMM-only instruction, hence the mandatory 66 prefix and single form.
    if isImm8(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0x0f)
            m.emit(0x73)
            m.emit(0xf8 | lcode(v[1]))    // ModRM: /7 opcode extension, destination in r/m
            m.imm1(toImmAny(v[0]))        // byte shift count as trailing imm8
        })
    }
    // No encoder matched the operand combination: reject at build time.
    if p.len == 0 {
        panic("invalid operands for PSLLDQ")
    }
    return p
}
 24591  
// PSLLQ performs "Shift Packed Quadword Data Left Logical".
//
// Mnemonic        : PSLLQ
// Supported forms : (6 forms)
//
//    * PSLLQ imm8, mm     [MMX]
//    * PSLLQ mm, mm       [MMX]
//    * PSLLQ m64, mm      [MMX]
//    * PSLLQ imm8, xmm    [SSE2]
//    * PSLLQ xmm, xmm     [SSE2]
//    * PSLLQ m128, xmm    [SSE2]
//
func (self *Program) PSLLQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSLLQ", 2, Operands { v0, v1 })
    // PSLLQ imm8, mm — encoding: 0F 73 /6 ib (0xf0 = mod 11, reg field 6)
    if isImm8(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0x0f)
            m.emit(0x73)
            m.emit(0xf0 | lcode(v[1]))    // ModRM: /6 opcode extension, destination in r/m
            m.imm1(toImmAny(v[0]))        // shift count as trailing imm8
        })
    }
    // PSLLQ mm, mm — encoding: 0F F3 /r, shift count taken from the source register
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xf3)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: dst in reg field, src in r/m
        })
    }
    // PSLLQ m64, mm — encoding: 0F F3 /r with memory operand
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xf3)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PSLLQ imm8, xmm — encoding: 66 0F 73 /6 ib (66 prefix selects the XMM form)
    if isImm8(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0x0f)
            m.emit(0x73)
            m.emit(0xf0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // PSLLQ xmm, xmm — encoding: 66 0F F3 /r
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xf3)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSLLQ m128, xmm — encoding: 66 0F F3 /r with memory operand
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xf3)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoder matched the operand combination: reject at build time.
    if p.len == 0 {
        panic("invalid operands for PSLLQ")
    }
    return p
}
 24682  
// PSLLW performs "Shift Packed Word Data Left Logical".
//
// Mnemonic        : PSLLW
// Supported forms : (6 forms)
//
//    * PSLLW imm8, mm     [MMX]
//    * PSLLW mm, mm       [MMX]
//    * PSLLW m64, mm      [MMX]
//    * PSLLW imm8, xmm    [SSE2]
//    * PSLLW xmm, xmm     [SSE2]
//    * PSLLW m128, xmm    [SSE2]
//
func (self *Program) PSLLW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSLLW", 2, Operands { v0, v1 })
    // PSLLW imm8, mm — encoding: 0F 71 /6 ib (0xf0 = mod 11, reg field 6)
    if isImm8(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0x0f)
            m.emit(0x71)
            m.emit(0xf0 | lcode(v[1]))    // ModRM: /6 opcode extension, destination in r/m
            m.imm1(toImmAny(v[0]))        // shift count as trailing imm8
        })
    }
    // PSLLW mm, mm — encoding: 0F F1 /r, shift count taken from the source register
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xf1)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: dst in reg field, src in r/m
        })
    }
    // PSLLW m64, mm — encoding: 0F F1 /r with memory operand
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xf1)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PSLLW imm8, xmm — encoding: 66 0F 71 /6 ib (66 prefix selects the XMM form)
    if isImm8(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0x0f)
            m.emit(0x71)
            m.emit(0xf0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // PSLLW xmm, xmm — encoding: 66 0F F1 /r
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xf1)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSLLW m128, xmm — encoding: 66 0F F1 /r with memory operand
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xf1)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoder matched the operand combination: reject at build time.
    if p.len == 0 {
        panic("invalid operands for PSLLW")
    }
    return p
}
 24773  
// PSRAD performs "Shift Packed Doubleword Data Right Arithmetic".
//
// Mnemonic        : PSRAD
// Supported forms : (6 forms)
//
//    * PSRAD imm8, mm     [MMX]
//    * PSRAD mm, mm       [MMX]
//    * PSRAD m64, mm      [MMX]
//    * PSRAD imm8, xmm    [SSE2]
//    * PSRAD xmm, xmm     [SSE2]
//    * PSRAD m128, xmm    [SSE2]
//
func (self *Program) PSRAD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSRAD", 2, Operands { v0, v1 })
    // PSRAD imm8, mm — encoding: 0F 72 /4 ib (0xe0 = mod 11, reg field 4)
    if isImm8(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0x0f)
            m.emit(0x72)
            m.emit(0xe0 | lcode(v[1]))    // ModRM: /4 opcode extension, destination in r/m
            m.imm1(toImmAny(v[0]))        // shift count as trailing imm8
        })
    }
    // PSRAD mm, mm — encoding: 0F E2 /r, shift count taken from the source register
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xe2)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: dst in reg field, src in r/m
        })
    }
    // PSRAD m64, mm — encoding: 0F E2 /r with memory operand
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xe2)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PSRAD imm8, xmm — encoding: 66 0F 72 /4 ib (66 prefix selects the XMM form)
    if isImm8(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0x0f)
            m.emit(0x72)
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // PSRAD xmm, xmm — encoding: 66 0F E2 /r
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xe2)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSRAD m128, xmm — encoding: 66 0F E2 /r with memory operand
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xe2)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoder matched the operand combination: reject at build time.
    if p.len == 0 {
        panic("invalid operands for PSRAD")
    }
    return p
}
 24864  
// PSRAW performs "Shift Packed Word Data Right Arithmetic".
//
// Mnemonic        : PSRAW
// Supported forms : (6 forms)
//
//    * PSRAW imm8, mm     [MMX]
//    * PSRAW mm, mm       [MMX]
//    * PSRAW m64, mm      [MMX]
//    * PSRAW imm8, xmm    [SSE2]
//    * PSRAW xmm, xmm     [SSE2]
//    * PSRAW m128, xmm    [SSE2]
//
func (self *Program) PSRAW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSRAW", 2, Operands { v0, v1 })
    // PSRAW imm8, mm — encoding: 0F 71 /4 ib (0xe0 = mod 11, reg field 4)
    if isImm8(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0x0f)
            m.emit(0x71)
            m.emit(0xe0 | lcode(v[1]))    // ModRM: /4 opcode extension, destination in r/m
            m.imm1(toImmAny(v[0]))        // shift count as trailing imm8
        })
    }
    // PSRAW mm, mm — encoding: 0F E1 /r, shift count taken from the source register
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xe1)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: dst in reg field, src in r/m
        })
    }
    // PSRAW m64, mm — encoding: 0F E1 /r with memory operand
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xe1)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PSRAW imm8, xmm — encoding: 66 0F 71 /4 ib (66 prefix selects the XMM form)
    if isImm8(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0x0f)
            m.emit(0x71)
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // PSRAW xmm, xmm — encoding: 66 0F E1 /r
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xe1)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSRAW m128, xmm — encoding: 66 0F E1 /r with memory operand
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xe1)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoder matched the operand combination: reject at build time.
    if p.len == 0 {
        panic("invalid operands for PSRAW")
    }
    return p
}
 24955  
// PSRLD performs "Shift Packed Doubleword Data Right Logical".
//
// Mnemonic        : PSRLD
// Supported forms : (6 forms)
//
//    * PSRLD imm8, mm     [MMX]
//    * PSRLD mm, mm       [MMX]
//    * PSRLD m64, mm      [MMX]
//    * PSRLD imm8, xmm    [SSE2]
//    * PSRLD xmm, xmm     [SSE2]
//    * PSRLD m128, xmm    [SSE2]
//
func (self *Program) PSRLD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSRLD", 2, Operands { v0, v1 })
    // PSRLD imm8, mm — encoding: 0F 72 /2 ib (0xd0 = mod 11, reg field 2)
    if isImm8(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0x0f)
            m.emit(0x72)
            m.emit(0xd0 | lcode(v[1]))    // ModRM: /2 opcode extension, destination in r/m
            m.imm1(toImmAny(v[0]))        // shift count as trailing imm8
        })
    }
    // PSRLD mm, mm — encoding: 0F D2 /r, shift count taken from the source register
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xd2)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: dst in reg field, src in r/m
        })
    }
    // PSRLD m64, mm — encoding: 0F D2 /r with memory operand
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xd2)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PSRLD imm8, xmm — encoding: 66 0F 72 /2 ib (66 prefix selects the XMM form)
    if isImm8(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0x0f)
            m.emit(0x72)
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // PSRLD xmm, xmm — encoding: 66 0F D2 /r
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xd2)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSRLD m128, xmm — encoding: 66 0F D2 /r with memory operand
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xd2)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoder matched the operand combination: reject at build time.
    if p.len == 0 {
        panic("invalid operands for PSRLD")
    }
    return p
}
 25046  
// PSRLDQ performs "Shift Packed Double Quadword Right Logical".
//
// Mnemonic        : PSRLDQ
// Supported forms : (1 form)
//
//    * PSRLDQ imm8, xmm    [SSE2]
//
func (self *Program) PSRLDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSRLDQ", 2, Operands { v0, v1 })
    // PSRLDQ imm8, xmm — encoding: 66 0F 73 /3 ib (0xd8 = mod 11, reg field 3);
    // XMM-only instruction, hence the mandatory 66 prefix and single form.
    if isImm8(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0x0f)
            m.emit(0x73)
            m.emit(0xd8 | lcode(v[1]))    // ModRM: /3 opcode extension, destination in r/m
            m.imm1(toImmAny(v[0]))        // byte shift count as trailing imm8
        })
    }
    // No encoder matched the operand combination: reject at build time.
    if p.len == 0 {
        panic("invalid operands for PSRLDQ")
    }
    return p
}
 25074  
// PSRLQ performs "Shift Packed Quadword Data Right Logical".
//
// Mnemonic        : PSRLQ
// Supported forms : (6 forms)
//
//    * PSRLQ imm8, mm     [MMX]
//    * PSRLQ mm, mm       [MMX]
//    * PSRLQ m64, mm      [MMX]
//    * PSRLQ imm8, xmm    [SSE2]
//    * PSRLQ xmm, xmm     [SSE2]
//    * PSRLQ m128, xmm    [SSE2]
//
func (self *Program) PSRLQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSRLQ", 2, Operands { v0, v1 })
    // PSRLQ imm8, mm — encoding: 0F 73 /2 ib (0xd0 = mod 11, reg field 2)
    if isImm8(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0x0f)
            m.emit(0x73)
            m.emit(0xd0 | lcode(v[1]))    // ModRM: /2 opcode extension, destination in r/m
            m.imm1(toImmAny(v[0]))        // shift count as trailing imm8
        })
    }
    // PSRLQ mm, mm — encoding: 0F D3 /r, shift count taken from the source register
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xd3)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: dst in reg field, src in r/m
        })
    }
    // PSRLQ m64, mm — encoding: 0F D3 /r with memory operand
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xd3)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PSRLQ imm8, xmm — encoding: 66 0F 73 /2 ib (66 prefix selects the XMM form)
    if isImm8(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0x0f)
            m.emit(0x73)
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // PSRLQ xmm, xmm — encoding: 66 0F D3 /r
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xd3)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSRLQ m128, xmm — encoding: 66 0F D3 /r with memory operand
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xd3)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoder matched the operand combination: reject at build time.
    if p.len == 0 {
        panic("invalid operands for PSRLQ")
    }
    return p
}
 25165  
// PSRLW performs "Shift Packed Word Data Right Logical".
//
// Mnemonic        : PSRLW
// Supported forms : (6 forms)
//
//    * PSRLW imm8, mm     [MMX]
//    * PSRLW mm, mm       [MMX]
//    * PSRLW m64, mm      [MMX]
//    * PSRLW imm8, xmm    [SSE2]
//    * PSRLW xmm, xmm     [SSE2]
//    * PSRLW m128, xmm    [SSE2]
//
func (self *Program) PSRLW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSRLW", 2, Operands { v0, v1 })
    // PSRLW imm8, mm — encoding: 0F 71 /2 ib (0xd0 = mod 11, reg field 2)
    if isImm8(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0x0f)
            m.emit(0x71)
            m.emit(0xd0 | lcode(v[1]))    // ModRM: /2 opcode extension, destination in r/m
            m.imm1(toImmAny(v[0]))        // shift count as trailing imm8
        })
    }
    // PSRLW mm, mm — encoding: 0F D1 /r, shift count taken from the source register
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xd1)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: dst in reg field, src in r/m
        })
    }
    // PSRLW m64, mm — encoding: 0F D1 /r with memory operand
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xd1)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PSRLW imm8, xmm — encoding: 66 0F 71 /2 ib (66 prefix selects the XMM form)
    if isImm8(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0x0f)
            m.emit(0x71)
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // PSRLW xmm, xmm — encoding: 66 0F D1 /r
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xd1)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSRLW m128, xmm — encoding: 66 0F D1 /r with memory operand
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xd1)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoder matched the operand combination: reject at build time.
    if p.len == 0 {
        panic("invalid operands for PSRLW")
    }
    return p
}
 25256  
// PSUBB performs "Subtract Packed Byte Integers".
//
// Mnemonic        : PSUBB
// Supported forms : (4 forms)
//
//    * PSUBB mm, mm       [MMX]
//    * PSUBB m64, mm      [MMX]
//    * PSUBB xmm, xmm     [SSE2]
//    * PSUBB m128, xmm    [SSE2]
//
func (self *Program) PSUBB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSUBB", 2, Operands { v0, v1 })
    // PSUBB mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        // Encoding: 0F F8 /r — register-direct MMX form; v[1] is the destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)              // optional REX prefix for extended operand encodings
            m.emit(0x0f)                                  // two-byte opcode escape
            m.emit(0xf8)                                  // PSUBB opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[1] (dst), rm=v[0] (src)
        })
    }
    // PSUBB m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        // Encoding: 0F F8 /r — 64-bit memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)        // optional REX prefix for extended operand encodings
            m.emit(0x0f)                                  // two-byte opcode escape
            m.emit(0xf8)                                  // PSUBB opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the memory operand
        })
    }
    // PSUBB xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 0F F8 /r — the 66 prefix selects the XMM (SSE2) form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xf8)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSUBB m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 0F F8 /r — 128-bit memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xf8)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form above matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PSUBB")
    }
    return p
}
 25320  
// PSUBD performs "Subtract Packed Doubleword Integers".
//
// Mnemonic        : PSUBD
// Supported forms : (4 forms)
//
//    * PSUBD mm, mm       [MMX]
//    * PSUBD m64, mm      [MMX]
//    * PSUBD xmm, xmm     [SSE2]
//    * PSUBD m128, xmm    [SSE2]
//
func (self *Program) PSUBD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSUBD", 2, Operands { v0, v1 })
    // PSUBD mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        // Encoding: 0F FA /r — register-direct MMX form; v[1] is the destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xfa)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSUBD m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        // Encoding: 0F FA /r — 64-bit memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xfa)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PSUBD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 0F FA /r — the 66 prefix selects the XMM (SSE2) form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xfa)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSUBD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 0F FA /r — 128-bit memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xfa)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form above matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PSUBD")
    }
    return p
}
 25384  
// PSUBQ performs "Subtract Packed Quadword Integers".
//
// Mnemonic        : PSUBQ
// Supported forms : (4 forms)
//
//    * PSUBQ mm, mm       [SSE2]
//    * PSUBQ m64, mm      [SSE2]
//    * PSUBQ xmm, xmm     [SSE2]
//    * PSUBQ m128, xmm    [SSE2]
//
func (self *Program) PSUBQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSUBQ", 2, Operands { v0, v1 })
    // PSUBQ mm, mm
    if isMM(v0) && isMM(v1) {
        // Note: unlike the other PSUB* instructions, even the MMX-register
        // forms of PSUBQ require SSE2 (the instruction was added with SSE2).
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 0F FB /r — register-direct MMX form; v[1] is the destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xfb)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSUBQ m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 0F FB /r — 64-bit memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xfb)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PSUBQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 0F FB /r — the 66 prefix selects the XMM form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xfb)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSUBQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 0F FB /r — 128-bit memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xfb)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form above matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PSUBQ")
    }
    return p
}
 25448  
// PSUBSB performs "Subtract Packed Signed Byte Integers with Signed Saturation".
//
// Mnemonic        : PSUBSB
// Supported forms : (4 forms)
//
//    * PSUBSB mm, mm       [MMX]
//    * PSUBSB m64, mm      [MMX]
//    * PSUBSB xmm, xmm     [SSE2]
//    * PSUBSB m128, xmm    [SSE2]
//
func (self *Program) PSUBSB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSUBSB", 2, Operands { v0, v1 })
    // PSUBSB mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        // Encoding: 0F E8 /r — register-direct MMX form; v[1] is the destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xe8)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSUBSB m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        // Encoding: 0F E8 /r — 64-bit memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xe8)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PSUBSB xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 0F E8 /r — the 66 prefix selects the XMM (SSE2) form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xe8)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSUBSB m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 0F E8 /r — 128-bit memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xe8)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form above matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PSUBSB")
    }
    return p
}
 25512  
// PSUBSW performs "Subtract Packed Signed Word Integers with Signed Saturation".
//
// Mnemonic        : PSUBSW
// Supported forms : (4 forms)
//
//    * PSUBSW mm, mm       [MMX]
//    * PSUBSW m64, mm      [MMX]
//    * PSUBSW xmm, xmm     [SSE2]
//    * PSUBSW m128, xmm    [SSE2]
//
func (self *Program) PSUBSW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSUBSW", 2, Operands { v0, v1 })
    // PSUBSW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        // Encoding: 0F E9 /r — register-direct MMX form; v[1] is the destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xe9)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSUBSW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        // Encoding: 0F E9 /r — 64-bit memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xe9)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PSUBSW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 0F E9 /r — the 66 prefix selects the XMM (SSE2) form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xe9)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSUBSW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 0F E9 /r — 128-bit memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xe9)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form above matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PSUBSW")
    }
    return p
}
 25576  
// PSUBUSB performs "Subtract Packed Unsigned Byte Integers with Unsigned Saturation".
//
// Mnemonic        : PSUBUSB
// Supported forms : (4 forms)
//
//    * PSUBUSB mm, mm       [MMX]
//    * PSUBUSB m64, mm      [MMX]
//    * PSUBUSB xmm, xmm     [SSE2]
//    * PSUBUSB m128, xmm    [SSE2]
//
func (self *Program) PSUBUSB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSUBUSB", 2, Operands { v0, v1 })
    // PSUBUSB mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        // Encoding: 0F D8 /r — register-direct MMX form; v[1] is the destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xd8)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSUBUSB m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        // Encoding: 0F D8 /r — 64-bit memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xd8)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PSUBUSB xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 0F D8 /r — the 66 prefix selects the XMM (SSE2) form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xd8)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSUBUSB m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 0F D8 /r — 128-bit memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xd8)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form above matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PSUBUSB")
    }
    return p
}
 25640  
// PSUBUSW performs "Subtract Packed Unsigned Word Integers with Unsigned Saturation".
//
// Mnemonic        : PSUBUSW
// Supported forms : (4 forms)
//
//    * PSUBUSW mm, mm       [MMX]
//    * PSUBUSW m64, mm      [MMX]
//    * PSUBUSW xmm, xmm     [SSE2]
//    * PSUBUSW m128, xmm    [SSE2]
//
func (self *Program) PSUBUSW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSUBUSW", 2, Operands { v0, v1 })
    // PSUBUSW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        // Encoding: 0F D9 /r — register-direct MMX form; v[1] is the destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xd9)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSUBUSW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        // Encoding: 0F D9 /r — 64-bit memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xd9)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PSUBUSW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 0F D9 /r — the 66 prefix selects the XMM (SSE2) form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xd9)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSUBUSW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 0F D9 /r — 128-bit memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xd9)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form above matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PSUBUSW")
    }
    return p
}
 25704  
// PSUBW performs "Subtract Packed Word Integers".
//
// Mnemonic        : PSUBW
// Supported forms : (4 forms)
//
//    * PSUBW mm, mm       [MMX]
//    * PSUBW m64, mm      [MMX]
//    * PSUBW xmm, xmm     [SSE2]
//    * PSUBW m128, xmm    [SSE2]
//
func (self *Program) PSUBW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSUBW", 2, Operands { v0, v1 })
    // PSUBW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        // Encoding: 0F F9 /r — register-direct MMX form; v[1] is the destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xf9)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSUBW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        // Encoding: 0F F9 /r — 64-bit memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xf9)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PSUBW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 0F F9 /r — the 66 prefix selects the XMM (SSE2) form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xf9)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSUBW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 0F F9 /r — 128-bit memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xf9)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form above matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PSUBW")
    }
    return p
}
 25768  
// PSWAPD performs "Packed Swap Doubleword".
//
// Mnemonic        : PSWAPD
// Supported forms : (2 forms)
//
//    * PSWAPD mm, mm     [3dnow!+]
//    * PSWAPD m64, mm    [3dnow!+]
//
func (self *Program) PSWAPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSWAPD", 2, Operands { v0, v1 })
    // PSWAPD mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW_PLUS)
        p.domain = DomainAMDSpecific
        // Encoding: 0F 0F /r BB — 3DNow! instructions share the 0F 0F opcode
        // and carry the instruction-selecting byte (here BB) AFTER the ModRM
        // bytes, unlike regular opcodes.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
            m.emit(0xbb)
        })
    }
    // PSWAPD m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW_PLUS)
        p.domain = DomainAMDSpecific
        // Encoding: 0F 0F /r BB — 64-bit memory source; the BB suffix byte
        // again follows the ModRM/SIB/displacement bytes.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
            m.emit(0xbb)
        })
    }
    // No form above matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PSWAPD")
    }
    return p
}
 25808  
// PTEST performs "Packed Logical Compare".
//
// Mnemonic        : PTEST
// Supported forms : (2 forms)
//
//    * PTEST xmm, xmm     [SSE4.1]
//    * PTEST m128, xmm    [SSE4.1]
//
func (self *Program) PTEST(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PTEST", 2, Operands { v0, v1 })
    // PTEST xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // Encoding: 66 0F 38 17 /r — three-byte opcode (0F 38 map), register-direct.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x17)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PTEST m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // Encoding: 66 0F 38 17 /r — 128-bit memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x17)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form above matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PTEST")
    }
    return p
}
 25850  
// PUNPCKHBW performs "Unpack and Interleave High-Order Bytes into Words".
//
// Mnemonic        : PUNPCKHBW
// Supported forms : (4 forms)
//
//    * PUNPCKHBW mm, mm       [MMX]
//    * PUNPCKHBW m64, mm      [MMX]
//    * PUNPCKHBW xmm, xmm     [SSE2]
//    * PUNPCKHBW m128, xmm    [SSE2]
//
func (self *Program) PUNPCKHBW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PUNPCKHBW", 2, Operands { v0, v1 })
    // PUNPCKHBW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        // Encoding: 0F 68 /r — register-direct MMX form; v[1] is the destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x68)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PUNPCKHBW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        // Encoding: 0F 68 /r — 64-bit memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x68)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PUNPCKHBW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 0F 68 /r — the 66 prefix selects the XMM (SSE2) form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x68)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PUNPCKHBW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 0F 68 /r — 128-bit memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x68)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form above matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PUNPCKHBW")
    }
    return p
}
 25914  
// PUNPCKHDQ performs "Unpack and Interleave High-Order Doublewords into Quadwords".
//
// Mnemonic        : PUNPCKHDQ
// Supported forms : (4 forms)
//
//    * PUNPCKHDQ mm, mm       [MMX]
//    * PUNPCKHDQ m64, mm      [MMX]
//    * PUNPCKHDQ xmm, xmm     [SSE2]
//    * PUNPCKHDQ m128, xmm    [SSE2]
//
func (self *Program) PUNPCKHDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PUNPCKHDQ", 2, Operands { v0, v1 })
    // PUNPCKHDQ mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        // Encoding: 0F 6A /r — register-direct MMX form; v[1] is the destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x6a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PUNPCKHDQ m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        // Encoding: 0F 6A /r — 64-bit memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x6a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PUNPCKHDQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 0F 6A /r — the 66 prefix selects the XMM (SSE2) form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x6a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PUNPCKHDQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 0F 6A /r — 128-bit memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x6a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form above matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PUNPCKHDQ")
    }
    return p
}
 25978  
// PUNPCKHQDQ performs "Unpack and Interleave High-Order Quadwords into Double Quadwords".
//
// Mnemonic        : PUNPCKHQDQ
// Supported forms : (2 forms)
//
//    * PUNPCKHQDQ xmm, xmm     [SSE2]
//    * PUNPCKHQDQ m128, xmm    [SSE2]
//
func (self *Program) PUNPCKHQDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PUNPCKHQDQ", 2, Operands { v0, v1 })
    // PUNPCKHQDQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 0F 6D /r — register-direct; v[1] is the destination.
        // This instruction is XMM-only (no MMX form exists).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x6d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PUNPCKHQDQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 0F 6D /r — 128-bit memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x6d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form above matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PUNPCKHQDQ")
    }
    return p
}
 26018  
// PUNPCKHWD performs "Unpack and Interleave High-Order Words into Doublewords".
//
// Mnemonic        : PUNPCKHWD
// Supported forms : (4 forms)
//
//    * PUNPCKHWD mm, mm       [MMX]
//    * PUNPCKHWD m64, mm      [MMX]
//    * PUNPCKHWD xmm, xmm     [SSE2]
//    * PUNPCKHWD m128, xmm    [SSE2]
//
func (self *Program) PUNPCKHWD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PUNPCKHWD", 2, Operands { v0, v1 })
    // PUNPCKHWD mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        // Encoding: 0F 69 /r — register-direct MMX form; v[1] is the destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x69)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PUNPCKHWD m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        // Encoding: 0F 69 /r — 64-bit memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x69)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PUNPCKHWD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 0F 69 /r — the 66 prefix selects the XMM (SSE2) form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x69)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PUNPCKHWD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 0F 69 /r — 128-bit memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x69)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form above matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PUNPCKHWD")
    }
    return p
}
 26082  
// PUNPCKLBW performs "Unpack and Interleave Low-Order Bytes into Words".
//
// Mnemonic        : PUNPCKLBW
// Supported forms : (4 forms)
//
//    * PUNPCKLBW mm, mm       [MMX]
//    * PUNPCKLBW m32, mm      [MMX]
//    * PUNPCKLBW xmm, xmm     [SSE2]
//    * PUNPCKLBW m128, xmm    [SSE2]
//
func (self *Program) PUNPCKLBW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PUNPCKLBW", 2, Operands { v0, v1 })
    // PUNPCKLBW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        // Encoding: 0F 60 /r — register-direct MMX form; v[1] is the destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x60)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PUNPCKLBW m32, mm
    if isM32(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        // Encoding: 0F 60 /r — note the MMX memory form takes a 32-bit
        // source (m32), unlike the m64 forms of the high-unpack variants.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x60)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PUNPCKLBW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 0F 60 /r — the 66 prefix selects the XMM (SSE2) form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x60)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PUNPCKLBW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 0F 60 /r — 128-bit memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x60)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form above matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PUNPCKLBW")
    }
    return p
}
 26146  
// PUNPCKLDQ performs "Unpack and Interleave Low-Order Doublewords into Quadwords".
//
// Mnemonic        : PUNPCKLDQ
// Supported forms : (4 forms)
//
//    * PUNPCKLDQ mm, mm       [MMX]
//    * PUNPCKLDQ m32, mm      [MMX]
//    * PUNPCKLDQ xmm, xmm     [SSE2]
//    * PUNPCKLDQ m128, xmm    [SSE2]
//
// Each matching form registers an encoder closure; opcode is 0F 62, with the
// 0x66 prefix distinguishing the SSE2 (XMM) forms from the MMX forms.
func (self *Program) PUNPCKLDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PUNPCKLDQ", 2, Operands { v0, v1 })
    // PUNPCKLDQ mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Optional REX prefix, opcode 0F 62, register-direct ModRM.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x62)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PUNPCKLDQ m32, mm
    if isM32(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x62)
            // mrsd encodes ModRM/SIB/displacement for the memory operand.
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PUNPCKLDQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x62)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PUNPCKLDQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x62)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PUNPCKLDQ")
    }
    return p
}
 26210  
// PUNPCKLQDQ performs "Unpack and Interleave Low-Order Quadwords into Double Quadwords".
//
// Mnemonic        : PUNPCKLQDQ
// Supported forms : (2 forms)
//
//    * PUNPCKLQDQ xmm, xmm     [SSE2]
//    * PUNPCKLQDQ m128, xmm    [SSE2]
//
// SSE2-only instruction: both forms carry the mandatory 0x66 prefix and
// opcode 0F 6C.
func (self *Program) PUNPCKLQDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PUNPCKLQDQ", 2, Operands { v0, v1 })
    // PUNPCKLQDQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 [REX] 0F 6C, then register-direct ModRM (mod=11).
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x6c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PUNPCKLQDQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x6c)
            // mrsd encodes ModRM/SIB/displacement for the memory operand.
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PUNPCKLQDQ")
    }
    return p
}
 26250  
// PUNPCKLWD performs "Unpack and Interleave Low-Order Words into Doublewords".
//
// Mnemonic        : PUNPCKLWD
// Supported forms : (4 forms)
//
//    * PUNPCKLWD mm, mm       [MMX]
//    * PUNPCKLWD m32, mm      [MMX]
//    * PUNPCKLWD xmm, xmm     [SSE2]
//    * PUNPCKLWD m128, xmm    [SSE2]
//
// Each matching form registers an encoder closure; opcode is 0F 61, with the
// 0x66 prefix distinguishing the SSE2 (XMM) forms from the MMX forms.
func (self *Program) PUNPCKLWD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PUNPCKLWD", 2, Operands { v0, v1 })
    // PUNPCKLWD mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Optional REX prefix, opcode 0F 61, register-direct ModRM.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x61)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PUNPCKLWD m32, mm
    if isM32(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x61)
            // mrsd encodes ModRM/SIB/displacement for the memory operand.
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PUNPCKLWD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x61)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PUNPCKLWD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x61)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PUNPCKLWD")
    }
    return p
}
 26314  
// PUSHQ performs "Push Value Onto the Stack".
//
// Mnemonic        : PUSH
// Supported forms : (4 forms)
//
//    * PUSHQ imm8
//    * PUSHQ imm32
//    * PUSHQ r64
//    * PUSHQ m64
//
// Note: the r64 form registers two alternative encodings (short 50+r and the
// longer FF /6); the encoder picks between them later.
func (self *Program) PUSHQ(v0 interface{}) *Instruction {
    p := self.alloc("PUSHQ", 1, Operands { v0 })
    // PUSHQ imm8
    if isImm8Ext(v0, 8) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 6A ib: push sign-extended 8-bit immediate.
            m.emit(0x6a)
            m.imm1(toImmAny(v[0]))
        })
    }
    // PUSHQ imm32
    if isImm32Ext(v0, 8) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 68 id: push sign-extended 32-bit immediate.
            m.emit(0x68)
            m.imm4(toImmAny(v[0]))
        })
    }
    // PUSHQ r64
    if isReg64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Short form: 50+r with the register encoded in the opcode byte.
            m.rexo(0, v[0], false)
            m.emit(0x50 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Alternative form: FF /6 with register-direct ModRM.
            m.rexo(0, v[0], false)
            m.emit(0xff)
            m.emit(0xf0 | lcode(v[0]))
        })
    }
    // PUSHQ m64
    if isM64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // FF /6 with a memory operand (reg field fixed at 6).
            m.rexo(0, addr(v[0]), false)
            m.emit(0xff)
            m.mrsd(6, addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand type.
    if p.len == 0 {
        panic("invalid operands for PUSHQ")
    }
    return p
}
 26370  
// PUSHW performs "Push Value Onto the Stack".
//
// Mnemonic        : PUSH
// Supported forms : (2 forms)
//
//    * PUSHW r16
//    * PUSHW m16
//
// The 16-bit forms are the 64-bit PUSH encodings preceded by the 0x66
// operand-size prefix.
func (self *Program) PUSHW(v0 interface{}) *Instruction {
    p := self.alloc("PUSHW", 1, Operands { v0 })
    // PUSHW r16
    if isReg16(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Short form: 66 [REX] 50+r.
            m.emit(0x66)
            m.rexo(0, v[0], false)
            m.emit(0x50 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Alternative form: 66 [REX] FF /6 with register-direct ModRM.
            m.emit(0x66)
            m.rexo(0, v[0], false)
            m.emit(0xff)
            m.emit(0xf0 | lcode(v[0]))
        })
    }
    // PUSHW m16
    if isM16(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 [REX] FF /6 with a memory operand (reg field fixed at 6).
            m.emit(0x66)
            m.rexo(0, addr(v[0]), false)
            m.emit(0xff)
            m.mrsd(6, addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand type.
    if p.len == 0 {
        panic("invalid operands for PUSHW")
    }
    return p
}
 26411  
// PXOR performs "Packed Bitwise Logical Exclusive OR".
//
// Mnemonic        : PXOR
// Supported forms : (4 forms)
//
//    * PXOR mm, mm       [MMX]
//    * PXOR m64, mm      [MMX]
//    * PXOR xmm, xmm     [SSE2]
//    * PXOR m128, xmm    [SSE2]
//
// Each matching form registers an encoder closure; opcode is 0F EF, with the
// 0x66 prefix distinguishing the SSE2 (XMM) forms from the MMX forms.
func (self *Program) PXOR(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PXOR", 2, Operands { v0, v1 })
    // PXOR mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Optional REX prefix, opcode 0F EF, register-direct ModRM.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xef)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PXOR m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xef)
            // mrsd encodes ModRM/SIB/displacement for the memory operand.
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PXOR xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xef)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PXOR m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xef)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PXOR")
    }
    return p
}
 26475  
// RCLB performs "Rotate Left through Carry Flag".
//
// Mnemonic        : RCL
// Supported forms : (6 forms)
//
//    * RCLB 1, r8
//    * RCLB imm8, r8
//    * RCLB cl, r8
//    * RCLB 1, m8
//    * RCLB imm8, m8
//    * RCLB cl, m8
//
// All RCL encodings use ModRM reg field /2: register forms emit 0xD0|rm
// (mod=11, reg=010), memory forms pass 2 to mrsd. Opcode selects the count
// source: D0 (by 1), C0 ib (by imm8), D2 (by CL).
func (self *Program) RCLB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("RCLB", 2, Operands { v0, v1 })
    // RCLB 1, r8
    if isConst1(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // REX is forced when the operand is a REX-only byte register (SPL..R15B).
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xd0)
            m.emit(0xd0 | lcode(v[1]))
        })
    }
    // RCLB imm8, r8
    if isImm8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xc0)
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // RCLB cl, r8
    if v0 == CL && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xd2)
            m.emit(0xd0 | lcode(v[1]))
        })
    }
    // RCLB 1, m8
    if isConst1(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd0)
            m.mrsd(2, addr(v[1]), 1)
        })
    }
    // RCLB imm8, m8
    if isImm8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc0)
            m.mrsd(2, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // RCLB cl, m8
    if v0 == CL && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd2)
            m.mrsd(2, addr(v[1]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for RCLB")
    }
    return p
}
 26551  
// RCLL performs "Rotate Left through Carry Flag".
//
// Mnemonic        : RCL
// Supported forms : (6 forms)
//
//    * RCLL 1, r32
//    * RCLL imm8, r32
//    * RCLL cl, r32
//    * RCLL 1, m32
//    * RCLL imm8, m32
//    * RCLL cl, m32
//
// 32-bit variants of RCLB: same ModRM reg field /2, opcodes D1 (by 1),
// C1 ib (by imm8) and D3 (by CL).
func (self *Program) RCLL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("RCLL", 2, Operands { v0, v1 })
    // RCLL 1, r32
    if isConst1(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Optional REX prefix, then D1 /2 with register-direct ModRM.
            m.rexo(0, v[1], false)
            m.emit(0xd1)
            m.emit(0xd0 | lcode(v[1]))
        })
    }
    // RCLL imm8, r32
    if isImm8(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xc1)
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // RCLL cl, r32
    if v0 == CL && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xd3)
            m.emit(0xd0 | lcode(v[1]))
        })
    }
    // RCLL 1, m32
    if isConst1(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd1)
            m.mrsd(2, addr(v[1]), 1)
        })
    }
    // RCLL imm8, m32
    if isImm8(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc1)
            m.mrsd(2, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // RCLL cl, m32
    if v0 == CL && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd3)
            m.mrsd(2, addr(v[1]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for RCLL")
    }
    return p
}
 26627  
// RCLQ performs "Rotate Left through Carry Flag".
//
// Mnemonic        : RCL
// Supported forms : (6 forms)
//
//    * RCLQ 1, r64
//    * RCLQ imm8, r64
//    * RCLQ cl, r64
//    * RCLQ 1, m64
//    * RCLQ imm8, m64
//    * RCLQ cl, m64
//
// 64-bit variants of RCLL: identical opcodes/reg field, but every encoding
// carries a REX.W prefix (0x48 base for registers, rexm with w=1 for memory)
// to select the 64-bit operand size.
func (self *Program) RCLQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("RCLQ", 2, Operands { v0, v1 })
    // RCLQ 1, r64
    if isConst1(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // REX.W prefix; hcode supplies the REX.B bit for R8-R15.
            m.emit(0x48 | hcode(v[1]))
            m.emit(0xd1)
            m.emit(0xd0 | lcode(v[1]))
        })
    }
    // RCLQ imm8, r64
    if isImm8(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0xc1)
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // RCLQ cl, r64
    if v0 == CL && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0xd3)
            m.emit(0xd0 | lcode(v[1]))
        })
    }
    // RCLQ 1, m64
    if isConst1(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // rexm emits the REX prefix (W=1) for the memory operand.
            m.rexm(1, 0, addr(v[1]))
            m.emit(0xd1)
            m.mrsd(2, addr(v[1]), 1)
        })
    }
    // RCLQ imm8, m64
    if isImm8(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0xc1)
            m.mrsd(2, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // RCLQ cl, m64
    if v0 == CL && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0xd3)
            m.mrsd(2, addr(v[1]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for RCLQ")
    }
    return p
}
 26703  
// RCLW performs "Rotate Left through Carry Flag".
//
// Mnemonic        : RCL
// Supported forms : (6 forms)
//
//    * RCLW 1, r16
//    * RCLW imm8, r16
//    * RCLW cl, r16
//    * RCLW 1, m16
//    * RCLW imm8, m16
//    * RCLW cl, m16
//
// 16-bit variants of RCLL: same opcodes and /2 reg field, with the 0x66
// operand-size prefix prepended to every encoding.
func (self *Program) RCLW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("RCLW", 2, Operands { v0, v1 })
    // RCLW 1, r16
    if isConst1(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 [REX] D1 /2 with register-direct ModRM.
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0xd1)
            m.emit(0xd0 | lcode(v[1]))
        })
    }
    // RCLW imm8, r16
    if isImm8(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0xc1)
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // RCLW cl, r16
    if v0 == CL && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0xd3)
            m.emit(0xd0 | lcode(v[1]))
        })
    }
    // RCLW 1, m16
    if isConst1(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd1)
            m.mrsd(2, addr(v[1]), 1)
        })
    }
    // RCLW imm8, m16
    if isImm8(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc1)
            m.mrsd(2, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // RCLW cl, m16
    if v0 == CL && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd3)
            m.mrsd(2, addr(v[1]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for RCLW")
    }
    return p
}
 26785  
// RCPPS performs "Compute Approximate Reciprocals of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : RCPPS
// Supported forms : (2 forms)
//
//    * RCPPS xmm, xmm     [SSE]
//    * RCPPS m128, xmm    [SSE]
//
// SSE instruction, opcode 0F 53 with no mandatory prefix.
func (self *Program) RCPPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("RCPPS", 2, Operands { v0, v1 })
    // RCPPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Optional REX prefix, opcode 0F 53, register-direct ModRM.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x53)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // RCPPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x53)
            // mrsd encodes ModRM/SIB/displacement for the memory operand.
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for RCPPS")
    }
    return p
}
 26823  
// RCPSS performs "Compute Approximate Reciprocal of Scalar Single-Precision Floating-Point Values".
//
// Mnemonic        : RCPSS
// Supported forms : (2 forms)
//
//    * RCPSS xmm, xmm    [SSE]
//    * RCPSS m32, xmm    [SSE]
//
// Scalar counterpart of RCPPS: same 0F 53 opcode with the mandatory 0xF3
// prefix selecting the scalar form.
func (self *Program) RCPSS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("RCPSS", 2, Operands { v0, v1 })
    // RCPSS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F3 [REX] 0F 53, then register-direct ModRM.
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x53)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // RCPSS m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x53)
            // mrsd encodes ModRM/SIB/displacement for the memory operand.
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for RCPSS")
    }
    return p
}
 26863  
// RCRB performs "Rotate Right through Carry Flag".
//
// Mnemonic        : RCR
// Supported forms : (6 forms)
//
//    * RCRB 1, r8
//    * RCRB imm8, r8
//    * RCRB cl, r8
//    * RCRB 1, m8
//    * RCRB imm8, m8
//    * RCRB cl, m8
//
// Mirror of RCLB with ModRM reg field /3 instead of /2: register forms emit
// 0xD8|rm (mod=11, reg=011), memory forms pass 3 to mrsd.
func (self *Program) RCRB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("RCRB", 2, Operands { v0, v1 })
    // RCRB 1, r8
    if isConst1(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // REX is forced when the operand is a REX-only byte register (SPL..R15B).
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xd0)
            m.emit(0xd8 | lcode(v[1]))
        })
    }
    // RCRB imm8, r8
    if isImm8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xc0)
            m.emit(0xd8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // RCRB cl, r8
    if v0 == CL && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xd2)
            m.emit(0xd8 | lcode(v[1]))
        })
    }
    // RCRB 1, m8
    if isConst1(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd0)
            m.mrsd(3, addr(v[1]), 1)
        })
    }
    // RCRB imm8, m8
    if isImm8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc0)
            m.mrsd(3, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // RCRB cl, m8
    if v0 == CL && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd2)
            m.mrsd(3, addr(v[1]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for RCRB")
    }
    return p
}
 26939  
// RCRL performs "Rotate Right through Carry Flag".
//
// Mnemonic        : RCR
// Supported forms : (6 forms)
//
//    * RCRL 1, r32
//    * RCRL imm8, r32
//    * RCRL cl, r32
//    * RCRL 1, m32
//    * RCRL imm8, m32
//    * RCRL cl, m32
//
// 32-bit variants of RCRB: same ModRM reg field /3, opcodes D1 (by 1),
// C1 ib (by imm8) and D3 (by CL).
func (self *Program) RCRL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("RCRL", 2, Operands { v0, v1 })
    // RCRL 1, r32
    if isConst1(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Optional REX prefix, then D1 /3 with register-direct ModRM.
            m.rexo(0, v[1], false)
            m.emit(0xd1)
            m.emit(0xd8 | lcode(v[1]))
        })
    }
    // RCRL imm8, r32
    if isImm8(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xc1)
            m.emit(0xd8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // RCRL cl, r32
    if v0 == CL && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xd3)
            m.emit(0xd8 | lcode(v[1]))
        })
    }
    // RCRL 1, m32
    if isConst1(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd1)
            m.mrsd(3, addr(v[1]), 1)
        })
    }
    // RCRL imm8, m32
    if isImm8(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc1)
            m.mrsd(3, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // RCRL cl, m32
    if v0 == CL && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd3)
            m.mrsd(3, addr(v[1]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for RCRL")
    }
    return p
}
 27015  
// RCRQ performs "Rotate Right through Carry Flag".
//
// Mnemonic        : RCR
// Supported forms : (6 forms)
//
//    * RCRQ 1, r64
//    * RCRQ imm8, r64
//    * RCRQ cl, r64
//    * RCRQ 1, m64
//    * RCRQ imm8, m64
//    * RCRQ cl, m64
//
// 64-bit variant: each encoding carries a mandatory REX.W prefix (0x48, with
// REX.B folded in via hcode for extended registers) in front of the same
// D1 /3, C1 /3 ib, and D3 /3 opcodes used by the 32-bit form.
func (self *Program) RCRQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("RCRQ", 2, Operands { v0, v1 })
    // RCRQ 1, r64 — REX.W + D1 /3
    if isConst1(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1])) // REX.W (+ REX.B for r8-r15)
            m.emit(0xd1)
            m.emit(0xd8 | lcode(v[1])) // ModRM: mod=11, reg=/3, rm=register
        })
    }
    // RCRQ imm8, r64 — REX.W + C1 /3 ib
    if isImm8(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0xc1)
            m.emit(0xd8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))     // 8-bit rotate count
        })
    }
    // RCRQ cl, r64 — REX.W + D3 /3
    if v0 == CL && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0xd3)
            m.emit(0xd8 | lcode(v[1]))
        })
    }
    // RCRQ 1, m64 — REX.W + D1 /3
    if isConst1(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))   // REX with W=1 for the memory operand
            m.emit(0xd1)
            m.mrsd(3, addr(v[1]), 1)   // ModRM/SIB/disp, reg=/3
        })
    }
    // RCRQ imm8, m64 — REX.W + C1 /3 ib
    if isImm8(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0xc1)
            m.mrsd(3, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // RCRQ cl, m64 — REX.W + D3 /3
    if v0 == CL && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0xd3)
            m.mrsd(3, addr(v[1]), 1)
        })
    }
    // no form matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for RCRQ")
    }
    return p
}
 27091  
// RCRW performs "Rotate Right through Carry Flag".
//
// Mnemonic        : RCR
// Supported forms : (6 forms)
//
//    * RCRW 1, r16
//    * RCRW imm8, r16
//    * RCRW cl, r16
//    * RCRW 1, m16
//    * RCRW imm8, m16
//    * RCRW cl, m16
//
// 16-bit variant: each encoding is prefixed with the 0x66 operand-size
// override, followed by the same D1 /3, C1 /3 ib, and D3 /3 opcodes.
func (self *Program) RCRW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("RCRW", 2, Operands { v0, v1 })
    // RCRW 1, r16 — 66 + D1 /3
    if isConst1(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)               // operand-size override prefix
            m.rexo(0, v[1], false)
            m.emit(0xd1)
            m.emit(0xd8 | lcode(v[1])) // ModRM: mod=11, reg=/3, rm=register
        })
    }
    // RCRW imm8, r16 — 66 + C1 /3 ib
    if isImm8(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0xc1)
            m.emit(0xd8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))     // 8-bit rotate count
        })
    }
    // RCRW cl, r16 — 66 + D3 /3
    if v0 == CL && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0xd3)
            m.emit(0xd8 | lcode(v[1]))
        })
    }
    // RCRW 1, m16 — 66 + D1 /3
    if isConst1(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd1)
            m.mrsd(3, addr(v[1]), 1)   // ModRM/SIB/disp, reg=/3
        })
    }
    // RCRW imm8, m16 — 66 + C1 /3 ib
    if isImm8(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc1)
            m.mrsd(3, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // RCRW cl, m16 — 66 + D3 /3
    if v0 == CL && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd3)
            m.mrsd(3, addr(v[1]), 1)
        })
    }
    // no form matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for RCRW")
    }
    return p
}
 27173  
// RDRAND performs "Read Random Number".
//
// Mnemonic        : RDRAND
// Supported forms : (3 forms)
//
//    * RDRAND r16    [RDRAND]
//    * RDRAND r32    [RDRAND]
//    * RDRAND r64    [RDRAND]
//
// All three forms encode as 0F C7 /6 with mod=11; the operand size is
// selected by the 0x66 prefix (r16) or REX.W (r64). Requires the RDRAND
// ISA extension.
func (self *Program) RDRAND(v0 interface{}) *Instruction {
    p := self.alloc("RDRAND", 1, Operands { v0 })
    // RDRAND r16 — 66 + 0F C7 /6
    if isReg16(v0) {
        self.require(ISA_RDRAND)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)               // operand-size override prefix
            m.rexo(0, v[0], false)
            m.emit(0x0f)
            m.emit(0xc7)
            m.emit(0xf0 | lcode(v[0])) // ModRM: mod=11, reg=/6, rm=register
        })
    }
    // RDRAND r32 — 0F C7 /6
    if isReg32(v0) {
        self.require(ISA_RDRAND)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], false)
            m.emit(0x0f)
            m.emit(0xc7)
            m.emit(0xf0 | lcode(v[0]))
        })
    }
    // RDRAND r64 — REX.W + 0F C7 /6
    if isReg64(v0) {
        self.require(ISA_RDRAND)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0])) // REX.W (+ REX.B for r8-r15)
            m.emit(0x0f)
            m.emit(0xc7)
            m.emit(0xf0 | lcode(v[0]))
        })
    }
    // no form matched the supplied operand type
    if p.len == 0 {
        panic("invalid operands for RDRAND")
    }
    return p
}
 27224  
// RDSEED performs "Read Random SEED".
//
// Mnemonic        : RDSEED
// Supported forms : (3 forms)
//
//    * RDSEED r16    [RDSEED]
//    * RDSEED r32    [RDSEED]
//    * RDSEED r64    [RDSEED]
//
// Identical layout to RDRAND but with ModRM reg field /7 (0xf8 = 0xc0 | 7<<3)
// instead of /6: 0F C7 /7, sized by the 0x66 prefix (r16) or REX.W (r64).
// Requires the RDSEED ISA extension.
func (self *Program) RDSEED(v0 interface{}) *Instruction {
    p := self.alloc("RDSEED", 1, Operands { v0 })
    // RDSEED r16 — 66 + 0F C7 /7
    if isReg16(v0) {
        self.require(ISA_RDSEED)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)               // operand-size override prefix
            m.rexo(0, v[0], false)
            m.emit(0x0f)
            m.emit(0xc7)
            m.emit(0xf8 | lcode(v[0])) // ModRM: mod=11, reg=/7, rm=register
        })
    }
    // RDSEED r32 — 0F C7 /7
    if isReg32(v0) {
        self.require(ISA_RDSEED)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], false)
            m.emit(0x0f)
            m.emit(0xc7)
            m.emit(0xf8 | lcode(v[0]))
        })
    }
    // RDSEED r64 — REX.W + 0F C7 /7
    if isReg64(v0) {
        self.require(ISA_RDSEED)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0])) // REX.W (+ REX.B for r8-r15)
            m.emit(0x0f)
            m.emit(0xc7)
            m.emit(0xf8 | lcode(v[0]))
        })
    }
    // no form matched the supplied operand type
    if p.len == 0 {
        panic("invalid operands for RDSEED")
    }
    return p
}
 27275  
// RDTSC performs "Read Time-Stamp Counter".
//
// Mnemonic        : RDTSC
// Supported forms : (1 form)
//
//    * RDTSC    [RDTSC]
//
// Fixed two-byte encoding 0F 31; no operands, so no ModRM byte is emitted.
// Requires the RDTSC ISA extension.
func (self *Program) RDTSC() *Instruction {
    p := self.alloc("RDTSC", 0, Operands {  })
    // RDTSC — 0F 31
    self.require(ISA_RDTSC)
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0x0f)
        m.emit(0x31)
    })
    return p
}
 27294  
// RDTSCP performs "Read Time-Stamp Counter and Processor ID".
//
// Mnemonic        : RDTSCP
// Supported forms : (1 form)
//
//    * RDTSCP    [RDTSCP]
//
// Fixed three-byte encoding 0F 01 F9; no operands, so no ModRM byte is
// emitted. Requires the RDTSCP ISA extension.
func (self *Program) RDTSCP() *Instruction {
    p := self.alloc("RDTSCP", 0, Operands {  })
    // RDTSCP — 0F 01 F9
    self.require(ISA_RDTSCP)
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0x0f)
        m.emit(0x01)
        m.emit(0xf9)
    })
    return p
}
 27314  
// RET performs "Return from Procedure".
//
// Mnemonic        : RET
// Supported forms : (2 forms)
//
//    * RET
//    * RET imm16
//
// Variadic entry point: zero operands encodes the plain near return (C3),
// one imm16 operand encodes the stack-popping return (C2 iw). Any other
// operand count panics at allocation time.
func (self *Program) RET(vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("RET", 0, Operands {  })
        case 1  : p = self.alloc("RET", 1, Operands { vv[0] })
        default : panic("instruction RET takes 0 or 1 operands")
    }
    // RET — C3 (near return)
    if len(vv) == 0 {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc3)
        })
    }
    // RET imm16 — C2 iw (near return, pop imm16 bytes of arguments)
    if len(vv) == 1 && isImm16(vv[0]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc2)
            m.imm2(toImmAny(v[0]))     // 16-bit immediate, little-endian
        })
    }
    // single operand present but not an imm16
    if p.len == 0 {
        panic("invalid operands for RET")
    }
    return p
}
 27350  
// ROLB performs "Rotate Left".
//
// Mnemonic        : ROL
// Supported forms : (6 forms)
//
//    * ROLB 1, r8
//    * ROLB imm8, r8
//    * ROLB cl, r8
//    * ROLB 1, m8
//    * ROLB imm8, m8
//    * ROLB cl, m8
//
// 8-bit rotate group opcodes with ModRM reg field /0: D0 /0 (by 1),
// C0 /0 ib (by imm8), D2 /0 (by cl). The /0 extension appears as
// 0xc0 (mod=11, reg=0) for register forms and as mrsd's first argument
// for memory forms.
func (self *Program) ROLB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ROLB", 2, Operands { v0, v1 })
    // ROLB 1, r8 — D0 /0
    if isConst1(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1])) // REX needed for spl/bpl/sil/dil and r8b-r15b
            m.emit(0xd0)
            m.emit(0xc0 | lcode(v[1]))       // ModRM: mod=11, reg=/0, rm=register
        })
    }
    // ROLB imm8, r8 — C0 /0 ib
    if isImm8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xc0)
            m.emit(0xc0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))           // 8-bit rotate count
        })
    }
    // ROLB cl, r8 — D2 /0
    if v0 == CL && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xd2)
            m.emit(0xc0 | lcode(v[1]))
        })
    }
    // ROLB 1, m8 — D0 /0
    if isConst1(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd0)
            m.mrsd(0, addr(v[1]), 1)         // ModRM/SIB/disp, reg=/0
        })
    }
    // ROLB imm8, m8 — C0 /0 ib
    if isImm8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc0)
            m.mrsd(0, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // ROLB cl, m8 — D2 /0
    if v0 == CL && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd2)
            m.mrsd(0, addr(v[1]), 1)
        })
    }
    // no form matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for ROLB")
    }
    return p
}
 27426  
// ROLL performs "Rotate Left".
//
// Mnemonic        : ROL
// Supported forms : (6 forms)
//
//    * ROLL 1, r32
//    * ROLL imm8, r32
//    * ROLL cl, r32
//    * ROLL 1, m32
//    * ROLL imm8, m32
//    * ROLL cl, m32
//
// 32-bit rotate group: D1 /0 (by 1), C1 /0 ib (by imm8), D3 /0 (by cl).
// The /0 ModRM reg-field extension appears as 0xc0 (mod=11, reg=0) for
// register forms and as mrsd's first argument for memory forms.
func (self *Program) ROLL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ROLL", 2, Operands { v0, v1 })
    // ROLL 1, r32 — D1 /0
    if isConst1(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)     // optional REX prefix (helper defined elsewhere)
            m.emit(0xd1)
            m.emit(0xc0 | lcode(v[1])) // ModRM: mod=11, reg=/0, rm=register
        })
    }
    // ROLL imm8, r32 — C1 /0 ib
    if isImm8(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xc1)
            m.emit(0xc0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))     // 8-bit rotate count
        })
    }
    // ROLL cl, r32 — D3 /0
    if v0 == CL && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xd3)
            m.emit(0xc0 | lcode(v[1]))
        })
    }
    // ROLL 1, m32 — D1 /0
    if isConst1(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd1)
            m.mrsd(0, addr(v[1]), 1)   // ModRM/SIB/disp, reg=/0
        })
    }
    // ROLL imm8, m32 — C1 /0 ib
    if isImm8(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc1)
            m.mrsd(0, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // ROLL cl, m32 — D3 /0
    if v0 == CL && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd3)
            m.mrsd(0, addr(v[1]), 1)
        })
    }
    // no form matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for ROLL")
    }
    return p
}
 27502  
// ROLQ performs "Rotate Left".
//
// Mnemonic        : ROL
// Supported forms : (6 forms)
//
//    * ROLQ 1, r64
//    * ROLQ imm8, r64
//    * ROLQ cl, r64
//    * ROLQ 1, m64
//    * ROLQ imm8, m64
//    * ROLQ cl, m64
//
// 64-bit rotate group: mandatory REX.W prefix (0x48 | hcode for register
// forms, rexm with W=1 for memory forms) followed by D1 /0, C1 /0 ib, or
// D3 /0.
func (self *Program) ROLQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ROLQ", 2, Operands { v0, v1 })
    // ROLQ 1, r64 — REX.W + D1 /0
    if isConst1(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1])) // REX.W (+ REX.B for r8-r15)
            m.emit(0xd1)
            m.emit(0xc0 | lcode(v[1])) // ModRM: mod=11, reg=/0, rm=register
        })
    }
    // ROLQ imm8, r64 — REX.W + C1 /0 ib
    if isImm8(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0xc1)
            m.emit(0xc0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))     // 8-bit rotate count
        })
    }
    // ROLQ cl, r64 — REX.W + D3 /0
    if v0 == CL && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0xd3)
            m.emit(0xc0 | lcode(v[1]))
        })
    }
    // ROLQ 1, m64 — REX.W + D1 /0
    if isConst1(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))   // REX with W=1 for the memory operand
            m.emit(0xd1)
            m.mrsd(0, addr(v[1]), 1)   // ModRM/SIB/disp, reg=/0
        })
    }
    // ROLQ imm8, m64 — REX.W + C1 /0 ib
    if isImm8(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0xc1)
            m.mrsd(0, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // ROLQ cl, m64 — REX.W + D3 /0
    if v0 == CL && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0xd3)
            m.mrsd(0, addr(v[1]), 1)
        })
    }
    // no form matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for ROLQ")
    }
    return p
}
 27578  
// ROLW performs "Rotate Left".
//
// Mnemonic        : ROL
// Supported forms : (6 forms)
//
//    * ROLW 1, r16
//    * ROLW imm8, r16
//    * ROLW cl, r16
//    * ROLW 1, m16
//    * ROLW imm8, m16
//    * ROLW cl, m16
//
// 16-bit rotate group: 0x66 operand-size override prefix followed by
// D1 /0 (by 1), C1 /0 ib (by imm8), or D3 /0 (by cl).
func (self *Program) ROLW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ROLW", 2, Operands { v0, v1 })
    // ROLW 1, r16 — 66 + D1 /0
    if isConst1(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)               // operand-size override prefix
            m.rexo(0, v[1], false)
            m.emit(0xd1)
            m.emit(0xc0 | lcode(v[1])) // ModRM: mod=11, reg=/0, rm=register
        })
    }
    // ROLW imm8, r16 — 66 + C1 /0 ib
    if isImm8(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0xc1)
            m.emit(0xc0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))     // 8-bit rotate count
        })
    }
    // ROLW cl, r16 — 66 + D3 /0
    if v0 == CL && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0xd3)
            m.emit(0xc0 | lcode(v[1]))
        })
    }
    // ROLW 1, m16 — 66 + D1 /0
    if isConst1(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd1)
            m.mrsd(0, addr(v[1]), 1)   // ModRM/SIB/disp, reg=/0
        })
    }
    // ROLW imm8, m16 — 66 + C1 /0 ib
    if isImm8(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc1)
            m.mrsd(0, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // ROLW cl, m16 — 66 + D3 /0
    if v0 == CL && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd3)
            m.mrsd(0, addr(v[1]), 1)
        })
    }
    // no form matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for ROLW")
    }
    return p
}
 27660  
// RORB performs "Rotate Right".
//
// Mnemonic        : ROR
// Supported forms : (6 forms)
//
//    * RORB 1, r8
//    * RORB imm8, r8
//    * RORB cl, r8
//    * RORB 1, m8
//    * RORB imm8, m8
//    * RORB cl, m8
//
// 8-bit rotate group with ModRM reg field /1: D0 /1 (by 1), C0 /1 ib
// (by imm8), D2 /1 (by cl). The /1 extension appears as 0xc8
// (0xc0 | 1<<3) for register forms and as mrsd's first argument for
// memory forms.
func (self *Program) RORB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("RORB", 2, Operands { v0, v1 })
    // RORB 1, r8 — D0 /1
    if isConst1(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1])) // REX needed for spl/bpl/sil/dil and r8b-r15b
            m.emit(0xd0)
            m.emit(0xc8 | lcode(v[1]))       // ModRM: mod=11, reg=/1, rm=register
        })
    }
    // RORB imm8, r8 — C0 /1 ib
    if isImm8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xc0)
            m.emit(0xc8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))           // 8-bit rotate count
        })
    }
    // RORB cl, r8 — D2 /1
    if v0 == CL && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xd2)
            m.emit(0xc8 | lcode(v[1]))
        })
    }
    // RORB 1, m8 — D0 /1
    if isConst1(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd0)
            m.mrsd(1, addr(v[1]), 1)         // ModRM/SIB/disp, reg=/1
        })
    }
    // RORB imm8, m8 — C0 /1 ib
    if isImm8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc0)
            m.mrsd(1, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // RORB cl, m8 — D2 /1
    if v0 == CL && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd2)
            m.mrsd(1, addr(v[1]), 1)
        })
    }
    // no form matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for RORB")
    }
    return p
}
 27736  
// RORL performs "Rotate Right".
//
// Mnemonic        : ROR
// Supported forms : (6 forms)
//
//    * RORL 1, r32
//    * RORL imm8, r32
//    * RORL cl, r32
//    * RORL 1, m32
//    * RORL imm8, m32
//    * RORL cl, m32
//
// 32-bit rotate group with ModRM reg field /1: D1 /1 (by 1), C1 /1 ib
// (by imm8), D3 /1 (by cl). The /1 extension appears as 0xc8
// (0xc0 | 1<<3) for register forms and as mrsd's first argument for
// memory forms.
func (self *Program) RORL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("RORL", 2, Operands { v0, v1 })
    // RORL 1, r32 — D1 /1
    if isConst1(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)     // optional REX prefix (helper defined elsewhere)
            m.emit(0xd1)
            m.emit(0xc8 | lcode(v[1])) // ModRM: mod=11, reg=/1, rm=register
        })
    }
    // RORL imm8, r32 — C1 /1 ib
    if isImm8(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xc1)
            m.emit(0xc8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))     // 8-bit rotate count
        })
    }
    // RORL cl, r32 — D3 /1
    if v0 == CL && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xd3)
            m.emit(0xc8 | lcode(v[1]))
        })
    }
    // RORL 1, m32 — D1 /1
    if isConst1(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd1)
            m.mrsd(1, addr(v[1]), 1)   // ModRM/SIB/disp, reg=/1
        })
    }
    // RORL imm8, m32 — C1 /1 ib
    if isImm8(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc1)
            m.mrsd(1, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // RORL cl, m32 — D3 /1
    if v0 == CL && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd3)
            m.mrsd(1, addr(v[1]), 1)
        })
    }
    // no form matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for RORL")
    }
    return p
}
 27812  
// RORQ performs "Rotate Right".
//
// Mnemonic        : ROR
// Supported forms : (6 forms)
//
//    * RORQ 1, r64
//    * RORQ imm8, r64
//    * RORQ cl, r64
//    * RORQ 1, m64
//    * RORQ imm8, m64
//    * RORQ cl, m64
//
// 64-bit rotate group: mandatory REX.W prefix (0x48 | hcode for register
// forms, rexm with W=1 for memory forms) followed by D1 /1, C1 /1 ib, or
// D3 /1.
func (self *Program) RORQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("RORQ", 2, Operands { v0, v1 })
    // RORQ 1, r64 — REX.W + D1 /1
    if isConst1(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1])) // REX.W (+ REX.B for r8-r15)
            m.emit(0xd1)
            m.emit(0xc8 | lcode(v[1])) // ModRM: mod=11, reg=/1, rm=register
        })
    }
    // RORQ imm8, r64 — REX.W + C1 /1 ib
    if isImm8(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0xc1)
            m.emit(0xc8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))     // 8-bit rotate count
        })
    }
    // RORQ cl, r64 — REX.W + D3 /1
    if v0 == CL && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0xd3)
            m.emit(0xc8 | lcode(v[1]))
        })
    }
    // RORQ 1, m64 — REX.W + D1 /1
    if isConst1(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))   // REX with W=1 for the memory operand
            m.emit(0xd1)
            m.mrsd(1, addr(v[1]), 1)   // ModRM/SIB/disp, reg=/1
        })
    }
    // RORQ imm8, m64 — REX.W + C1 /1 ib
    if isImm8(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0xc1)
            m.mrsd(1, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // RORQ cl, m64 — REX.W + D3 /1
    if v0 == CL && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0xd3)
            m.mrsd(1, addr(v[1]), 1)
        })
    }
    // no form matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for RORQ")
    }
    return p
}
 27888  
// RORW performs "Rotate Right".
//
// Mnemonic        : ROR
// Supported forms : (6 forms)
//
//    * RORW 1, r16
//    * RORW imm8, r16
//    * RORW cl, r16
//    * RORW 1, m16
//    * RORW imm8, m16
//    * RORW cl, m16
//
// 16-bit rotate group: 0x66 operand-size override prefix followed by
// D1 /1 (by 1), C1 /1 ib (by imm8), or D3 /1 (by cl).
func (self *Program) RORW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("RORW", 2, Operands { v0, v1 })
    // RORW 1, r16 — 66 + D1 /1
    if isConst1(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)               // operand-size override prefix
            m.rexo(0, v[1], false)
            m.emit(0xd1)
            m.emit(0xc8 | lcode(v[1])) // ModRM: mod=11, reg=/1, rm=register
        })
    }
    // RORW imm8, r16 — 66 + C1 /1 ib
    if isImm8(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0xc1)
            m.emit(0xc8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))     // 8-bit rotate count
        })
    }
    // RORW cl, r16 — 66 + D3 /1
    if v0 == CL && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0xd3)
            m.emit(0xc8 | lcode(v[1]))
        })
    }
    // RORW 1, m16 — 66 + D1 /1
    if isConst1(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd1)
            m.mrsd(1, addr(v[1]), 1)   // ModRM/SIB/disp, reg=/1
        })
    }
    // RORW imm8, m16 — 66 + C1 /1 ib
    if isImm8(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc1)
            m.mrsd(1, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // RORW cl, m16 — 66 + D3 /1
    if v0 == CL && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd3)
            m.mrsd(1, addr(v[1]), 1)
        })
    }
    // no form matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for RORW")
    }
    return p
}
 27970  
// RORXL performs "Rotate Right Logical Without Affecting Flags".
//
// Mnemonic        : RORX
// Supported forms : (2 forms)
//
//    * RORXL imm8, r32, r32    [BMI2]
//    * RORXL imm8, m32, r32    [BMI2]
//
func (self *Program) RORXL(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("RORXL", 3, Operands { v0, v1, v2 })
    // RORX is a VEX-encoded BMI2 instruction; both forms below build a
    // 3-byte VEX prefix (map 0F3A, pp=F2, W=0 for the 32-bit variant)
    // followed by opcode F0 /r and an 8-bit rotate count.
    // RORXL imm8, r32, r32
    // Encoding: VEX.LZ.F2.0F3A.W0 F0 /r ib, VEX bytes emitted by hand
    // (0xe3 carries the inverted R/X/B bits XORed in from the register
    // high-bit codes; 0x7b packs W=0, vvvv=1111, L=0, pp=F2).
    if isImm8(v0) && isReg32(v1) && isReg32(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7b)
            m.emit(0xf0)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // RORXL imm8, m32, r32
    // Encoding: same VEX.LZ.F2.0F3A.W0 F0 /r ib, with the VEX prefix and
    // memory ModRM/SIB/disp produced by the vex3/mrsd helpers.
    if isImm8(v0) && isM32(v1) && isReg32(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x03, hcode(v[2]), addr(v[1]), 0)
            m.emit(0xf0)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No operand pattern matched: reject the combination loudly.
    if p.len == 0 {
        panic("invalid operands for RORXL")
    }
    return p
}
 28010  
// RORXQ performs "Rotate Right Logical Without Affecting Flags".
//
// Mnemonic        : RORX
// Supported forms : (2 forms)
//
//    * RORXQ imm8, r64, r64    [BMI2]
//    * RORXQ imm8, m64, r64    [BMI2]
//
func (self *Program) RORXQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("RORXQ", 3, Operands { v0, v1, v2 })
    // 64-bit variant of RORXL: identical VEX encoding except W=1
    // (0xfb instead of 0x7b in the hand-built prefix, 0x83 in vex3).
    // RORXQ imm8, r64, r64
    // Encoding: VEX.LZ.F2.0F3A.W1 F0 /r ib
    if isImm8(v0) && isReg64(v1) && isReg64(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0xfb)
            m.emit(0xf0)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // RORXQ imm8, m64, r64
    // Encoding: VEX.LZ.F2.0F3A.W1 F0 /r ib, memory form via vex3/mrsd
    if isImm8(v0) && isM64(v1) && isReg64(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x83, hcode(v[2]), addr(v[1]), 0)
            m.emit(0xf0)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No operand pattern matched: reject the combination loudly.
    if p.len == 0 {
        panic("invalid operands for RORXQ")
    }
    return p
}
 28050  
// ROUNDPD performs "Round Packed Double Precision Floating-Point Values".
//
// Mnemonic        : ROUNDPD
// Supported forms : (2 forms)
//
//    * ROUNDPD imm8, xmm, xmm     [SSE4.1]
//    * ROUNDPD imm8, m128, xmm    [SSE4.1]
//
func (self *Program) ROUNDPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("ROUNDPD", 3, Operands { v0, v1, v2 })
    // SSE4.1 legacy-encoded instruction: mandatory 66 prefix, 0F 3A
    // escape, opcode 09, trailing imm8 rounding-control byte.
    // ROUNDPD imm8, xmm, xmm
    // Encoding: 66 [REX] 0F 3A 09 /r ib
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x09)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // ROUNDPD imm8, m128, xmm
    // Encoding: 66 [REX] 0F 3A 09 /r ib, memory operand via ModRM/SIB/disp
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x09)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No operand pattern matched: reject the combination loudly.
    if p.len == 0 {
        panic("invalid operands for ROUNDPD")
    }
    return p
}
 28094  
// ROUNDPS performs "Round Packed Single Precision Floating-Point Values".
//
// Mnemonic        : ROUNDPS
// Supported forms : (2 forms)
//
//    * ROUNDPS imm8, xmm, xmm     [SSE4.1]
//    * ROUNDPS imm8, m128, xmm    [SSE4.1]
//
func (self *Program) ROUNDPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("ROUNDPS", 3, Operands { v0, v1, v2 })
    // Same encoding scheme as ROUNDPD but with opcode byte 08.
    // ROUNDPS imm8, xmm, xmm
    // Encoding: 66 [REX] 0F 3A 08 /r ib
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x08)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // ROUNDPS imm8, m128, xmm
    // Encoding: 66 [REX] 0F 3A 08 /r ib, memory operand via ModRM/SIB/disp
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x08)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No operand pattern matched: reject the combination loudly.
    if p.len == 0 {
        panic("invalid operands for ROUNDPS")
    }
    return p
}
 28138  
// ROUNDSD performs "Round Scalar Double Precision Floating-Point Values".
//
// Mnemonic        : ROUNDSD
// Supported forms : (2 forms)
//
//    * ROUNDSD imm8, xmm, xmm    [SSE4.1]
//    * ROUNDSD imm8, m64, xmm    [SSE4.1]
//
func (self *Program) ROUNDSD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("ROUNDSD", 3, Operands { v0, v1, v2 })
    // Scalar-double variant of the ROUNDxx family: opcode byte 0B,
    // memory form reads only 64 bits (hence m64, not m128).
    // ROUNDSD imm8, xmm, xmm
    // Encoding: 66 [REX] 0F 3A 0B /r ib
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x0b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // ROUNDSD imm8, m64, xmm
    // Encoding: 66 [REX] 0F 3A 0B /r ib, memory operand via ModRM/SIB/disp
    if isImm8(v0) && isM64(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x0b)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No operand pattern matched: reject the combination loudly.
    if p.len == 0 {
        panic("invalid operands for ROUNDSD")
    }
    return p
}
 28182  
// ROUNDSS performs "Round Scalar Single Precision Floating-Point Values".
//
// Mnemonic        : ROUNDSS
// Supported forms : (2 forms)
//
//    * ROUNDSS imm8, xmm, xmm    [SSE4.1]
//    * ROUNDSS imm8, m32, xmm    [SSE4.1]
//
func (self *Program) ROUNDSS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("ROUNDSS", 3, Operands { v0, v1, v2 })
    // Scalar-single variant of the ROUNDxx family: opcode byte 0A,
    // memory form reads only 32 bits (hence m32).
    // ROUNDSS imm8, xmm, xmm
    // Encoding: 66 [REX] 0F 3A 0A /r ib
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x0a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // ROUNDSS imm8, m32, xmm
    // Encoding: 66 [REX] 0F 3A 0A /r ib, memory operand via ModRM/SIB/disp
    if isImm8(v0) && isM32(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x0a)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No operand pattern matched: reject the combination loudly.
    if p.len == 0 {
        panic("invalid operands for ROUNDSS")
    }
    return p
}
 28226  
// RSQRTPS performs "Compute Reciprocals of Square Roots of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : RSQRTPS
// Supported forms : (2 forms)
//
//    * RSQRTPS xmm, xmm     [SSE]
//    * RSQRTPS m128, xmm    [SSE]
//
func (self *Program) RSQRTPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("RSQRTPS", 2, Operands { v0, v1 })
    // Plain SSE instruction, no mandatory prefix: escape 0F, opcode 52.
    // RSQRTPS xmm, xmm
    // Encoding: [REX] 0F 52 /r
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x52)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // RSQRTPS m128, xmm
    // Encoding: [REX] 0F 52 /r, memory operand via ModRM/SIB/disp
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x52)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No operand pattern matched: reject the combination loudly.
    if p.len == 0 {
        panic("invalid operands for RSQRTPS")
    }
    return p
}
 28264  
// RSQRTSS performs "Compute Reciprocal of Square Root of Scalar Single-Precision Floating-Point Value".
//
// Mnemonic        : RSQRTSS
// Supported forms : (2 forms)
//
//    * RSQRTSS xmm, xmm    [SSE]
//    * RSQRTSS m32, xmm    [SSE]
//
func (self *Program) RSQRTSS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("RSQRTSS", 2, Operands { v0, v1 })
    // Scalar variant of RSQRTPS: same 0F 52 opcode, distinguished by the
    // mandatory F3 prefix; memory form reads only 32 bits (m32).
    // RSQRTSS xmm, xmm
    // Encoding: F3 [REX] 0F 52 /r
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x52)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // RSQRTSS m32, xmm
    // Encoding: F3 [REX] 0F 52 /r, memory operand via ModRM/SIB/disp
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x52)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No operand pattern matched: reject the combination loudly.
    if p.len == 0 {
        panic("invalid operands for RSQRTSS")
    }
    return p
}
 28304  
// SALB performs "Arithmetic Shift Left".
//
// Mnemonic        : SAL
// Supported forms : (6 forms)
//
//    * SALB 1, r8
//    * SALB imm8, r8
//    * SALB cl, r8
//    * SALB 1, m8
//    * SALB imm8, m8
//    * SALB cl, m8
//
func (self *Program) SALB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SALB", 2, Operands { v0, v1 })
    // Byte-sized group-2 shift. ModRM reg field 4 (0xe0 = 0xc0 | 4<<3 in
    // the register forms, mrsd's first argument in the memory forms)
    // selects SAL/SHL. The register forms force a REX prefix when the
    // operand is one of the byte registers that requires it (isReg8REX).
    // SALB 1, r8
    // Encoding: [REX] D0 /4 (shift by an implicit count of 1)
    if isConst1(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xd0)
            m.emit(0xe0 | lcode(v[1]))
        })
    }
    // SALB imm8, r8
    // Encoding: [REX] C0 /4 ib (shift by an immediate count)
    if isImm8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xc0)
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // SALB cl, r8
    // Encoding: [REX] D2 /4 (shift by the count in CL)
    if v0 == CL && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xd2)
            m.emit(0xe0 | lcode(v[1]))
        })
    }
    // SALB 1, m8
    // Encoding: [REX] D0 /4, memory operand via ModRM/SIB/disp
    if isConst1(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd0)
            m.mrsd(4, addr(v[1]), 1)
        })
    }
    // SALB imm8, m8
    // Encoding: [REX] C0 /4 ib, memory operand via ModRM/SIB/disp
    if isImm8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc0)
            m.mrsd(4, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // SALB cl, m8
    // Encoding: [REX] D2 /4, memory operand via ModRM/SIB/disp
    if v0 == CL && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd2)
            m.mrsd(4, addr(v[1]), 1)
        })
    }
    // No operand pattern matched: reject the combination loudly.
    if p.len == 0 {
        panic("invalid operands for SALB")
    }
    return p
}
 28380  
// SALL performs "Arithmetic Shift Left".
//
// Mnemonic        : SAL
// Supported forms : (6 forms)
//
//    * SALL 1, r32
//    * SALL imm8, r32
//    * SALL cl, r32
//    * SALL 1, m32
//    * SALL imm8, m32
//    * SALL cl, m32
//
func (self *Program) SALL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SALL", 2, Operands { v0, v1 })
    // 32-bit group-2 shift: opcodes D1 (by 1), C1 ib (by imm8), D3
    // (by CL), all with ModRM reg field 4 selecting SAL/SHL.
    // SALL 1, r32
    // Encoding: [REX] D1 /4
    if isConst1(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xd1)
            m.emit(0xe0 | lcode(v[1]))
        })
    }
    // SALL imm8, r32
    // Encoding: [REX] C1 /4 ib
    if isImm8(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xc1)
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // SALL cl, r32
    // Encoding: [REX] D3 /4
    if v0 == CL && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xd3)
            m.emit(0xe0 | lcode(v[1]))
        })
    }
    // SALL 1, m32
    // Encoding: [REX] D1 /4, memory operand via ModRM/SIB/disp
    if isConst1(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd1)
            m.mrsd(4, addr(v[1]), 1)
        })
    }
    // SALL imm8, m32
    // Encoding: [REX] C1 /4 ib, memory operand via ModRM/SIB/disp
    if isImm8(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc1)
            m.mrsd(4, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // SALL cl, m32
    // Encoding: [REX] D3 /4, memory operand via ModRM/SIB/disp
    if v0 == CL && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd3)
            m.mrsd(4, addr(v[1]), 1)
        })
    }
    // No operand pattern matched: reject the combination loudly.
    if p.len == 0 {
        panic("invalid operands for SALL")
    }
    return p
}
 28456  
// SALQ performs "Arithmetic Shift Left".
//
// Mnemonic        : SAL
// Supported forms : (6 forms)
//
//    * SALQ 1, r64
//    * SALQ imm8, r64
//    * SALQ cl, r64
//    * SALQ 1, m64
//    * SALQ imm8, m64
//    * SALQ cl, m64
//
func (self *Program) SALQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SALQ", 2, Operands { v0, v1 })
    // 64-bit group-2 shift: same opcodes as SALL but with a mandatory
    // REX.W prefix (0x48 | high-bit code for registers, rexm(1, ...)
    // for memory operands) to select 64-bit operand size.
    // SALQ 1, r64
    // Encoding: REX.W D1 /4
    if isConst1(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0xd1)
            m.emit(0xe0 | lcode(v[1]))
        })
    }
    // SALQ imm8, r64
    // Encoding: REX.W C1 /4 ib
    if isImm8(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0xc1)
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // SALQ cl, r64
    // Encoding: REX.W D3 /4
    if v0 == CL && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0xd3)
            m.emit(0xe0 | lcode(v[1]))
        })
    }
    // SALQ 1, m64
    // Encoding: REX.W D1 /4, memory operand via ModRM/SIB/disp
    if isConst1(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0xd1)
            m.mrsd(4, addr(v[1]), 1)
        })
    }
    // SALQ imm8, m64
    // Encoding: REX.W C1 /4 ib, memory operand via ModRM/SIB/disp
    if isImm8(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0xc1)
            m.mrsd(4, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // SALQ cl, m64
    // Encoding: REX.W D3 /4, memory operand via ModRM/SIB/disp
    if v0 == CL && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0xd3)
            m.mrsd(4, addr(v[1]), 1)
        })
    }
    // No operand pattern matched: reject the combination loudly.
    if p.len == 0 {
        panic("invalid operands for SALQ")
    }
    return p
}
 28532  
// SALW performs "Arithmetic Shift Left".
//
// Mnemonic        : SAL
// Supported forms : (6 forms)
//
//    * SALW 1, r16
//    * SALW imm8, r16
//    * SALW cl, r16
//    * SALW 1, m16
//    * SALW imm8, m16
//    * SALW cl, m16
//
func (self *Program) SALW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SALW", 2, Operands { v0, v1 })
    // 16-bit group-2 shift: same opcodes as SALL, preceded by the 0x66
    // operand-size override prefix; ModRM reg field 4 selects SAL/SHL.
    // SALW 1, r16
    // Encoding: 66 [REX] D1 /4
    if isConst1(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0xd1)
            m.emit(0xe0 | lcode(v[1]))
        })
    }
    // SALW imm8, r16
    // Encoding: 66 [REX] C1 /4 ib
    if isImm8(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0xc1)
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // SALW cl, r16
    // Encoding: 66 [REX] D3 /4
    if v0 == CL && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0xd3)
            m.emit(0xe0 | lcode(v[1]))
        })
    }
    // SALW 1, m16
    // Encoding: 66 [REX] D1 /4, memory operand via ModRM/SIB/disp
    if isConst1(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd1)
            m.mrsd(4, addr(v[1]), 1)
        })
    }
    // SALW imm8, m16
    // Encoding: 66 [REX] C1 /4 ib, memory operand via ModRM/SIB/disp
    if isImm8(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc1)
            m.mrsd(4, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // SALW cl, m16
    // Encoding: 66 [REX] D3 /4, memory operand via ModRM/SIB/disp
    if v0 == CL && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd3)
            m.mrsd(4, addr(v[1]), 1)
        })
    }
    // No operand pattern matched: reject the combination loudly.
    if p.len == 0 {
        panic("invalid operands for SALW")
    }
    return p
}
 28614  
// SARB performs "Arithmetic Shift Right".
//
// Mnemonic        : SAR
// Supported forms : (6 forms)
//
//    * SARB 1, r8
//    * SARB imm8, r8
//    * SARB cl, r8
//    * SARB 1, m8
//    * SARB imm8, m8
//    * SARB cl, m8
//
func (self *Program) SARB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SARB", 2, Operands { v0, v1 })
    // Byte-sized group-2 shift. ModRM reg field 7 (0xf8 = 0xc0 | 7<<3 in
    // the register forms, mrsd's first argument in the memory forms)
    // selects SAR. The register forms force a REX prefix when the operand
    // is one of the byte registers that requires it (isReg8REX).
    // SARB 1, r8
    // Encoding: [REX] D0 /7 (shift by an implicit count of 1)
    if isConst1(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xd0)
            m.emit(0xf8 | lcode(v[1]))
        })
    }
    // SARB imm8, r8
    // Encoding: [REX] C0 /7 ib (shift by an immediate count)
    if isImm8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xc0)
            m.emit(0xf8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // SARB cl, r8
    // Encoding: [REX] D2 /7 (shift by the count in CL)
    if v0 == CL && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xd2)
            m.emit(0xf8 | lcode(v[1]))
        })
    }
    // SARB 1, m8
    // Encoding: [REX] D0 /7, memory operand via ModRM/SIB/disp
    if isConst1(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd0)
            m.mrsd(7, addr(v[1]), 1)
        })
    }
    // SARB imm8, m8
    // Encoding: [REX] C0 /7 ib, memory operand via ModRM/SIB/disp
    if isImm8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc0)
            m.mrsd(7, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // SARB cl, m8
    // Encoding: [REX] D2 /7, memory operand via ModRM/SIB/disp
    if v0 == CL && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd2)
            m.mrsd(7, addr(v[1]), 1)
        })
    }
    // No operand pattern matched: reject the combination loudly.
    if p.len == 0 {
        panic("invalid operands for SARB")
    }
    return p
}
 28690  
// SARL performs "Arithmetic Shift Right".
//
// Mnemonic        : SAR
// Supported forms : (6 forms)
//
//    * SARL 1, r32
//    * SARL imm8, r32
//    * SARL cl, r32
//    * SARL 1, m32
//    * SARL imm8, m32
//    * SARL cl, m32
//
func (self *Program) SARL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SARL", 2, Operands { v0, v1 })
    // 32-bit group-2 shift: opcodes D1 (by 1), C1 ib (by imm8), D3
    // (by CL), all with ModRM reg field 7 selecting SAR.
    // SARL 1, r32
    // Encoding: [REX] D1 /7
    if isConst1(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xd1)
            m.emit(0xf8 | lcode(v[1]))
        })
    }
    // SARL imm8, r32
    // Encoding: [REX] C1 /7 ib
    if isImm8(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xc1)
            m.emit(0xf8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // SARL cl, r32
    // Encoding: [REX] D3 /7
    if v0 == CL && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xd3)
            m.emit(0xf8 | lcode(v[1]))
        })
    }
    // SARL 1, m32
    // Encoding: [REX] D1 /7, memory operand via ModRM/SIB/disp
    if isConst1(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd1)
            m.mrsd(7, addr(v[1]), 1)
        })
    }
    // SARL imm8, m32
    // Encoding: [REX] C1 /7 ib, memory operand via ModRM/SIB/disp
    if isImm8(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc1)
            m.mrsd(7, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // SARL cl, m32
    // Encoding: [REX] D3 /7, memory operand via ModRM/SIB/disp
    if v0 == CL && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd3)
            m.mrsd(7, addr(v[1]), 1)
        })
    }
    // No operand pattern matched: reject the combination loudly.
    if p.len == 0 {
        panic("invalid operands for SARL")
    }
    return p
}
 28766  
// SARQ performs "Arithmetic Shift Right".
//
// Mnemonic        : SAR
// Supported forms : (6 forms)
//
//    * SARQ 1, r64
//    * SARQ imm8, r64
//    * SARQ cl, r64
//    * SARQ 1, m64
//    * SARQ imm8, m64
//    * SARQ cl, m64
//
func (self *Program) SARQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SARQ", 2, Operands { v0, v1 })
    // 64-bit group-2 shift: same opcodes as SARL but with a mandatory
    // REX.W prefix (0x48 | high-bit code for registers, rexm(1, ...)
    // for memory operands) to select 64-bit operand size; ModRM reg
    // field 7 selects SAR.
    // SARQ 1, r64
    // Encoding: REX.W D1 /7
    if isConst1(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0xd1)
            m.emit(0xf8 | lcode(v[1]))
        })
    }
    // SARQ imm8, r64
    // Encoding: REX.W C1 /7 ib
    if isImm8(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0xc1)
            m.emit(0xf8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // SARQ cl, r64
    // Encoding: REX.W D3 /7
    if v0 == CL && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0xd3)
            m.emit(0xf8 | lcode(v[1]))
        })
    }
    // SARQ 1, m64
    // Encoding: REX.W D1 /7, memory operand via ModRM/SIB/disp
    if isConst1(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0xd1)
            m.mrsd(7, addr(v[1]), 1)
        })
    }
    // SARQ imm8, m64
    // Encoding: REX.W C1 /7 ib, memory operand via ModRM/SIB/disp
    if isImm8(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0xc1)
            m.mrsd(7, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // SARQ cl, m64
    // Encoding: REX.W D3 /7, memory operand via ModRM/SIB/disp
    if v0 == CL && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0xd3)
            m.mrsd(7, addr(v[1]), 1)
        })
    }
    // No operand pattern matched: reject the combination loudly.
    if p.len == 0 {
        panic("invalid operands for SARQ")
    }
    return p
}
 28842  
// SARW performs "Arithmetic Shift Right".
//
// Mnemonic        : SAR
// Supported forms : (6 forms)
//
//    * SARW 1, r16
//    * SARW imm8, r16
//    * SARW cl, r16
//    * SARW 1, m16
//    * SARW imm8, m16
//    * SARW cl, m16
//
func (self *Program) SARW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SARW", 2, Operands { v0, v1 })
    // SARW 1, r16
    if isConst1(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                // operand-size override prefix (16-bit)
            m.rexo(0, v[1], false)      // optional REX prefix for extended registers
            m.emit(0xd1)                // opcode: shift r/m16 by 1
            m.emit(0xf8 | lcode(v[1]))  // ModRM: mod=11, reg=/7 (SAR), rm=v1
        })
    }
    // SARW imm8, r16
    if isImm8(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                // operand-size override prefix (16-bit)
            m.rexo(0, v[1], false)      // optional REX prefix for extended registers
            m.emit(0xc1)                // opcode: shift r/m16 by imm8
            m.emit(0xf8 | lcode(v[1]))  // ModRM: mod=11, reg=/7 (SAR), rm=v1
            m.imm1(toImmAny(v[0]))      // 8-bit shift count
        })
    }
    // SARW cl, r16
    if v0 == CL && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                // operand-size override prefix (16-bit)
            m.rexo(0, v[1], false)      // optional REX prefix for extended registers
            m.emit(0xd3)                // opcode: shift r/m16 by CL
            m.emit(0xf8 | lcode(v[1]))  // ModRM: mod=11, reg=/7 (SAR), rm=v1
        })
    }
    // SARW 1, m16
    if isConst1(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                     // operand-size override prefix (16-bit)
            m.rexo(0, addr(v[1]), false)     // optional REX prefix for the memory operand
            m.emit(0xd1)                     // opcode: shift r/m16 by 1
            m.mrsd(7, addr(v[1]), 1)         // ModRM/SIB/disp with reg=/7 (SAR)
        })
    }
    // SARW imm8, m16
    if isImm8(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                     // operand-size override prefix (16-bit)
            m.rexo(0, addr(v[1]), false)     // optional REX prefix for the memory operand
            m.emit(0xc1)                     // opcode: shift r/m16 by imm8
            m.mrsd(7, addr(v[1]), 1)         // ModRM/SIB/disp with reg=/7 (SAR)
            m.imm1(toImmAny(v[0]))           // 8-bit shift count
        })
    }
    // SARW cl, m16
    if v0 == CL && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                     // operand-size override prefix (16-bit)
            m.rexo(0, addr(v[1]), false)     // optional REX prefix for the memory operand
            m.emit(0xd3)                     // opcode: shift r/m16 by CL
            m.mrsd(7, addr(v[1]), 1)         // ModRM/SIB/disp with reg=/7 (SAR)
        })
    }
    if p.len == 0 {
        panic("invalid operands for SARW")
    }
    return p
}
 28924  
// SARXL performs "Arithmetic Shift Right Without Affecting Flags".
//
// Mnemonic        : SARX
// Supported forms : (2 forms)
//
//    * SARXL r32, r32, r32    [BMI2]
//    * SARXL r32, m32, r32    [BMI2]
//
func (self *Program) SARXL(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("SARXL", 3, Operands { v0, v1, v2 })
    // SARXL r32, r32, r32
    if isReg32(v0) && isReg32(v1) && isReg32(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                        // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))  // VEX byte 1: inverted R/B bits, map 0F38
            m.emit(0x7a ^ (hlcode(v[0]) << 3))                  // VEX byte 2: W=0, inverted vvvv=v0 (count reg), L=0, pp=F3
            m.emit(0xf7)                                        // SARX opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))       // ModRM: mod=11, reg=v2 (dest), rm=v1 (src)
        })
    }
    // SARXL r32, m32, r32
    if isReg32(v0) && isM32(v1) && isReg32(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x02, hcode(v[2]), addr(v[1]), hlcode(v[0]))  // 3-byte VEX: map 0F38, pp=F3, vvvv=v0 (count reg)
            m.emit(0xf7)                                                     // SARX opcode
            m.mrsd(lcode(v[2]), addr(v[1]), 1)                               // ModRM/SIB/disp: reg=v2 (dest), memory src
        })
    }
    if p.len == 0 {
        panic("invalid operands for SARXL")
    }
    return p
}
 28962  
// SARXQ performs "Arithmetic Shift Right Without Affecting Flags".
//
// Mnemonic        : SARX
// Supported forms : (2 forms)
//
//    * SARXQ r64, r64, r64    [BMI2]
//    * SARXQ r64, m64, r64    [BMI2]
//
func (self *Program) SARXQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("SARXQ", 3, Operands { v0, v1, v2 })
    // SARXQ r64, r64, r64
    if isReg64(v0) && isReg64(v1) && isReg64(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                        // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))  // VEX byte 1: inverted R/B bits, map 0F38
            m.emit(0xfa ^ (hlcode(v[0]) << 3))                  // VEX byte 2: W=1 (64-bit), inverted vvvv=v0 (count reg), L=0, pp=F3
            m.emit(0xf7)                                        // SARX opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))       // ModRM: mod=11, reg=v2 (dest), rm=v1 (src)
        })
    }
    // SARXQ r64, m64, r64
    if isReg64(v0) && isM64(v1) && isReg64(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x82, hcode(v[2]), addr(v[1]), hlcode(v[0]))  // 3-byte VEX: map 0F38, W=1, pp=F3, vvvv=v0 (count reg)
            m.emit(0xf7)                                                     // SARX opcode
            m.mrsd(lcode(v[2]), addr(v[1]), 1)                               // ModRM/SIB/disp: reg=v2 (dest), memory src
        })
    }
    if p.len == 0 {
        panic("invalid operands for SARXQ")
    }
    return p
}
 29000  
// SBBB performs "Subtract with Borrow".
//
// Mnemonic        : SBB
// Supported forms : (6 forms)
//
//    * SBBB imm8, al
//    * SBBB imm8, r8
//    * SBBB r8, r8
//    * SBBB m8, r8
//    * SBBB imm8, m8
//    * SBBB r8, m8
//
func (self *Program) SBBB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SBBB", 2, Operands { v0, v1 })
    // SBBB imm8, al
    if isImm8(v0) && v1 == AL {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x1c)            // opcode: SBB AL, imm8 (accumulator short form, no ModRM)
            m.imm1(toImmAny(v[0]))  // 8-bit immediate
        })
    }
    // SBBB imm8, r8
    if isImm8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))  // optional REX (forced for byte regs that require it, e.g. SPL/R8B)
            m.emit(0x80)                      // opcode: immediate group, r/m8
            m.emit(0xd8 | lcode(v[1]))        // ModRM: mod=11, reg=/3 (SBB), rm=v1
            m.imm1(toImmAny(v[0]))            // 8-bit immediate
        })
    }
    // SBBB r8, r8
    if isReg8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        // two equivalent encodings are offered: MR form (0x18) and RM form (0x1a)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], isReg8REX(v[0]) || isReg8REX(v[1]))  // optional REX
            m.emit(0x18)                                                   // opcode: SBB r/m8, r8
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))                  // ModRM: mod=11, reg=v0, rm=v1
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], isReg8REX(v[0]) || isReg8REX(v[1]))  // optional REX
            m.emit(0x1a)                                                   // opcode: SBB r8, r/m8
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                  // ModRM: mod=11, reg=v1, rm=v0
        })
    }
    // SBBB m8, r8
    if isM8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), isReg8REX(v[1]))  // optional REX for reg + memory operand
            m.emit(0x1a)                                      // opcode: SBB r8, r/m8
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                // ModRM/SIB/disp: reg=v1, memory src
        })
    }
    // SBBB imm8, m8
    if isImm8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)  // optional REX for the memory operand
            m.emit(0x80)                  // opcode: immediate group, r/m8
            m.mrsd(3, addr(v[1]), 1)      // ModRM/SIB/disp with reg=/3 (SBB)
            m.imm1(toImmAny(v[0]))        // 8-bit immediate
        })
    }
    // SBBB r8, m8
    if isReg8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), isReg8REX(v[0]))  // optional REX for reg + memory operand
            m.emit(0x18)                                      // opcode: SBB r/m8, r8
            m.mrsd(lcode(v[0]), addr(v[1]), 1)                // ModRM/SIB/disp: reg=v0, memory dest
        })
    }
    if p.len == 0 {
        panic("invalid operands for SBBB")
    }
    return p
}
 29080  
// SBBL performs "Subtract with Borrow".
//
// Mnemonic        : SBB
// Supported forms : (8 forms)
//
//    * SBBL imm32, eax
//    * SBBL imm8, r32
//    * SBBL imm32, r32
//    * SBBL r32, r32
//    * SBBL m32, r32
//    * SBBL imm8, m32
//    * SBBL imm32, m32
//    * SBBL r32, m32
//
func (self *Program) SBBL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SBBL", 2, Operands { v0, v1 })
    // SBBL imm32, eax
    if isImm32(v0) && v1 == EAX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x1d)            // opcode: SBB EAX, imm32 (accumulator short form, no ModRM)
            m.imm4(toImmAny(v[0]))  // 32-bit immediate
        })
    }
    // SBBL imm8, r32
    if isImm8Ext(v0, 4) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)      // optional REX prefix for extended registers
            m.emit(0x83)                // opcode: immediate group, r/m32 with sign-extended imm8
            m.emit(0xd8 | lcode(v[1]))  // ModRM: mod=11, reg=/3 (SBB), rm=v1
            m.imm1(toImmAny(v[0]))      // 8-bit immediate (sign-extended to 32)
        })
    }
    // SBBL imm32, r32
    if isImm32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)      // optional REX prefix for extended registers
            m.emit(0x81)                // opcode: immediate group, r/m32 with imm32
            m.emit(0xd8 | lcode(v[1]))  // ModRM: mod=11, reg=/3 (SBB), rm=v1
            m.imm4(toImmAny(v[0]))      // 32-bit immediate
        })
    }
    // SBBL r32, r32
    if isReg32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        // two equivalent encodings are offered: MR form (0x19) and RM form (0x1b)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], false)               // optional REX
            m.emit(0x19)                                   // opcode: SBB r/m32, r32
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))  // ModRM: mod=11, reg=v0, rm=v1
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)               // optional REX
            m.emit(0x1b)                                   // opcode: SBB r32, r/m32
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=v1, rm=v0
        })
    }
    // SBBL m32, r32
    if isM32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)  // optional REX for reg + memory operand
            m.emit(0x1b)                            // opcode: SBB r32, r/m32
            m.mrsd(lcode(v[1]), addr(v[0]), 1)      // ModRM/SIB/disp: reg=v1, memory src
        })
    }
    // SBBL imm8, m32
    if isImm8Ext(v0, 4) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)  // optional REX for the memory operand
            m.emit(0x83)                  // opcode: immediate group, sign-extended imm8
            m.mrsd(3, addr(v[1]), 1)      // ModRM/SIB/disp with reg=/3 (SBB)
            m.imm1(toImmAny(v[0]))        // 8-bit immediate (sign-extended to 32)
        })
    }
    // SBBL imm32, m32
    if isImm32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)  // optional REX for the memory operand
            m.emit(0x81)                  // opcode: immediate group, imm32
            m.mrsd(3, addr(v[1]), 1)      // ModRM/SIB/disp with reg=/3 (SBB)
            m.imm4(toImmAny(v[0]))        // 32-bit immediate
        })
    }
    // SBBL r32, m32
    if isReg32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)  // optional REX for reg + memory operand
            m.emit(0x19)                            // opcode: SBB r/m32, r32
            m.mrsd(lcode(v[0]), addr(v[1]), 1)      // ModRM/SIB/disp: reg=v0, memory dest
        })
    }
    if p.len == 0 {
        panic("invalid operands for SBBL")
    }
    return p
}
 29182  
// SBBQ performs "Subtract with Borrow".
//
// Mnemonic        : SBB
// Supported forms : (8 forms)
//
//    * SBBQ imm32, rax
//    * SBBQ imm8, r64
//    * SBBQ imm32, r64
//    * SBBQ r64, r64
//    * SBBQ m64, r64
//    * SBBQ imm8, m64
//    * SBBQ imm32, m64
//    * SBBQ r64, m64
//
func (self *Program) SBBQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SBBQ", 2, Operands { v0, v1 })
    // SBBQ imm32, rax
    if isImm32(v0) && v1 == RAX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48)            // REX.W prefix (64-bit operand size)
            m.emit(0x1d)            // opcode: SBB RAX, imm32 (accumulator short form)
            m.imm4(toImmAny(v[0]))  // 32-bit immediate (sign-extended to 64)
        })
    }
    // SBBQ imm8, r64
    if isImm8Ext(v0, 8) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))  // REX.W prefix; REX.B from the register's high bit
            m.emit(0x83)                // opcode: immediate group, sign-extended imm8
            m.emit(0xd8 | lcode(v[1]))  // ModRM: mod=11, reg=/3 (SBB), rm=v1
            m.imm1(toImmAny(v[0]))      // 8-bit immediate (sign-extended to 64)
        })
    }
    // SBBQ imm32, r64
    if isImm32Ext(v0, 8) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))  // REX.W prefix; REX.B from the register's high bit
            m.emit(0x81)                // opcode: immediate group, imm32
            m.emit(0xd8 | lcode(v[1]))  // ModRM: mod=11, reg=/3 (SBB), rm=v1
            m.imm4(toImmAny(v[0]))      // 32-bit immediate (sign-extended to 64)
        })
    }
    // SBBQ r64, r64
    if isReg64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        // two equivalent encodings are offered: MR form (0x19) and RM form (0x1b)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1]))  // REX.W; REX.R from v0, REX.B from v1
            m.emit(0x19)                                   // opcode: SBB r/m64, r64
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))  // ModRM: mod=11, reg=v0, rm=v1
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))  // REX.W; REX.R from v1, REX.B from v0
            m.emit(0x1b)                                   // opcode: SBB r64, r/m64
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=v1, rm=v0
        })
    }
    // SBBQ m64, r64
    if isM64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))  // REX.W prefix for reg + memory operand
            m.emit(0x1b)                        // opcode: SBB r64, r/m64
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM/SIB/disp: reg=v1, memory src
        })
    }
    // SBBQ imm8, m64
    if isImm8Ext(v0, 8) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))  // REX.W prefix for the memory operand
            m.emit(0x83)              // opcode: immediate group, sign-extended imm8
            m.mrsd(3, addr(v[1]), 1)  // ModRM/SIB/disp with reg=/3 (SBB)
            m.imm1(toImmAny(v[0]))    // 8-bit immediate (sign-extended to 64)
        })
    }
    // SBBQ imm32, m64
    if isImm32Ext(v0, 8) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))  // REX.W prefix for the memory operand
            m.emit(0x81)              // opcode: immediate group, imm32
            m.mrsd(3, addr(v[1]), 1)  // ModRM/SIB/disp with reg=/3 (SBB)
            m.imm4(toImmAny(v[0]))    // 32-bit immediate (sign-extended to 64)
        })
    }
    // SBBQ r64, m64
    if isReg64(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[0]), addr(v[1]))  // REX.W prefix for reg + memory operand
            m.emit(0x19)                        // opcode: SBB r/m64, r64
            m.mrsd(lcode(v[0]), addr(v[1]), 1)  // ModRM/SIB/disp: reg=v0, memory dest
        })
    }
    if p.len == 0 {
        panic("invalid operands for SBBQ")
    }
    return p
}
 29285  
// SBBW performs "Subtract with Borrow".
//
// Mnemonic        : SBB
// Supported forms : (8 forms)
//
//    * SBBW imm16, ax
//    * SBBW imm8, r16
//    * SBBW imm16, r16
//    * SBBW r16, r16
//    * SBBW m16, r16
//    * SBBW imm8, m16
//    * SBBW imm16, m16
//    * SBBW r16, m16
//
func (self *Program) SBBW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SBBW", 2, Operands { v0, v1 })
    // SBBW imm16, ax
    if isImm16(v0) && v1 == AX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)            // operand-size override prefix (16-bit)
            m.emit(0x1d)            // opcode: SBB AX, imm16 (accumulator short form)
            m.imm2(toImmAny(v[0]))  // 16-bit immediate
        })
    }
    // SBBW imm8, r16
    if isImm8Ext(v0, 2) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                // operand-size override prefix (16-bit)
            m.rexo(0, v[1], false)      // optional REX prefix for extended registers
            m.emit(0x83)                // opcode: immediate group, sign-extended imm8
            m.emit(0xd8 | lcode(v[1]))  // ModRM: mod=11, reg=/3 (SBB), rm=v1
            m.imm1(toImmAny(v[0]))      // 8-bit immediate (sign-extended to 16)
        })
    }
    // SBBW imm16, r16
    if isImm16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                // operand-size override prefix (16-bit)
            m.rexo(0, v[1], false)      // optional REX prefix for extended registers
            m.emit(0x81)                // opcode: immediate group, imm16
            m.emit(0xd8 | lcode(v[1]))  // ModRM: mod=11, reg=/3 (SBB), rm=v1
            m.imm2(toImmAny(v[0]))      // 16-bit immediate
        })
    }
    // SBBW r16, r16
    if isReg16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        // two equivalent encodings are offered: MR form (0x19) and RM form (0x1b)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                   // operand-size override prefix (16-bit)
            m.rexo(hcode(v[0]), v[1], false)               // optional REX
            m.emit(0x19)                                   // opcode: SBB r/m16, r16
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))  // ModRM: mod=11, reg=v0, rm=v1
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                   // operand-size override prefix (16-bit)
            m.rexo(hcode(v[1]), v[0], false)               // optional REX
            m.emit(0x1b)                                   // opcode: SBB r16, r/m16
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=v1, rm=v0
        })
    }
    // SBBW m16, r16
    if isM16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                            // operand-size override prefix (16-bit)
            m.rexo(hcode(v[1]), addr(v[0]), false)  // optional REX for reg + memory operand
            m.emit(0x1b)                            // opcode: SBB r16, r/m16
            m.mrsd(lcode(v[1]), addr(v[0]), 1)      // ModRM/SIB/disp: reg=v1, memory src
        })
    }
    // SBBW imm8, m16
    if isImm8Ext(v0, 2) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                  // operand-size override prefix (16-bit)
            m.rexo(0, addr(v[1]), false)  // optional REX for the memory operand
            m.emit(0x83)                  // opcode: immediate group, sign-extended imm8
            m.mrsd(3, addr(v[1]), 1)      // ModRM/SIB/disp with reg=/3 (SBB)
            m.imm1(toImmAny(v[0]))        // 8-bit immediate (sign-extended to 16)
        })
    }
    // SBBW imm16, m16
    if isImm16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                  // operand-size override prefix (16-bit)
            m.rexo(0, addr(v[1]), false)  // optional REX for the memory operand
            m.emit(0x81)                  // opcode: immediate group, imm16
            m.mrsd(3, addr(v[1]), 1)      // ModRM/SIB/disp with reg=/3 (SBB)
            m.imm2(toImmAny(v[0]))        // 16-bit immediate
        })
    }
    // SBBW r16, m16
    if isReg16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                            // operand-size override prefix (16-bit)
            m.rexo(hcode(v[0]), addr(v[1]), false)  // optional REX for reg + memory operand
            m.emit(0x19)                            // opcode: SBB r/m16, r16
            m.mrsd(lcode(v[0]), addr(v[1]), 1)      // ModRM/SIB/disp: reg=v0, memory dest
        })
    }
    if p.len == 0 {
        panic("invalid operands for SBBW")
    }
    return p
}
 29396  
// SETA performs "Set byte if above (CF == 0 and ZF == 0)".
//
// Mnemonic        : SETA
// Supported forms : (2 forms)
//
//    * SETA r8
//    * SETA m8
//
func (self *Program) SETA(v0 interface{}) *Instruction {
    p := self.alloc("SETA", 1, Operands { v0 })
    // SETA r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))  // optional REX (forced for byte regs that require it, e.g. SPL/R8B)
            m.emit(0x0f)                      // two-byte opcode escape
            m.emit(0x97)                      // SETA opcode (0F 90+cc, cc=7: above)
            m.emit(0xc0 | lcode(v[0]))        // ModRM: mod=11, rm=v0 (reg field unused by SETcc)
        })
    }
    // SETA m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)  // optional REX for the memory operand
            m.emit(0x0f)                  // two-byte opcode escape
            m.emit(0x97)                  // SETA opcode
            m.mrsd(0, addr(v[0]), 1)      // ModRM/SIB/disp (reg field unused by SETcc)
        })
    }
    if p.len == 0 {
        panic("invalid operands for SETA")
    }
    return p
}
 29432  
// SETAE performs "Set byte if above or equal (CF == 0)".
//
// Mnemonic        : SETAE
// Supported forms : (2 forms)
//
//    * SETAE r8
//    * SETAE m8
//
func (self *Program) SETAE(v0 interface{}) *Instruction {
    p := self.alloc("SETAE", 1, Operands { v0 })
    // SETAE r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))  // optional REX (forced for byte regs that require it, e.g. SPL/R8B)
            m.emit(0x0f)                      // two-byte opcode escape
            m.emit(0x93)                      // SETAE opcode (0F 90+cc, cc=3: not carry)
            m.emit(0xc0 | lcode(v[0]))        // ModRM: mod=11, rm=v0 (reg field unused by SETcc)
        })
    }
    // SETAE m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)  // optional REX for the memory operand
            m.emit(0x0f)                  // two-byte opcode escape
            m.emit(0x93)                  // SETAE opcode
            m.mrsd(0, addr(v[0]), 1)      // ModRM/SIB/disp (reg field unused by SETcc)
        })
    }
    if p.len == 0 {
        panic("invalid operands for SETAE")
    }
    return p
}
 29468  
// SETB performs "Set byte if below (CF == 1)".
//
// Mnemonic        : SETB
// Supported forms : (2 forms)
//
//    * SETB r8
//    * SETB m8
//
func (self *Program) SETB(v0 interface{}) *Instruction {
    p := self.alloc("SETB", 1, Operands { v0 })
    // SETB r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))  // optional REX (forced for byte regs that require it, e.g. SPL/R8B)
            m.emit(0x0f)                      // two-byte opcode escape
            m.emit(0x92)                      // SETB opcode (0F 90+cc, cc=2: carry) — same encoding as SETC
            m.emit(0xc0 | lcode(v[0]))        // ModRM: mod=11, rm=v0 (reg field unused by SETcc)
        })
    }
    // SETB m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)  // optional REX for the memory operand
            m.emit(0x0f)                  // two-byte opcode escape
            m.emit(0x92)                  // SETB opcode
            m.mrsd(0, addr(v[0]), 1)      // ModRM/SIB/disp (reg field unused by SETcc)
        })
    }
    if p.len == 0 {
        panic("invalid operands for SETB")
    }
    return p
}
 29504  
// SETBE performs "Set byte if below or equal (CF == 1 or ZF == 1)".
//
// Mnemonic        : SETBE
// Supported forms : (2 forms)
//
//    * SETBE r8
//    * SETBE m8
//
func (self *Program) SETBE(v0 interface{}) *Instruction {
    p := self.alloc("SETBE", 1, Operands { v0 })
    // SETBE r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))  // optional REX (forced for byte regs that require it, e.g. SPL/R8B)
            m.emit(0x0f)                      // two-byte opcode escape
            m.emit(0x96)                      // SETBE opcode (0F 90+cc, cc=6: below or equal)
            m.emit(0xc0 | lcode(v[0]))        // ModRM: mod=11, rm=v0 (reg field unused by SETcc)
        })
    }
    // SETBE m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)  // optional REX for the memory operand
            m.emit(0x0f)                  // two-byte opcode escape
            m.emit(0x96)                  // SETBE opcode
            m.mrsd(0, addr(v[0]), 1)      // ModRM/SIB/disp (reg field unused by SETcc)
        })
    }
    if p.len == 0 {
        panic("invalid operands for SETBE")
    }
    return p
}
 29540  
// SETC performs "Set byte if carry (CF == 1)".
//
// Mnemonic        : SETC
// Supported forms : (2 forms)
//
//    * SETC r8
//    * SETC m8
//
func (self *Program) SETC(v0 interface{}) *Instruction {
    p := self.alloc("SETC", 1, Operands { v0 })
    // SETC r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))  // optional REX (forced for byte regs that require it, e.g. SPL/R8B)
            m.emit(0x0f)                      // two-byte opcode escape
            m.emit(0x92)                      // SETC opcode (0F 90+cc, cc=2: carry) — same encoding as SETB
            m.emit(0xc0 | lcode(v[0]))        // ModRM: mod=11, rm=v0 (reg field unused by SETcc)
        })
    }
    // SETC m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)  // optional REX for the memory operand
            m.emit(0x0f)                  // two-byte opcode escape
            m.emit(0x92)                  // SETC opcode
            m.mrsd(0, addr(v[0]), 1)      // ModRM/SIB/disp (reg field unused by SETcc)
        })
    }
    if p.len == 0 {
        panic("invalid operands for SETC")
    }
    return p
}
 29576  
// SETE performs "Set byte if equal (ZF == 1)".
//
// Mnemonic        : SETE
// Supported forms : (2 forms)
//
//    * SETE r8
//    * SETE m8
//
func (self *Program) SETE(v0 interface{}) *Instruction {
    p := self.alloc("SETE", 1, Operands { v0 })
    // SETE r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))  // optional REX (forced for byte regs that require it, e.g. SPL/R8B)
            m.emit(0x0f)                      // two-byte opcode escape
            m.emit(0x94)                      // SETE opcode (0F 90+cc, cc=4: zero/equal)
            m.emit(0xc0 | lcode(v[0]))        // ModRM: mod=11, rm=v0 (reg field unused by SETcc)
        })
    }
    // SETE m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)  // optional REX for the memory operand
            m.emit(0x0f)                  // two-byte opcode escape
            m.emit(0x94)                  // SETE opcode
            m.mrsd(0, addr(v[0]), 1)      // ModRM/SIB/disp (reg field unused by SETcc)
        })
    }
    if p.len == 0 {
        panic("invalid operands for SETE")
    }
    return p
}
 29612  
// SETG performs "Set byte if greater (ZF == 0 and SF == OF)".
//
// Mnemonic        : SETG
// Supported forms : (2 forms)
//
//    * SETG r8
//    * SETG m8
//
func (self *Program) SETG(v0 interface{}) *Instruction {
    p := self.alloc("SETG", 1, Operands { v0 })
    // SETG r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))  // optional REX (forced for byte regs that require it, e.g. SPL/R8B)
            m.emit(0x0f)                      // two-byte opcode escape
            m.emit(0x9f)                      // SETG opcode (0F 90+cc, cc=15: greater)
            m.emit(0xc0 | lcode(v[0]))        // ModRM: mod=11, rm=v0 (reg field unused by SETcc)
        })
    }
    // SETG m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)  // optional REX for the memory operand
            m.emit(0x0f)                  // two-byte opcode escape
            m.emit(0x9f)                  // SETG opcode
            m.mrsd(0, addr(v[0]), 1)      // ModRM/SIB/disp (reg field unused by SETcc)
        })
    }
    if p.len == 0 {
        panic("invalid operands for SETG")
    }
    return p
}
 29648  
// SETGE performs "Set byte if greater or equal (SF == OF)".
//
// Mnemonic        : SETGE
// Supported forms : (2 forms)
//
//    * SETGE r8
//    * SETGE m8
//
func (self *Program) SETGE(v0 interface{}) *Instruction {
    p := self.alloc("SETGE", 1, Operands { v0 })
    // SETGE r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))  // optional REX (forced for byte regs that require it, e.g. SPL/R8B)
            m.emit(0x0f)                      // two-byte opcode escape
            m.emit(0x9d)                      // SETGE opcode (0F 90+cc, cc=13: greater or equal)
            m.emit(0xc0 | lcode(v[0]))        // ModRM: mod=11, rm=v0 (reg field unused by SETcc)
        })
    }
    // SETGE m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)  // optional REX for the memory operand
            m.emit(0x0f)                  // two-byte opcode escape
            m.emit(0x9d)                  // SETGE opcode
            m.mrsd(0, addr(v[0]), 1)      // ModRM/SIB/disp (reg field unused by SETcc)
        })
    }
    if p.len == 0 {
        panic("invalid operands for SETGE")
    }
    return p
}
 29684  
// SETL performs "Set byte if less (SF != OF)".
//
// Mnemonic        : SETL
// Supported forms : (2 forms)
//
//    * SETL r8
//    * SETL m8
//
func (self *Program) SETL(v0 interface{}) *Instruction {
    p := self.alloc("SETL", 1, Operands { v0 })
    // SETL r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix, forced when the byte register requires one
            m.emit(0x0f)
            m.emit(0x9c)                        // opcode: 0F 9C (SETL)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=11 (register-direct), r/m=v0
        })
    }
    // SETL m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix derived from the memory operand
            m.emit(0x0f)
            m.emit(0x9c)                        // opcode: 0F 9C (SETL)
            m.mrsd(0, addr(v[0]), 1)            // ModRM/SIB/displacement for the memory operand
        })
    }
    // No form matched the supplied operand: reject at encode time.
    if p.len == 0 {
        panic("invalid operands for SETL")
    }
    return p
}
 29720  
// SETLE performs "Set byte if less or equal (ZF == 1 or SF != OF)".
//
// Mnemonic        : SETLE
// Supported forms : (2 forms)
//
//    * SETLE r8
//    * SETLE m8
//
func (self *Program) SETLE(v0 interface{}) *Instruction {
    p := self.alloc("SETLE", 1, Operands { v0 })
    // SETLE r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix, forced when the byte register requires one
            m.emit(0x0f)
            m.emit(0x9e)                        // opcode: 0F 9E (SETLE)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=11 (register-direct), r/m=v0
        })
    }
    // SETLE m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix derived from the memory operand
            m.emit(0x0f)
            m.emit(0x9e)                        // opcode: 0F 9E (SETLE)
            m.mrsd(0, addr(v[0]), 1)            // ModRM/SIB/displacement for the memory operand
        })
    }
    // No form matched the supplied operand: reject at encode time.
    if p.len == 0 {
        panic("invalid operands for SETLE")
    }
    return p
}
 29756  
// SETNA performs "Set byte if not above (CF == 1 or ZF == 1)".
//
// Mnemonic        : SETNA
// Supported forms : (2 forms)
//
//    * SETNA r8
//    * SETNA m8
//
func (self *Program) SETNA(v0 interface{}) *Instruction {
    p := self.alloc("SETNA", 1, Operands { v0 })
    // SETNA r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix, forced when the byte register requires one
            m.emit(0x0f)
            m.emit(0x96)                        // opcode: 0F 96 (SETNA, alias of SETBE)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=11 (register-direct), r/m=v0
        })
    }
    // SETNA m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix derived from the memory operand
            m.emit(0x0f)
            m.emit(0x96)                        // opcode: 0F 96 (SETNA, alias of SETBE)
            m.mrsd(0, addr(v[0]), 1)            // ModRM/SIB/displacement for the memory operand
        })
    }
    // No form matched the supplied operand: reject at encode time.
    if p.len == 0 {
        panic("invalid operands for SETNA")
    }
    return p
}
 29792  
// SETNAE performs "Set byte if not above or equal (CF == 1)".
//
// Mnemonic        : SETNAE
// Supported forms : (2 forms)
//
//    * SETNAE r8
//    * SETNAE m8
//
func (self *Program) SETNAE(v0 interface{}) *Instruction {
    p := self.alloc("SETNAE", 1, Operands { v0 })
    // SETNAE r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix, forced when the byte register requires one
            m.emit(0x0f)
            m.emit(0x92)                        // opcode: 0F 92 (SETNAE, alias of SETB/SETC)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=11 (register-direct), r/m=v0
        })
    }
    // SETNAE m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix derived from the memory operand
            m.emit(0x0f)
            m.emit(0x92)                        // opcode: 0F 92 (SETNAE, alias of SETB/SETC)
            m.mrsd(0, addr(v[0]), 1)            // ModRM/SIB/displacement for the memory operand
        })
    }
    // No form matched the supplied operand: reject at encode time.
    if p.len == 0 {
        panic("invalid operands for SETNAE")
    }
    return p
}
 29828  
// SETNB performs "Set byte if not below (CF == 0)".
//
// Mnemonic        : SETNB
// Supported forms : (2 forms)
//
//    * SETNB r8
//    * SETNB m8
//
func (self *Program) SETNB(v0 interface{}) *Instruction {
    p := self.alloc("SETNB", 1, Operands { v0 })
    // SETNB r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix, forced when the byte register requires one
            m.emit(0x0f)
            m.emit(0x93)                        // opcode: 0F 93 (SETNB, alias of SETAE/SETNC)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=11 (register-direct), r/m=v0
        })
    }
    // SETNB m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix derived from the memory operand
            m.emit(0x0f)
            m.emit(0x93)                        // opcode: 0F 93 (SETNB, alias of SETAE/SETNC)
            m.mrsd(0, addr(v[0]), 1)            // ModRM/SIB/displacement for the memory operand
        })
    }
    // No form matched the supplied operand: reject at encode time.
    if p.len == 0 {
        panic("invalid operands for SETNB")
    }
    return p
}
 29864  
// SETNBE performs "Set byte if not below or equal (CF == 0 and ZF == 0)".
//
// Mnemonic        : SETNBE
// Supported forms : (2 forms)
//
//    * SETNBE r8
//    * SETNBE m8
//
func (self *Program) SETNBE(v0 interface{}) *Instruction {
    p := self.alloc("SETNBE", 1, Operands { v0 })
    // SETNBE r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix, forced when the byte register requires one
            m.emit(0x0f)
            m.emit(0x97)                        // opcode: 0F 97 (SETNBE, alias of SETA)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=11 (register-direct), r/m=v0
        })
    }
    // SETNBE m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix derived from the memory operand
            m.emit(0x0f)
            m.emit(0x97)                        // opcode: 0F 97 (SETNBE, alias of SETA)
            m.mrsd(0, addr(v[0]), 1)            // ModRM/SIB/displacement for the memory operand
        })
    }
    // No form matched the supplied operand: reject at encode time.
    if p.len == 0 {
        panic("invalid operands for SETNBE")
    }
    return p
}
 29900  
// SETNC performs "Set byte if not carry (CF == 0)".
//
// Mnemonic        : SETNC
// Supported forms : (2 forms)
//
//    * SETNC r8
//    * SETNC m8
//
func (self *Program) SETNC(v0 interface{}) *Instruction {
    p := self.alloc("SETNC", 1, Operands { v0 })
    // SETNC r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix, forced when the byte register requires one
            m.emit(0x0f)
            m.emit(0x93)                        // opcode: 0F 93 (SETNC, alias of SETAE/SETNB)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=11 (register-direct), r/m=v0
        })
    }
    // SETNC m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix derived from the memory operand
            m.emit(0x0f)
            m.emit(0x93)                        // opcode: 0F 93 (SETNC, alias of SETAE/SETNB)
            m.mrsd(0, addr(v[0]), 1)            // ModRM/SIB/displacement for the memory operand
        })
    }
    // No form matched the supplied operand: reject at encode time.
    if p.len == 0 {
        panic("invalid operands for SETNC")
    }
    return p
}
 29936  
// SETNE performs "Set byte if not equal (ZF == 0)".
//
// Mnemonic        : SETNE
// Supported forms : (2 forms)
//
//    * SETNE r8
//    * SETNE m8
//
func (self *Program) SETNE(v0 interface{}) *Instruction {
    p := self.alloc("SETNE", 1, Operands { v0 })
    // SETNE r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix, forced when the byte register requires one
            m.emit(0x0f)
            m.emit(0x95)                        // opcode: 0F 95 (SETNE, alias of SETNZ)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=11 (register-direct), r/m=v0
        })
    }
    // SETNE m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix derived from the memory operand
            m.emit(0x0f)
            m.emit(0x95)                        // opcode: 0F 95 (SETNE, alias of SETNZ)
            m.mrsd(0, addr(v[0]), 1)            // ModRM/SIB/displacement for the memory operand
        })
    }
    // No form matched the supplied operand: reject at encode time.
    if p.len == 0 {
        panic("invalid operands for SETNE")
    }
    return p
}
 29972  
// SETNG performs "Set byte if not greater (ZF == 1 or SF != OF)".
//
// Mnemonic        : SETNG
// Supported forms : (2 forms)
//
//    * SETNG r8
//    * SETNG m8
//
func (self *Program) SETNG(v0 interface{}) *Instruction {
    p := self.alloc("SETNG", 1, Operands { v0 })
    // SETNG r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix, forced when the byte register requires one
            m.emit(0x0f)
            m.emit(0x9e)                        // opcode: 0F 9E (SETNG, alias of SETLE)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=11 (register-direct), r/m=v0
        })
    }
    // SETNG m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix derived from the memory operand
            m.emit(0x0f)
            m.emit(0x9e)                        // opcode: 0F 9E (SETNG, alias of SETLE)
            m.mrsd(0, addr(v[0]), 1)            // ModRM/SIB/displacement for the memory operand
        })
    }
    // No form matched the supplied operand: reject at encode time.
    if p.len == 0 {
        panic("invalid operands for SETNG")
    }
    return p
}
 30008  
// SETNGE performs "Set byte if not greater or equal (SF != OF)".
//
// Mnemonic        : SETNGE
// Supported forms : (2 forms)
//
//    * SETNGE r8
//    * SETNGE m8
//
func (self *Program) SETNGE(v0 interface{}) *Instruction {
    p := self.alloc("SETNGE", 1, Operands { v0 })
    // SETNGE r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix, forced when the byte register requires one
            m.emit(0x0f)
            m.emit(0x9c)                        // opcode: 0F 9C (SETNGE, alias of SETL)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=11 (register-direct), r/m=v0
        })
    }
    // SETNGE m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix derived from the memory operand
            m.emit(0x0f)
            m.emit(0x9c)                        // opcode: 0F 9C (SETNGE, alias of SETL)
            m.mrsd(0, addr(v[0]), 1)            // ModRM/SIB/displacement for the memory operand
        })
    }
    // No form matched the supplied operand: reject at encode time.
    if p.len == 0 {
        panic("invalid operands for SETNGE")
    }
    return p
}
 30044  
// SETNL performs "Set byte if not less (SF == OF)".
//
// Mnemonic        : SETNL
// Supported forms : (2 forms)
//
//    * SETNL r8
//    * SETNL m8
//
func (self *Program) SETNL(v0 interface{}) *Instruction {
    p := self.alloc("SETNL", 1, Operands { v0 })
    // SETNL r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix, forced when the byte register requires one
            m.emit(0x0f)
            m.emit(0x9d)                        // opcode: 0F 9D (SETNL, alias of SETGE)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=11 (register-direct), r/m=v0
        })
    }
    // SETNL m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix derived from the memory operand
            m.emit(0x0f)
            m.emit(0x9d)                        // opcode: 0F 9D (SETNL, alias of SETGE)
            m.mrsd(0, addr(v[0]), 1)            // ModRM/SIB/displacement for the memory operand
        })
    }
    // No form matched the supplied operand: reject at encode time.
    if p.len == 0 {
        panic("invalid operands for SETNL")
    }
    return p
}
 30080  
// SETNLE performs "Set byte if not less or equal (ZF == 0 and SF == OF)".
//
// Mnemonic        : SETNLE
// Supported forms : (2 forms)
//
//    * SETNLE r8
//    * SETNLE m8
//
func (self *Program) SETNLE(v0 interface{}) *Instruction {
    p := self.alloc("SETNLE", 1, Operands { v0 })
    // SETNLE r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix, forced when the byte register requires one
            m.emit(0x0f)
            m.emit(0x9f)                        // opcode: 0F 9F (SETNLE, alias of SETG)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=11 (register-direct), r/m=v0
        })
    }
    // SETNLE m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix derived from the memory operand
            m.emit(0x0f)
            m.emit(0x9f)                        // opcode: 0F 9F (SETNLE, alias of SETG)
            m.mrsd(0, addr(v[0]), 1)            // ModRM/SIB/displacement for the memory operand
        })
    }
    // No form matched the supplied operand: reject at encode time.
    if p.len == 0 {
        panic("invalid operands for SETNLE")
    }
    return p
}
 30116  
// SETNO performs "Set byte if not overflow (OF == 0)".
//
// Mnemonic        : SETNO
// Supported forms : (2 forms)
//
//    * SETNO r8
//    * SETNO m8
//
func (self *Program) SETNO(v0 interface{}) *Instruction {
    p := self.alloc("SETNO", 1, Operands { v0 })
    // SETNO r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix, forced when the byte register requires one
            m.emit(0x0f)
            m.emit(0x91)                        // opcode: 0F 91 (SETNO)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=11 (register-direct), r/m=v0
        })
    }
    // SETNO m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix derived from the memory operand
            m.emit(0x0f)
            m.emit(0x91)                        // opcode: 0F 91 (SETNO)
            m.mrsd(0, addr(v[0]), 1)            // ModRM/SIB/displacement for the memory operand
        })
    }
    // No form matched the supplied operand: reject at encode time.
    if p.len == 0 {
        panic("invalid operands for SETNO")
    }
    return p
}
 30152  
// SETNP performs "Set byte if not parity (PF == 0)".
//
// Mnemonic        : SETNP
// Supported forms : (2 forms)
//
//    * SETNP r8
//    * SETNP m8
//
func (self *Program) SETNP(v0 interface{}) *Instruction {
    p := self.alloc("SETNP", 1, Operands { v0 })
    // SETNP r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix, forced when the byte register requires one
            m.emit(0x0f)
            m.emit(0x9b)                        // opcode: 0F 9B (SETNP, alias of SETPO)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=11 (register-direct), r/m=v0
        })
    }
    // SETNP m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix derived from the memory operand
            m.emit(0x0f)
            m.emit(0x9b)                        // opcode: 0F 9B (SETNP, alias of SETPO)
            m.mrsd(0, addr(v[0]), 1)            // ModRM/SIB/displacement for the memory operand
        })
    }
    // No form matched the supplied operand: reject at encode time.
    if p.len == 0 {
        panic("invalid operands for SETNP")
    }
    return p
}
 30188  
// SETNS performs "Set byte if not sign (SF == 0)".
//
// Mnemonic        : SETNS
// Supported forms : (2 forms)
//
//    * SETNS r8
//    * SETNS m8
//
func (self *Program) SETNS(v0 interface{}) *Instruction {
    p := self.alloc("SETNS", 1, Operands { v0 })
    // SETNS r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix, forced when the byte register requires one
            m.emit(0x0f)
            m.emit(0x99)                        // opcode: 0F 99 (SETNS)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=11 (register-direct), r/m=v0
        })
    }
    // SETNS m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix derived from the memory operand
            m.emit(0x0f)
            m.emit(0x99)                        // opcode: 0F 99 (SETNS)
            m.mrsd(0, addr(v[0]), 1)            // ModRM/SIB/displacement for the memory operand
        })
    }
    // No form matched the supplied operand: reject at encode time.
    if p.len == 0 {
        panic("invalid operands for SETNS")
    }
    return p
}
 30224  
// SETNZ performs "Set byte if not zero (ZF == 0)".
//
// Mnemonic        : SETNZ
// Supported forms : (2 forms)
//
//    * SETNZ r8
//    * SETNZ m8
//
func (self *Program) SETNZ(v0 interface{}) *Instruction {
    p := self.alloc("SETNZ", 1, Operands { v0 })
    // SETNZ r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix, forced when the byte register requires one
            m.emit(0x0f)
            m.emit(0x95)                        // opcode: 0F 95 (SETNZ, alias of SETNE)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=11 (register-direct), r/m=v0
        })
    }
    // SETNZ m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix derived from the memory operand
            m.emit(0x0f)
            m.emit(0x95)                        // opcode: 0F 95 (SETNZ, alias of SETNE)
            m.mrsd(0, addr(v[0]), 1)            // ModRM/SIB/displacement for the memory operand
        })
    }
    // No form matched the supplied operand: reject at encode time.
    if p.len == 0 {
        panic("invalid operands for SETNZ")
    }
    return p
}
 30260  
// SETO performs "Set byte if overflow (OF == 1)".
//
// Mnemonic        : SETO
// Supported forms : (2 forms)
//
//    * SETO r8
//    * SETO m8
//
func (self *Program) SETO(v0 interface{}) *Instruction {
    p := self.alloc("SETO", 1, Operands { v0 })
    // SETO r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix, forced when the byte register requires one
            m.emit(0x0f)
            m.emit(0x90)                        // opcode: 0F 90 (SETO)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=11 (register-direct), r/m=v0
        })
    }
    // SETO m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix derived from the memory operand
            m.emit(0x0f)
            m.emit(0x90)                        // opcode: 0F 90 (SETO)
            m.mrsd(0, addr(v[0]), 1)            // ModRM/SIB/displacement for the memory operand
        })
    }
    // No form matched the supplied operand: reject at encode time.
    if p.len == 0 {
        panic("invalid operands for SETO")
    }
    return p
}
 30296  
// SETP performs "Set byte if parity (PF == 1)".
//
// Mnemonic        : SETP
// Supported forms : (2 forms)
//
//    * SETP r8
//    * SETP m8
//
func (self *Program) SETP(v0 interface{}) *Instruction {
    p := self.alloc("SETP", 1, Operands { v0 })
    // SETP r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix, forced when the byte register requires one
            m.emit(0x0f)
            m.emit(0x9a)                        // opcode: 0F 9A (SETP, alias of SETPE)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=11 (register-direct), r/m=v0
        })
    }
    // SETP m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix derived from the memory operand
            m.emit(0x0f)
            m.emit(0x9a)                        // opcode: 0F 9A (SETP, alias of SETPE)
            m.mrsd(0, addr(v[0]), 1)            // ModRM/SIB/displacement for the memory operand
        })
    }
    // No form matched the supplied operand: reject at encode time.
    if p.len == 0 {
        panic("invalid operands for SETP")
    }
    return p
}
 30332  
// SETPE performs "Set byte if parity even (PF == 1)".
//
// Mnemonic        : SETPE
// Supported forms : (2 forms)
//
//    * SETPE r8
//    * SETPE m8
//
func (self *Program) SETPE(v0 interface{}) *Instruction {
    p := self.alloc("SETPE", 1, Operands { v0 })
    // SETPE r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix, forced when the byte register requires one
            m.emit(0x0f)
            m.emit(0x9a)                        // opcode: 0F 9A (SETPE, alias of SETP)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=11 (register-direct), r/m=v0
        })
    }
    // SETPE m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix derived from the memory operand
            m.emit(0x0f)
            m.emit(0x9a)                        // opcode: 0F 9A (SETPE, alias of SETP)
            m.mrsd(0, addr(v[0]), 1)            // ModRM/SIB/displacement for the memory operand
        })
    }
    // No form matched the supplied operand: reject at encode time.
    if p.len == 0 {
        panic("invalid operands for SETPE")
    }
    return p
}
 30368  
// SETPO performs "Set byte if parity odd (PF == 0)".
//
// Mnemonic        : SETPO
// Supported forms : (2 forms)
//
//    * SETPO r8
//    * SETPO m8
//
func (self *Program) SETPO(v0 interface{}) *Instruction {
    p := self.alloc("SETPO", 1, Operands { v0 })
    // SETPO r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix, forced when the byte register requires one
            m.emit(0x0f)
            m.emit(0x9b)                        // opcode: 0F 9B (SETPO, alias of SETNP)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=11 (register-direct), r/m=v0
        })
    }
    // SETPO m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix derived from the memory operand
            m.emit(0x0f)
            m.emit(0x9b)                        // opcode: 0F 9B (SETPO, alias of SETNP)
            m.mrsd(0, addr(v[0]), 1)            // ModRM/SIB/displacement for the memory operand
        })
    }
    // No form matched the supplied operand: reject at encode time.
    if p.len == 0 {
        panic("invalid operands for SETPO")
    }
    return p
}
 30404  
// SETS performs "Set byte if sign (SF == 1)".
//
// Mnemonic        : SETS
// Supported forms : (2 forms)
//
//    * SETS r8
//    * SETS m8
//
func (self *Program) SETS(v0 interface{}) *Instruction {
    p := self.alloc("SETS", 1, Operands { v0 })
    // SETS r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix, forced when the byte register requires one
            m.emit(0x0f)
            m.emit(0x98)                        // opcode: 0F 98 (SETS)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=11 (register-direct), r/m=v0
        })
    }
    // SETS m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix derived from the memory operand
            m.emit(0x0f)
            m.emit(0x98)                        // opcode: 0F 98 (SETS)
            m.mrsd(0, addr(v[0]), 1)            // ModRM/SIB/displacement for the memory operand
        })
    }
    // No form matched the supplied operand: reject at encode time.
    if p.len == 0 {
        panic("invalid operands for SETS")
    }
    return p
}
 30440  
// SETZ performs "Set byte if zero (ZF == 1)".
//
// Mnemonic        : SETZ
// Supported forms : (2 forms)
//
//    * SETZ r8
//    * SETZ m8
//
func (self *Program) SETZ(v0 interface{}) *Instruction {
    p := self.alloc("SETZ", 1, Operands { v0 })
    // SETZ r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix, forced when the byte register requires one
            m.emit(0x0f)
            m.emit(0x94)                        // opcode: 0F 94 (SETZ, alias of SETE)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=11 (register-direct), r/m=v0
        })
    }
    // SETZ m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix derived from the memory operand
            m.emit(0x0f)
            m.emit(0x94)                        // opcode: 0F 94 (SETZ, alias of SETE)
            m.mrsd(0, addr(v[0]), 1)            // ModRM/SIB/displacement for the memory operand
        })
    }
    // No form matched the supplied operand: reject at encode time.
    if p.len == 0 {
        panic("invalid operands for SETZ")
    }
    return p
}
 30476  
// SFENCE performs "Store Fence".
//
// Mnemonic        : SFENCE
// Supported forms : (1 form)
//
//    * SFENCE    [MMX+]
//
func (self *Program) SFENCE() *Instruction {
    p := self.alloc("SFENCE", 0, Operands {  })
    // SFENCE
    self.require(ISA_MMX_PLUS)  // SFENCE is gated on the MMX+ ISA flag
    p.domain = DomainGeneric
    // Single fixed-length encoding, no operands.
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0x0f)
        m.emit(0xae)
        m.emit(0xf8)            // fixed encoding: 0F AE F8 (SFENCE)
    })
    return p
}
 30496  
// SHA1MSG1 performs "Perform an Intermediate Calculation for the Next Four SHA1 Message Doublewords".
//
// Mnemonic        : SHA1MSG1
// Supported forms : (2 forms)
//
//    * SHA1MSG1 xmm, xmm     [SHA]
//    * SHA1MSG1 m128, xmm    [SHA]
//
func (self *Program) SHA1MSG1(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SHA1MSG1", 2, Operands { v0, v1 })
    // SHA1MSG1 xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SHA)   // gated on the SHA ISA flag
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)    // optional REX prefix (REX.R from destination v1)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xc9)                        // opcode: 0F 38 C9 (SHA1MSG1)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=v1 (dst), r/m=v0 (src)
        })
    }
    // SHA1MSG1 m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SHA)   // gated on the SHA ISA flag
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)  // optional REX prefix (REX.R from destination v1)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xc9)                        // opcode: 0F 38 C9 (SHA1MSG1)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM/SIB/displacement: reg=v1, memory operand=v0
        })
    }
    // No form matched the supplied operands: reject at encode time.
    if p.len == 0 {
        panic("invalid operands for SHA1MSG1")
    }
    return p
}
 30536  
// SHA1MSG2 performs "Perform a Final Calculation for the Next Four SHA1 Message Doublewords".
//
// Mnemonic        : SHA1MSG2
// Supported forms : (2 forms)
//
//    * SHA1MSG2 xmm, xmm     [SHA]
//    * SHA1MSG2 m128, xmm    [SHA]
//
func (self *Program) SHA1MSG2(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SHA1MSG2", 2, Operands { v0, v1 })
    // SHA1MSG2 xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SHA)   // gated on the SHA ISA flag
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)    // optional REX prefix (REX.R from destination v1)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xca)                        // opcode: 0F 38 CA (SHA1MSG2)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=v1 (dst), r/m=v0 (src)
        })
    }
    // SHA1MSG2 m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SHA)   // gated on the SHA ISA flag
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)  // optional REX prefix (REX.R from destination v1)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xca)                        // opcode: 0F 38 CA (SHA1MSG2)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM/SIB/displacement: reg=v1, memory operand=v0
        })
    }
    // No form matched the supplied operands: reject at encode time.
    if p.len == 0 {
        panic("invalid operands for SHA1MSG2")
    }
    return p
}
 30576  
// SHA1NEXTE performs "Calculate SHA1 State Variable E after Four Rounds".
//
// Mnemonic        : SHA1NEXTE
// Supported forms : (2 forms)
//
//    * SHA1NEXTE xmm, xmm     [SHA]
//    * SHA1NEXTE m128, xmm    [SHA]
//
func (self *Program) SHA1NEXTE(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SHA1NEXTE", 2, Operands { v0, v1 })
    // SHA1NEXTE xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SHA)   // gated on the SHA ISA flag
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)    // optional REX prefix (REX.R from destination v1)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xc8)                        // opcode: 0F 38 C8 (SHA1NEXTE)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=v1 (dst), r/m=v0 (src)
        })
    }
    // SHA1NEXTE m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SHA)   // gated on the SHA ISA flag
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)  // optional REX prefix (REX.R from destination v1)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xc8)                        // opcode: 0F 38 C8 (SHA1NEXTE)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM/SIB/displacement: reg=v1, memory operand=v0
        })
    }
    // No form matched the supplied operands: reject at encode time.
    if p.len == 0 {
        panic("invalid operands for SHA1NEXTE")
    }
    return p
}
 30616  
// SHA1RNDS4 performs "Perform Four Rounds of SHA1 Operation".
//
// Mnemonic        : SHA1RNDS4
// Supported forms : (2 forms)
//
//    * SHA1RNDS4 imm8, xmm, xmm     [SHA]
//    * SHA1RNDS4 imm8, m128, xmm    [SHA]
//
func (self *Program) SHA1RNDS4(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("SHA1RNDS4", 3, Operands { v0, v1, v2 })
    // SHA1RNDS4 imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SHA)   // gated on the SHA ISA flag
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[2]), v[1], false)    // optional REX prefix (REX.R from destination v2)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0xcc)                        // opcode: 0F 3A CC (SHA1RNDS4)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))   // ModRM: mod=11, reg=v2 (dst), r/m=v1 (src)
            m.imm1(toImmAny(v[0]))              // trailing 8-bit immediate (v0)
        })
    }
    // SHA1RNDS4 imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SHA)   // gated on the SHA ISA flag
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[2]), addr(v[1]), false)  // optional REX prefix (REX.R from destination v2)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0xcc)                        // opcode: 0F 3A CC (SHA1RNDS4)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)  // ModRM/SIB/displacement: reg=v2, memory operand=v1
            m.imm1(toImmAny(v[0]))              // trailing 8-bit immediate (v0)
        })
    }
    // No form matched the supplied operands: reject at encode time.
    if p.len == 0 {
        panic("invalid operands for SHA1RNDS4")
    }
    return p
}
 30658  
// SHA256MSG1 performs "Perform an Intermediate Calculation for the Next Four SHA256 Message Doublewords".
//
// Mnemonic        : SHA256MSG1
// Supported forms : (2 forms)
//
//    * SHA256MSG1 xmm, xmm     [SHA]
//    * SHA256MSG1 m128, xmm    [SHA]
//
func (self *Program) SHA256MSG1(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SHA256MSG1", 2, Operands { v0, v1 })
    // SHA256MSG1 xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SHA)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                // optional REX prefix for extended (xmm8-15) registers
            m.emit(0x0f)                                    // opcode: 0F 38 CC
            m.emit(0x38)
            m.emit(0xcc)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst xmm, rm=src xmm
        })
    }
    // SHA256MSG1 m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SHA)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)          // optional REX derived from the memory operand
            m.emit(0x0f)                                    // opcode: 0F 38 CC
            m.emit(0x38)
            m.emit(0xcc)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/displacement for the m128 source
        })
    }
    if p.len == 0 {
        panic("invalid operands for SHA256MSG1")            // no supported form matched the operands
    }
    return p
}
 30698  
// SHA256MSG2 performs "Perform a Final Calculation for the Next Four SHA256 Message Doublewords".
//
// Mnemonic        : SHA256MSG2
// Supported forms : (2 forms)
//
//    * SHA256MSG2 xmm, xmm     [SHA]
//    * SHA256MSG2 m128, xmm    [SHA]
//
func (self *Program) SHA256MSG2(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SHA256MSG2", 2, Operands { v0, v1 })
    // SHA256MSG2 xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SHA)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                // optional REX prefix for extended (xmm8-15) registers
            m.emit(0x0f)                                    // opcode: 0F 38 CD
            m.emit(0x38)
            m.emit(0xcd)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst xmm, rm=src xmm
        })
    }
    // SHA256MSG2 m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SHA)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)          // optional REX derived from the memory operand
            m.emit(0x0f)                                    // opcode: 0F 38 CD
            m.emit(0x38)
            m.emit(0xcd)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/displacement for the m128 source
        })
    }
    if p.len == 0 {
        panic("invalid operands for SHA256MSG2")            // no supported form matched the operands
    }
    return p
}
 30738  
// SHA256RNDS2 performs "Perform Two Rounds of SHA256 Operation".
//
// Mnemonic        : SHA256RNDS2
// Supported forms : (2 forms)
//
//    * SHA256RNDS2 xmm0, xmm, xmm     [SHA]
//    * SHA256RNDS2 xmm0, m128, xmm    [SHA]
//
func (self *Program) SHA256RNDS2(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("SHA256RNDS2", 3, Operands { v0, v1, v2 })
    // SHA256RNDS2 xmm0, xmm, xmm
    // v0 must be the fixed register XMM0: it is an implicit operand of this
    // instruction and contributes no bits to the encoding below.
    if v0 == XMM0 && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SHA)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[2]), v[1], false)                // optional REX prefix for extended (xmm8-15) registers
            m.emit(0x0f)                                    // opcode: 0F 38 CB
            m.emit(0x38)
            m.emit(0xcb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))   // ModRM: mod=11, reg=dst xmm, rm=src xmm
        })
    }
    // SHA256RNDS2 xmm0, m128, xmm
    if v0 == XMM0 && isM128(v1) && isXMM(v2) {
        self.require(ISA_SHA)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[2]), addr(v[1]), false)          // optional REX derived from the memory operand
            m.emit(0x0f)                                    // opcode: 0F 38 CB
            m.emit(0x38)
            m.emit(0xcb)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)              // ModRM/SIB/displacement for the m128 source
        })
    }
    if p.len == 0 {
        panic("invalid operands for SHA256RNDS2")           // no supported form matched the operands
    }
    return p
}
 30778  
// SHLB performs "Logical Shift Left".
//
// Mnemonic        : SHL
// Supported forms : (6 forms)
//
//    * SHLB 1, r8
//    * SHLB imm8, r8
//    * SHLB cl, r8
//    * SHLB 1, m8
//    * SHLB imm8, m8
//    * SHLB cl, m8
//
func (self *Program) SHLB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SHLB", 2, Operands { v0, v1 })
    // SHLB 1, r8
    if isConst1(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))    // REX required for SPL/BPL/SIL/DIL and r8b-r15b
            m.emit(0xd0)                        // D0 /4: shift r/m8 by 1
            m.emit(0xe0 | lcode(v[1]))          // ModRM: mod=11, reg=/4 (SHL), rm=register
        })
    }
    // SHLB imm8, r8
    if isImm8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xc0)                        // C0 /4 ib: shift r/m8 by imm8
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))              // shift count immediate
        })
    }
    // SHLB cl, r8
    if v0 == CL && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xd2)                        // D2 /4: shift r/m8 by CL (implicit count register)
            m.emit(0xe0 | lcode(v[1]))
        })
    }
    // SHLB 1, m8
    if isConst1(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd0)
            m.mrsd(4, addr(v[1]), 1)            // reg field = /4 opcode extension (SHL)
        })
    }
    // SHLB imm8, m8
    if isImm8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc0)
            m.mrsd(4, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // SHLB cl, m8
    if v0 == CL && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd2)
            m.mrsd(4, addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for SHLB")      // no supported form matched the operands
    }
    return p
}
 30854  
// SHLDL performs "Integer Double Precision Shift Left".
//
// Mnemonic        : SHLD
// Supported forms : (4 forms)
//
//    * SHLDL imm8, r32, r32
//    * SHLDL cl, r32, r32
//    * SHLDL imm8, r32, m32
//    * SHLDL cl, r32, m32
//
func (self *Program) SHLDL(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("SHLDL", 3, Operands { v0, v1, v2 })
    // SHLDL imm8, r32, r32
    if isImm8(v0) && isReg32(v1) && isReg32(v2) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[2], false)                // optional REX for extended (r8d-r15d) registers
            m.emit(0x0f)                                    // opcode: 0F A4 (SHLD r/m, r, imm8)
            m.emit(0xa4)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))   // ModRM: mod=11, reg=source, rm=destination
            m.imm1(toImmAny(v[0]))                          // shift count immediate
        })
    }
    // SHLDL cl, r32, r32
    if v0 == CL && isReg32(v1) && isReg32(v2) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[2], false)
            m.emit(0x0f)                                    // opcode: 0F A5 (SHLD r/m, r, CL)
            m.emit(0xa5)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
        })
    }
    // SHLDL imm8, r32, m32
    if isImm8(v0) && isReg32(v1) && isM32(v2) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[2]), false)
            m.emit(0x0f)
            m.emit(0xa4)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)              // ModRM/SIB/displacement for the m32 destination
            m.imm1(toImmAny(v[0]))
        })
    }
    // SHLDL cl, r32, m32
    if v0 == CL && isReg32(v1) && isM32(v2) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[2]), false)
            m.emit(0x0f)
            m.emit(0xa5)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for SHLDL")                 // no supported form matched the operands
    }
    return p
}
 30914  
// SHLDQ performs "Integer Double Precision Shift Left".
//
// Mnemonic        : SHLD
// Supported forms : (4 forms)
//
//    * SHLDQ imm8, r64, r64
//    * SHLDQ cl, r64, r64
//    * SHLDQ imm8, r64, m64
//    * SHLDQ cl, r64, m64
//
func (self *Program) SHLDQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("SHLDQ", 3, Operands { v0, v1, v2 })
    // SHLDQ imm8, r64, r64
    if isImm8(v0) && isReg64(v1) && isReg64(v2) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[2]))   // mandatory REX.W, plus REX.R (source) and REX.B (destination)
            m.emit(0x0f)                                    // opcode: 0F A4 (SHLD r/m, r, imm8)
            m.emit(0xa4)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))   // ModRM: mod=11, reg=source, rm=destination
            m.imm1(toImmAny(v[0]))                          // shift count immediate
        })
    }
    // SHLDQ cl, r64, r64
    if v0 == CL && isReg64(v1) && isReg64(v2) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[2]))
            m.emit(0x0f)                                    // opcode: 0F A5 (SHLD r/m, r, CL)
            m.emit(0xa5)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
        })
    }
    // SHLDQ imm8, r64, m64
    if isImm8(v0) && isReg64(v1) && isM64(v2) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[2]))              // REX with W=1 for the 64-bit memory form
            m.emit(0x0f)
            m.emit(0xa4)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)              // ModRM/SIB/displacement for the m64 destination
            m.imm1(toImmAny(v[0]))
        })
    }
    // SHLDQ cl, r64, m64
    if v0 == CL && isReg64(v1) && isM64(v2) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[2]))
            m.emit(0x0f)
            m.emit(0xa5)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for SHLDQ")                 // no supported form matched the operands
    }
    return p
}
 30974  
// SHLDW performs "Integer Double Precision Shift Left".
//
// Mnemonic        : SHLD
// Supported forms : (4 forms)
//
//    * SHLDW imm8, r16, r16
//    * SHLDW cl, r16, r16
//    * SHLDW imm8, r16, m16
//    * SHLDW cl, r16, m16
//
func (self *Program) SHLDW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("SHLDW", 3, Operands { v0, v1, v2 })
    // SHLDW imm8, r16, r16
    if isImm8(v0) && isReg16(v1) && isReg16(v2) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // operand-size override prefix selects 16-bit operation
            m.rexo(hcode(v[1]), v[2], false)                // optional REX for extended (r8w-r15w) registers
            m.emit(0x0f)                                    // opcode: 0F A4 (SHLD r/m, r, imm8)
            m.emit(0xa4)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))   // ModRM: mod=11, reg=source, rm=destination
            m.imm1(toImmAny(v[0]))                          // shift count immediate
        })
    }
    // SHLDW cl, r16, r16
    if v0 == CL && isReg16(v1) && isReg16(v2) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[2], false)
            m.emit(0x0f)                                    // opcode: 0F A5 (SHLD r/m, r, CL)
            m.emit(0xa5)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
        })
    }
    // SHLDW imm8, r16, m16
    if isImm8(v0) && isReg16(v1) && isM16(v2) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[2]), false)
            m.emit(0x0f)
            m.emit(0xa4)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)              // ModRM/SIB/displacement for the m16 destination
            m.imm1(toImmAny(v[0]))
        })
    }
    // SHLDW cl, r16, m16
    if v0 == CL && isReg16(v1) && isM16(v2) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[2]), false)
            m.emit(0x0f)
            m.emit(0xa5)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for SHLDW")                 // no supported form matched the operands
    }
    return p
}
 31038  
// SHLL performs "Logical Shift Left".
//
// Mnemonic        : SHL
// Supported forms : (6 forms)
//
//    * SHLL 1, r32
//    * SHLL imm8, r32
//    * SHLL cl, r32
//    * SHLL 1, m32
//    * SHLL imm8, m32
//    * SHLL cl, m32
//
func (self *Program) SHLL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SHLL", 2, Operands { v0, v1 })
    // SHLL 1, r32
    if isConst1(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)          // optional REX for extended (r8d-r15d) registers
            m.emit(0xd1)                    // D1 /4: shift r/m32 by 1
            m.emit(0xe0 | lcode(v[1]))      // ModRM: mod=11, reg=/4 (SHL), rm=register
        })
    }
    // SHLL imm8, r32
    if isImm8(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xc1)                    // C1 /4 ib: shift r/m32 by imm8
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))          // shift count immediate
        })
    }
    // SHLL cl, r32
    if v0 == CL && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xd3)                    // D3 /4: shift r/m32 by CL (implicit count register)
            m.emit(0xe0 | lcode(v[1]))
        })
    }
    // SHLL 1, m32
    if isConst1(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd1)
            m.mrsd(4, addr(v[1]), 1)        // reg field = /4 opcode extension (SHL)
        })
    }
    // SHLL imm8, m32
    if isImm8(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc1)
            m.mrsd(4, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // SHLL cl, m32
    if v0 == CL && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd3)
            m.mrsd(4, addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for SHLL")  // no supported form matched the operands
    }
    return p
}
 31114  
// SHLQ performs "Logical Shift Left".
//
// Mnemonic        : SHL
// Supported forms : (6 forms)
//
//    * SHLQ 1, r64
//    * SHLQ imm8, r64
//    * SHLQ cl, r64
//    * SHLQ 1, m64
//    * SHLQ imm8, m64
//    * SHLQ cl, m64
//
func (self *Program) SHLQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SHLQ", 2, Operands { v0, v1 })
    // SHLQ 1, r64
    if isConst1(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))      // mandatory REX.W (64-bit operand size), plus REX.B for r8-r15
            m.emit(0xd1)                    // D1 /4: shift r/m64 by 1
            m.emit(0xe0 | lcode(v[1]))      // ModRM: mod=11, reg=/4 (SHL), rm=register
        })
    }
    // SHLQ imm8, r64
    if isImm8(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0xc1)                    // C1 /4 ib: shift r/m64 by imm8
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))          // shift count immediate
        })
    }
    // SHLQ cl, r64
    if v0 == CL && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0xd3)                    // D3 /4: shift r/m64 by CL (implicit count register)
            m.emit(0xe0 | lcode(v[1]))
        })
    }
    // SHLQ 1, m64
    if isConst1(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))        // REX with W=1 for the 64-bit memory form
            m.emit(0xd1)
            m.mrsd(4, addr(v[1]), 1)        // reg field = /4 opcode extension (SHL)
        })
    }
    // SHLQ imm8, m64
    if isImm8(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0xc1)
            m.mrsd(4, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // SHLQ cl, m64
    if v0 == CL && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0xd3)
            m.mrsd(4, addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for SHLQ")  // no supported form matched the operands
    }
    return p
}
 31190  
// SHLW performs "Logical Shift Left".
//
// Mnemonic        : SHL
// Supported forms : (6 forms)
//
//    * SHLW 1, r16
//    * SHLW imm8, r16
//    * SHLW cl, r16
//    * SHLW 1, m16
//    * SHLW imm8, m16
//    * SHLW cl, m16
//
func (self *Program) SHLW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SHLW", 2, Operands { v0, v1 })
    // SHLW 1, r16
    if isConst1(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                    // operand-size override prefix selects 16-bit operation
            m.rexo(0, v[1], false)          // optional REX for extended (r8w-r15w) registers
            m.emit(0xd1)                    // D1 /4: shift r/m16 by 1
            m.emit(0xe0 | lcode(v[1]))      // ModRM: mod=11, reg=/4 (SHL), rm=register
        })
    }
    // SHLW imm8, r16
    if isImm8(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0xc1)                    // C1 /4 ib: shift r/m16 by imm8
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))          // shift count immediate
        })
    }
    // SHLW cl, r16
    if v0 == CL && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0xd3)                    // D3 /4: shift r/m16 by CL (implicit count register)
            m.emit(0xe0 | lcode(v[1]))
        })
    }
    // SHLW 1, m16
    if isConst1(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd1)
            m.mrsd(4, addr(v[1]), 1)        // reg field = /4 opcode extension (SHL)
        })
    }
    // SHLW imm8, m16
    if isImm8(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc1)
            m.mrsd(4, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // SHLW cl, m16
    if v0 == CL && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd3)
            m.mrsd(4, addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for SHLW")  // no supported form matched the operands
    }
    return p
}
 31272  
// SHLXL performs "Logical Shift Left Without Affecting Flags".
//
// Mnemonic        : SHLX
// Supported forms : (2 forms)
//
//    * SHLXL r32, r32, r32    [BMI2]
//    * SHLXL r32, m32, r32    [BMI2]
//
func (self *Program) SHLXL(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("SHLXL", 3, Operands { v0, v1, v2 })
    // SHLXL r32, r32, r32
    // v0 = shift count register (encoded in VEX.vvvv), v1 = source, v2 = destination.
    if isReg32(v0) && isReg32(v1) && isReg32(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                    // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))  // VEX byte 1: inverted R/B bits, map = 0F 38
            m.emit(0x79 ^ (hlcode(v[0]) << 3))              // VEX byte 2: W=0, inverted vvvv = count register, pp = 66
            m.emit(0xf7)                                    // opcode: F7
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))   // ModRM: mod=11, reg=destination, rm=source
        })
    }
    // SHLXL r32, m32, r32
    if isReg32(v0) && isM32(v1) && isReg32(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[1]), hlcode(v[0]))  // VEX.0F38.66, vvvv = count register
            m.emit(0xf7)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)              // ModRM/SIB/displacement for the m32 source
        })
    }
    if p.len == 0 {
        panic("invalid operands for SHLXL")                 // no supported form matched the operands
    }
    return p
}
 31310  
// SHLXQ performs "Logical Shift Left Without Affecting Flags".
//
// Mnemonic        : SHLX
// Supported forms : (2 forms)
//
//    * SHLXQ r64, r64, r64    [BMI2]
//    * SHLXQ r64, m64, r64    [BMI2]
//
func (self *Program) SHLXQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("SHLXQ", 3, Operands { v0, v1, v2 })
    // SHLXQ r64, r64, r64
    // v0 = shift count register (encoded in VEX.vvvv), v1 = source, v2 = destination.
    if isReg64(v0) && isReg64(v1) && isReg64(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                    // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))  // VEX byte 1: inverted R/B bits, map = 0F 38
            m.emit(0xf9 ^ (hlcode(v[0]) << 3))              // VEX byte 2: W=1 (64-bit), inverted vvvv = count register, pp = 66
            m.emit(0xf7)                                    // opcode: F7
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))   // ModRM: mod=11, reg=destination, rm=source
        })
    }
    // SHLXQ r64, m64, r64
    if isReg64(v0) && isM64(v1) && isReg64(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[1]), hlcode(v[0]))  // VEX.W1.0F38.66, vvvv = count register
            m.emit(0xf7)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)              // ModRM/SIB/displacement for the m64 source
        })
    }
    if p.len == 0 {
        panic("invalid operands for SHLXQ")                 // no supported form matched the operands
    }
    return p
}
 31348  
// SHRB performs "Logical Shift Right".
//
// Mnemonic        : SHR
// Supported forms : (6 forms)
//
//    * SHRB 1, r8
//    * SHRB imm8, r8
//    * SHRB cl, r8
//    * SHRB 1, m8
//    * SHRB imm8, m8
//    * SHRB cl, m8
//
func (self *Program) SHRB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SHRB", 2, Operands { v0, v1 })
    // SHRB 1, r8
    if isConst1(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))    // REX required for SPL/BPL/SIL/DIL and r8b-r15b
            m.emit(0xd0)                        // D0 /5: shift r/m8 by 1
            m.emit(0xe8 | lcode(v[1]))          // ModRM: mod=11, reg=/5 (SHR), rm=register
        })
    }
    // SHRB imm8, r8
    if isImm8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xc0)                        // C0 /5 ib: shift r/m8 by imm8
            m.emit(0xe8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))              // shift count immediate
        })
    }
    // SHRB cl, r8
    if v0 == CL && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xd2)                        // D2 /5: shift r/m8 by CL (implicit count register)
            m.emit(0xe8 | lcode(v[1]))
        })
    }
    // SHRB 1, m8
    if isConst1(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd0)
            m.mrsd(5, addr(v[1]), 1)            // reg field = /5 opcode extension (SHR)
        })
    }
    // SHRB imm8, m8
    if isImm8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc0)
            m.mrsd(5, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // SHRB cl, m8
    if v0 == CL && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd2)
            m.mrsd(5, addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for SHRB")      // no supported form matched the operands
    }
    return p
}
 31424  
// SHRDL performs "Integer Double Precision Shift Right".
//
// Mnemonic        : SHRD
// Supported forms : (4 forms)
//
//    * SHRDL imm8, r32, r32
//    * SHRDL cl, r32, r32
//    * SHRDL imm8, r32, m32
//    * SHRDL cl, r32, m32
//
func (self *Program) SHRDL(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("SHRDL", 3, Operands { v0, v1, v2 })
    // SHRDL imm8, r32, r32
    if isImm8(v0) && isReg32(v1) && isReg32(v2) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[2], false)                // optional REX for extended (r8d-r15d) registers
            m.emit(0x0f)                                    // opcode: 0F AC (SHRD r/m, r, imm8)
            m.emit(0xac)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))   // ModRM: mod=11, reg=source, rm=destination
            m.imm1(toImmAny(v[0]))                          // shift count immediate
        })
    }
    // SHRDL cl, r32, r32
    if v0 == CL && isReg32(v1) && isReg32(v2) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[2], false)
            m.emit(0x0f)                                    // opcode: 0F AD (SHRD r/m, r, CL)
            m.emit(0xad)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
        })
    }
    // SHRDL imm8, r32, m32
    if isImm8(v0) && isReg32(v1) && isM32(v2) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[2]), false)
            m.emit(0x0f)
            m.emit(0xac)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)              // ModRM/SIB/displacement for the m32 destination
            m.imm1(toImmAny(v[0]))
        })
    }
    // SHRDL cl, r32, m32
    if v0 == CL && isReg32(v1) && isM32(v2) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[2]), false)
            m.emit(0x0f)
            m.emit(0xad)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for SHRDL")                 // no supported form matched the operands
    }
    return p
}
 31484  
// SHRDQ performs "Integer Double Precision Shift Right".
//
// Mnemonic        : SHRD
// Supported forms : (4 forms)
//
//    * SHRDQ imm8, r64, r64
//    * SHRDQ cl, r64, r64
//    * SHRDQ imm8, r64, m64
//    * SHRDQ cl, r64, m64
//
func (self *Program) SHRDQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("SHRDQ", 3, Operands { v0, v1, v2 })
    // SHRDQ imm8, r64, r64
    if isImm8(v0) && isReg64(v1) && isReg64(v2) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[2]))   // mandatory REX.W, plus REX.R (source) and REX.B (destination)
            m.emit(0x0f)                                    // opcode: 0F AC (SHRD r/m, r, imm8)
            m.emit(0xac)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))   // ModRM: mod=11, reg=source, rm=destination
            m.imm1(toImmAny(v[0]))                          // shift count immediate
        })
    }
    // SHRDQ cl, r64, r64
    if v0 == CL && isReg64(v1) && isReg64(v2) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[2]))
            m.emit(0x0f)                                    // opcode: 0F AD (SHRD r/m, r, CL)
            m.emit(0xad)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
        })
    }
    // SHRDQ imm8, r64, m64
    if isImm8(v0) && isReg64(v1) && isM64(v2) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[2]))              // REX with W=1 for the 64-bit memory form
            m.emit(0x0f)
            m.emit(0xac)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)              // ModRM/SIB/displacement for the m64 destination
            m.imm1(toImmAny(v[0]))
        })
    }
    // SHRDQ cl, r64, m64
    if v0 == CL && isReg64(v1) && isM64(v2) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[2]))
            m.emit(0x0f)
            m.emit(0xad)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for SHRDQ")                 // no supported form matched the operands
    }
    return p
}
 31544  
// SHRDW performs "Integer Double Precision Shift Right".
//
// Mnemonic        : SHRD
// Supported forms : (4 forms)
//
//    * SHRDW imm8, r16, r16
//    * SHRDW cl, r16, r16
//    * SHRDW imm8, r16, m16
//    * SHRDW cl, r16, m16
//
// 16-bit variant of SHRD: every encoding starts with the 0x66
// operand-size override prefix. Panics if no form matches.
func (self *Program) SHRDW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("SHRDW", 3, Operands { v0, v1, v2 })
    // SHRDW imm8, r16, r16
    if isImm8(v0) && isReg16(v1) && isReg16(v2) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // operand-size override: 16-bit operation
            m.rexo(hcode(v[1]), v[2], false)                // optional REX for extended registers
            m.emit(0x0f)
            m.emit(0xac)                                    // opcode 0F AC: SHRD r/m16, r16, imm8
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))   // ModRM register-direct: reg = v1, rm = v2
            m.imm1(toImmAny(v[0]))                          // 8-bit shift count
        })
    }
    // SHRDW cl, r16, r16
    if v0 == CL && isReg16(v1) && isReg16(v2) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // operand-size override: 16-bit operation
            m.rexo(hcode(v[1]), v[2], false)
            m.emit(0x0f)
            m.emit(0xad)                                    // opcode 0F AD: count taken from CL
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))   // ModRM register-direct: reg = v1, rm = v2
        })
    }
    // SHRDW imm8, r16, m16
    if isImm8(v0) && isReg16(v1) && isM16(v2) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // operand-size override: 16-bit operation
            m.rexo(hcode(v[1]), addr(v[2]), false)
            m.emit(0x0f)
            m.emit(0xac)                                    // opcode 0F AC with memory destination
            m.mrsd(lcode(v[1]), addr(v[2]), 1)              // ModRM/SIB/disp; ModRM.reg = v1
            m.imm1(toImmAny(v[0]))                          // 8-bit shift count
        })
    }
    // SHRDW cl, r16, m16
    if v0 == CL && isReg16(v1) && isM16(v2) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // operand-size override: 16-bit operation
            m.rexo(hcode(v[1]), addr(v[2]), false)
            m.emit(0x0f)
            m.emit(0xad)                                    // opcode 0F AD with memory destination, count in CL
            m.mrsd(lcode(v[1]), addr(v[2]), 1)              // ModRM/SIB/disp; ModRM.reg = v1
        })
    }
    // no operand form matched: the operands are invalid for SHRDW
    if p.len == 0 {
        panic("invalid operands for SHRDW")
    }
    return p
}
 31608  
// SHRL performs "Logical Shift Right".
//
// Mnemonic        : SHR
// Supported forms : (6 forms)
//
//    * SHRL 1, r32
//    * SHRL imm8, r32
//    * SHRL cl, r32
//    * SHRL 1, m32
//    * SHRL imm8, m32
//    * SHRL cl, m32
//
// SHR uses ModRM.reg = 5 as its opcode extension (/5), visible below
// as 0xe8 | lcode(...) for register forms and mrsd(5, ...) for memory
// forms. Panics if no form matches.
func (self *Program) SHRL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SHRL", 2, Operands { v0, v1 })
    // SHRL 1, r32
    if isConst1(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)          // optional REX for extended registers
            m.emit(0xd1)                    // opcode D1 /5: shift by 1
            m.emit(0xe8 | lcode(v[1]))      // ModRM register-direct, reg = 5 (SHR), rm = v1
        })
    }
    // SHRL imm8, r32
    if isImm8(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xc1)                    // opcode C1 /5 ib: shift by imm8
            m.emit(0xe8 | lcode(v[1]))      // ModRM register-direct, reg = 5 (SHR), rm = v1
            m.imm1(toImmAny(v[0]))          // 8-bit shift count
        })
    }
    // SHRL cl, r32
    if v0 == CL && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xd3)                    // opcode D3 /5: shift by CL
            m.emit(0xe8 | lcode(v[1]))      // ModRM register-direct, reg = 5 (SHR), rm = v1
        })
    }
    // SHRL 1, m32
    if isConst1(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd1)                    // opcode D1 /5: shift by 1
            m.mrsd(5, addr(v[1]), 1)        // ModRM/SIB/disp, ModRM.reg = 5 (SHR)
        })
    }
    // SHRL imm8, m32
    if isImm8(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc1)                    // opcode C1 /5 ib: shift by imm8
            m.mrsd(5, addr(v[1]), 1)        // ModRM/SIB/disp, ModRM.reg = 5 (SHR)
            m.imm1(toImmAny(v[0]))          // 8-bit shift count
        })
    }
    // SHRL cl, m32
    if v0 == CL && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd3)                    // opcode D3 /5: shift by CL
            m.mrsd(5, addr(v[1]), 1)        // ModRM/SIB/disp, ModRM.reg = 5 (SHR)
        })
    }
    // no operand form matched: the operands are invalid for SHRL
    if p.len == 0 {
        panic("invalid operands for SHRL")
    }
    return p
}
 31684  
// SHRQ performs "Logical Shift Right".
//
// Mnemonic        : SHR
// Supported forms : (6 forms)
//
//    * SHRQ 1, r64
//    * SHRQ imm8, r64
//    * SHRQ cl, r64
//    * SHRQ 1, m64
//    * SHRQ imm8, m64
//    * SHRQ cl, m64
//
// 64-bit variant of SHR: every encoding carries a mandatory REX.W
// prefix (0x48 | ... for register forms, rexm(1, ...) for memory
// forms). SHR is opcode extension /5. Panics if no form matches.
func (self *Program) SHRQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SHRQ", 2, Operands { v0, v1 })
    // SHRQ 1, r64
    if isConst1(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))      // REX.W prefix; high bit of v1 -> REX.B
            m.emit(0xd1)                    // opcode D1 /5: shift by 1
            m.emit(0xe8 | lcode(v[1]))      // ModRM register-direct, reg = 5 (SHR), rm = v1
        })
    }
    // SHRQ imm8, r64
    if isImm8(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))      // REX.W prefix; high bit of v1 -> REX.B
            m.emit(0xc1)                    // opcode C1 /5 ib: shift by imm8
            m.emit(0xe8 | lcode(v[1]))      // ModRM register-direct, reg = 5 (SHR), rm = v1
            m.imm1(toImmAny(v[0]))          // 8-bit shift count
        })
    }
    // SHRQ cl, r64
    if v0 == CL && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))      // REX.W prefix; high bit of v1 -> REX.B
            m.emit(0xd3)                    // opcode D3 /5: shift by CL
            m.emit(0xe8 | lcode(v[1]))      // ModRM register-direct, reg = 5 (SHR), rm = v1
        })
    }
    // SHRQ 1, m64
    if isConst1(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))        // REX.W=1 with memory-operand extension bits
            m.emit(0xd1)                    // opcode D1 /5: shift by 1
            m.mrsd(5, addr(v[1]), 1)        // ModRM/SIB/disp, ModRM.reg = 5 (SHR)
        })
    }
    // SHRQ imm8, m64
    if isImm8(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))        // REX.W=1 with memory-operand extension bits
            m.emit(0xc1)                    // opcode C1 /5 ib: shift by imm8
            m.mrsd(5, addr(v[1]), 1)        // ModRM/SIB/disp, ModRM.reg = 5 (SHR)
            m.imm1(toImmAny(v[0]))          // 8-bit shift count
        })
    }
    // SHRQ cl, m64
    if v0 == CL && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))        // REX.W=1 with memory-operand extension bits
            m.emit(0xd3)                    // opcode D3 /5: shift by CL
            m.mrsd(5, addr(v[1]), 1)        // ModRM/SIB/disp, ModRM.reg = 5 (SHR)
        })
    }
    // no operand form matched: the operands are invalid for SHRQ
    if p.len == 0 {
        panic("invalid operands for SHRQ")
    }
    return p
}
 31760  
// SHRW performs "Logical Shift Right".
//
// Mnemonic        : SHR
// Supported forms : (6 forms)
//
//    * SHRW 1, r16
//    * SHRW imm8, r16
//    * SHRW cl, r16
//    * SHRW 1, m16
//    * SHRW imm8, m16
//    * SHRW cl, m16
//
// 16-bit variant of SHR: every encoding starts with the 0x66
// operand-size override prefix. SHR is opcode extension /5.
// Panics if no form matches.
func (self *Program) SHRW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SHRW", 2, Operands { v0, v1 })
    // SHRW 1, r16
    if isConst1(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                    // operand-size override: 16-bit operation
            m.rexo(0, v[1], false)          // optional REX for extended registers
            m.emit(0xd1)                    // opcode D1 /5: shift by 1
            m.emit(0xe8 | lcode(v[1]))      // ModRM register-direct, reg = 5 (SHR), rm = v1
        })
    }
    // SHRW imm8, r16
    if isImm8(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                    // operand-size override: 16-bit operation
            m.rexo(0, v[1], false)
            m.emit(0xc1)                    // opcode C1 /5 ib: shift by imm8
            m.emit(0xe8 | lcode(v[1]))      // ModRM register-direct, reg = 5 (SHR), rm = v1
            m.imm1(toImmAny(v[0]))          // 8-bit shift count
        })
    }
    // SHRW cl, r16
    if v0 == CL && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                    // operand-size override: 16-bit operation
            m.rexo(0, v[1], false)
            m.emit(0xd3)                    // opcode D3 /5: shift by CL
            m.emit(0xe8 | lcode(v[1]))      // ModRM register-direct, reg = 5 (SHR), rm = v1
        })
    }
    // SHRW 1, m16
    if isConst1(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                    // operand-size override: 16-bit operation
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd1)                    // opcode D1 /5: shift by 1
            m.mrsd(5, addr(v[1]), 1)        // ModRM/SIB/disp, ModRM.reg = 5 (SHR)
        })
    }
    // SHRW imm8, m16
    if isImm8(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                    // operand-size override: 16-bit operation
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc1)                    // opcode C1 /5 ib: shift by imm8
            m.mrsd(5, addr(v[1]), 1)        // ModRM/SIB/disp, ModRM.reg = 5 (SHR)
            m.imm1(toImmAny(v[0]))          // 8-bit shift count
        })
    }
    // SHRW cl, m16
    if v0 == CL && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                    // operand-size override: 16-bit operation
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd3)                    // opcode D3 /5: shift by CL
            m.mrsd(5, addr(v[1]), 1)        // ModRM/SIB/disp, ModRM.reg = 5 (SHR)
        })
    }
    // no operand form matched: the operands are invalid for SHRW
    if p.len == 0 {
        panic("invalid operands for SHRW")
    }
    return p
}
 31842  
// SHRXL performs "Logical Shift Right Without Affecting Flags".
//
// Mnemonic        : SHRX
// Supported forms : (2 forms)
//
//    * SHRXL r32, r32, r32    [BMI2]
//    * SHRXL r32, m32, r32    [BMI2]
//
// VEX-encoded BMI2 instruction; the shift count comes from the first
// register operand (encoded in VEX.vvvv). Requires ISA_BMI2 and
// panics if no form matches.
func (self *Program) SHRXL(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("SHRXL", 3, Operands { v0, v1, v2 })
    // SHRXL r32, r32, r32
    if isReg32(v0) && isReg32(v1) && isReg32(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                        // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))  // VEX byte 1: high bits of v2/v1 folded into R/B
            m.emit(0x7b ^ (hlcode(v[0]) << 3))                  // VEX byte 2: count register v0 in vvvv
            m.emit(0xf7)                                        // opcode F7 (SHRX)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))       // ModRM register-direct: reg = v2 (dest), rm = v1 (src)
        })
    }
    // SHRXL r32, m32, r32
    if isReg32(v0) && isM32(v1) && isReg32(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x03, hcode(v[2]), addr(v[1]), hlcode(v[0]))  // 3-byte VEX with memory operand; v0 in vvvv
            m.emit(0xf7)                                        // opcode F7 (SHRX)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)                  // ModRM/SIB/disp; ModRM.reg = v2 (dest)
        })
    }
    // no operand form matched: the operands are invalid for SHRXL
    if p.len == 0 {
        panic("invalid operands for SHRXL")
    }
    return p
}
 31880  
// SHRXQ performs "Logical Shift Right Without Affecting Flags".
//
// Mnemonic        : SHRX
// Supported forms : (2 forms)
//
//    * SHRXQ r64, r64, r64    [BMI2]
//    * SHRXQ r64, m64, r64    [BMI2]
//
// 64-bit variant of SHRX (VEX.W set: 0xfb vs SHRXL's 0x7b, and 0x83
// vs 0x03 in the vex3 call). The shift count comes from the first
// register operand (VEX.vvvv). Requires ISA_BMI2 and panics if no
// form matches.
func (self *Program) SHRXQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("SHRXQ", 3, Operands { v0, v1, v2 })
    // SHRXQ r64, r64, r64
    if isReg64(v0) && isReg64(v1) && isReg64(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                        // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))  // VEX byte 1: high bits of v2/v1 folded into R/B
            m.emit(0xfb ^ (hlcode(v[0]) << 3))                  // VEX byte 2 (W=1): count register v0 in vvvv
            m.emit(0xf7)                                        // opcode F7 (SHRX)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))       // ModRM register-direct: reg = v2 (dest), rm = v1 (src)
        })
    }
    // SHRXQ r64, m64, r64
    if isReg64(v0) && isM64(v1) && isReg64(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x83, hcode(v[2]), addr(v[1]), hlcode(v[0]))  // 3-byte VEX (W=1) with memory operand; v0 in vvvv
            m.emit(0xf7)                                        // opcode F7 (SHRX)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)                  // ModRM/SIB/disp; ModRM.reg = v2 (dest)
        })
    }
    // no operand form matched: the operands are invalid for SHRXQ
    if p.len == 0 {
        panic("invalid operands for SHRXQ")
    }
    return p
}
 31918  
// SHUFPD performs "Shuffle Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : SHUFPD
// Supported forms : (2 forms)
//
//    * SHUFPD imm8, xmm, xmm     [SSE2]
//    * SHUFPD imm8, m128, xmm    [SSE2]
//
// Legacy SSE2 encoding 66 0F C6 /r ib; the imm8 is the shuffle
// selector. Requires ISA_SSE2 and panics if no form matches.
func (self *Program) SHUFPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("SHUFPD", 3, Operands { v0, v1, v2 })
    // SHUFPD imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // mandatory 66 prefix (distinguishes PD from PS form)
            m.rexo(hcode(v[2]), v[1], false)                // optional REX for xmm8-xmm15
            m.emit(0x0f)
            m.emit(0xc6)                                    // opcode 0F C6 (SHUFPD)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))   // ModRM register-direct: reg = v2 (dest), rm = v1 (src)
            m.imm1(toImmAny(v[0]))                          // shuffle selector imm8
        })
    }
    // SHUFPD imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // mandatory 66 prefix
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xc6)                                    // opcode 0F C6 (SHUFPD)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)              // ModRM/SIB/disp; ModRM.reg = v2 (dest)
            m.imm1(toImmAny(v[0]))                          // shuffle selector imm8
        })
    }
    // no operand form matched: the operands are invalid for SHUFPD
    if p.len == 0 {
        panic("invalid operands for SHUFPD")
    }
    return p
}
 31960  
// SHUFPS performs "Shuffle Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : SHUFPS
// Supported forms : (2 forms)
//
//    * SHUFPS imm8, xmm, xmm     [SSE]
//    * SHUFPS imm8, m128, xmm    [SSE]
//
// Legacy SSE encoding 0F C6 /r ib (no 66 prefix, unlike SHUFPD);
// the imm8 is the shuffle selector. Requires ISA_SSE and panics if
// no form matches.
func (self *Program) SHUFPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("SHUFPS", 3, Operands { v0, v1, v2 })
    // SHUFPS imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[2]), v[1], false)                // optional REX for xmm8-xmm15
            m.emit(0x0f)
            m.emit(0xc6)                                    // opcode 0F C6 (SHUFPS)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))   // ModRM register-direct: reg = v2 (dest), rm = v1 (src)
            m.imm1(toImmAny(v[0]))                          // shuffle selector imm8
        })
    }
    // SHUFPS imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xc6)                                    // opcode 0F C6 (SHUFPS)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)              // ModRM/SIB/disp; ModRM.reg = v2 (dest)
            m.imm1(toImmAny(v[0]))                          // shuffle selector imm8
        })
    }
    // no operand form matched: the operands are invalid for SHUFPS
    if p.len == 0 {
        panic("invalid operands for SHUFPS")
    }
    return p
}
 32000  
// SQRTPD performs "Compute Square Roots of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : SQRTPD
// Supported forms : (2 forms)
//
//    * SQRTPD xmm, xmm     [SSE2]
//    * SQRTPD m128, xmm    [SSE2]
//
// Legacy SSE2 encoding 66 0F 51 /r. Requires ISA_SSE2 and panics if
// no form matches.
func (self *Program) SQRTPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SQRTPD", 2, Operands { v0, v1 })
    // SQRTPD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // mandatory 66 prefix (packed double)
            m.rexo(hcode(v[1]), v[0], false)                // optional REX for xmm8-xmm15
            m.emit(0x0f)
            m.emit(0x51)                                    // opcode 0F 51 (SQRTPD)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM register-direct: reg = v1 (dest), rm = v0 (src)
        })
    }
    // SQRTPD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // mandatory 66 prefix (packed double)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x51)                                    // opcode 0F 51 (SQRTPD)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/disp; ModRM.reg = v1 (dest)
        })
    }
    // no operand form matched: the operands are invalid for SQRTPD
    if p.len == 0 {
        panic("invalid operands for SQRTPD")
    }
    return p
}
 32040  
// SQRTPS performs "Compute Square Roots of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : SQRTPS
// Supported forms : (2 forms)
//
//    * SQRTPS xmm, xmm     [SSE]
//    * SQRTPS m128, xmm    [SSE]
//
// Legacy SSE encoding 0F 51 /r (no prefix, unlike SQRTPD/SD/SS).
// Requires ISA_SSE and panics if no form matches.
func (self *Program) SQRTPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SQRTPS", 2, Operands { v0, v1 })
    // SQRTPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                // optional REX for xmm8-xmm15
            m.emit(0x0f)
            m.emit(0x51)                                    // opcode 0F 51 (SQRTPS)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM register-direct: reg = v1 (dest), rm = v0 (src)
        })
    }
    // SQRTPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x51)                                    // opcode 0F 51 (SQRTPS)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/disp; ModRM.reg = v1 (dest)
        })
    }
    // no operand form matched: the operands are invalid for SQRTPS
    if p.len == 0 {
        panic("invalid operands for SQRTPS")
    }
    return p
}
 32078  
// SQRTSD performs "Compute Square Root of Scalar Double-Precision Floating-Point Value".
//
// Mnemonic        : SQRTSD
// Supported forms : (2 forms)
//
//    * SQRTSD xmm, xmm    [SSE2]
//    * SQRTSD m64, xmm    [SSE2]
//
// Legacy SSE2 encoding F2 0F 51 /r (scalar double). Requires
// ISA_SSE2 and panics if no form matches.
func (self *Program) SQRTSD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SQRTSD", 2, Operands { v0, v1 })
    // SQRTSD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)                                    // mandatory F2 prefix (scalar double)
            m.rexo(hcode(v[1]), v[0], false)                // optional REX for xmm8-xmm15
            m.emit(0x0f)
            m.emit(0x51)                                    // opcode 0F 51 (SQRTSD)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM register-direct: reg = v1 (dest), rm = v0 (src)
        })
    }
    // SQRTSD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)                                    // mandatory F2 prefix (scalar double)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x51)                                    // opcode 0F 51 (SQRTSD)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/disp; ModRM.reg = v1 (dest)
        })
    }
    // no operand form matched: the operands are invalid for SQRTSD
    if p.len == 0 {
        panic("invalid operands for SQRTSD")
    }
    return p
}
 32118  
// SQRTSS performs "Compute Square Root of Scalar Single-Precision Floating-Point Value".
//
// Mnemonic        : SQRTSS
// Supported forms : (2 forms)
//
//    * SQRTSS xmm, xmm    [SSE]
//    * SQRTSS m32, xmm    [SSE]
//
// Legacy SSE encoding F3 0F 51 /r (scalar single). Requires ISA_SSE
// and panics if no form matches.
func (self *Program) SQRTSS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SQRTSS", 2, Operands { v0, v1 })
    // SQRTSS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)                                    // mandatory F3 prefix (scalar single)
            m.rexo(hcode(v[1]), v[0], false)                // optional REX for xmm8-xmm15
            m.emit(0x0f)
            m.emit(0x51)                                    // opcode 0F 51 (SQRTSS)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM register-direct: reg = v1 (dest), rm = v0 (src)
        })
    }
    // SQRTSS m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)                                    // mandatory F3 prefix (scalar single)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x51)                                    // opcode 0F 51 (SQRTSS)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/disp; ModRM.reg = v1 (dest)
        })
    }
    // no operand form matched: the operands are invalid for SQRTSS
    if p.len == 0 {
        panic("invalid operands for SQRTSS")
    }
    return p
}
 32158  
// STC performs "Set Carry Flag".
//
// Mnemonic        : STC
// Supported forms : (1 form)
//
//    * STC
//
// Takes no operands, so the single encoding is always registered and
// the function never panics.
func (self *Program) STC() *Instruction {
    p := self.alloc("STC", 0, Operands {  })
    // STC
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0xf9)    // single-byte opcode F9 (STC)
    })
    return p
}
 32175  
// STD performs "Set Direction Flag".
//
// Mnemonic        : STD
// Supported forms : (1 form)
//
//    * STD
//
// Takes no operands, so the single encoding is always registered and
// the function never panics.
func (self *Program) STD() *Instruction {
    p := self.alloc("STD", 0, Operands {  })
    // STD
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0xfd)    // single-byte opcode FD (STD)
    })
    return p
}
 32192  
// STMXCSR performs "Store MXCSR Register State".
//
// Mnemonic        : STMXCSR
// Supported forms : (1 form)
//
//    * STMXCSR m32    [SSE]
//
// Encoding 0F AE /3 (ModRM.reg = 3 selects STMXCSR among the 0F AE
// group). Requires ISA_SSE and panics if the operand is not m32.
func (self *Program) STMXCSR(v0 interface{}) *Instruction {
    p := self.alloc("STMXCSR", 1, Operands { v0 })
    // STMXCSR m32
    if isM32(v0) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)    // optional REX for extended base/index registers
            m.emit(0x0f)
            m.emit(0xae)                    // opcode 0F AE group
            m.mrsd(3, addr(v[0]), 1)        // ModRM/SIB/disp, ModRM.reg = 3 (STMXCSR)
        })
    }
    // no operand form matched: the operand is invalid for STMXCSR
    if p.len == 0 {
        panic("invalid operands for STMXCSR")
    }
    return p
}
 32218  
// SUBB performs "Subtract".
//
// Mnemonic        : SUB
// Supported forms : (6 forms)
//
//    * SUBB imm8, al
//    * SUBB imm8, r8
//    * SUBB r8, r8
//    * SUBB m8, r8
//    * SUBB imm8, m8
//    * SUBB r8, m8
//
// 8-bit variant of SUB. SUB's immediate forms use opcode extension /5
// (0xe8 | lcode(...) register-direct, mrsd(5, ...) for memory). The
// r8,r8 form registers two equivalent encodings (28 /r and 2A /r with
// swapped direction). Panics if no form matches.
func (self *Program) SUBB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SUBB", 2, Operands { v0, v1 })
    // SUBB imm8, al
    if isImm8(v0) && v1 == AL {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x2c)                    // opcode 2C ib: AL-specific short form, no ModRM
            m.imm1(toImmAny(v[0]))
        })
    }
    // SUBB imm8, r8
    if isImm8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))    // REX needed for SPL/BPL/SIL/DIL-style registers
            m.emit(0x80)                        // opcode 80 /5 ib
            m.emit(0xe8 | lcode(v[1]))          // ModRM register-direct, reg = 5 (SUB), rm = v1
            m.imm1(toImmAny(v[0]))
        })
    }
    // SUBB r8, r8
    if isReg8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        // two equivalent encodings with opposite ModRM direction
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], isReg8REX(v[0]) || isReg8REX(v[1]))
            m.emit(0x28)                                    // opcode 28 /r: SUB r/m8, r8
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))   // ModRM: reg = v0, rm = v1
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], isReg8REX(v[0]) || isReg8REX(v[1]))
            m.emit(0x2a)                                    // opcode 2A /r: SUB r8, r/m8
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: reg = v1, rm = v0
        })
    }
    // SUBB m8, r8
    if isM8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), isReg8REX(v[1]))
            m.emit(0x2a)                            // opcode 2A /r: SUB r8, r/m8
            m.mrsd(lcode(v[1]), addr(v[0]), 1)      // ModRM/SIB/disp; ModRM.reg = v1
        })
    }
    // SUBB imm8, m8
    if isImm8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0x80)                            // opcode 80 /5 ib
            m.mrsd(5, addr(v[1]), 1)                // ModRM/SIB/disp, ModRM.reg = 5 (SUB)
            m.imm1(toImmAny(v[0]))
        })
    }
    // SUBB r8, m8
    if isReg8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), isReg8REX(v[0]))
            m.emit(0x28)                            // opcode 28 /r: SUB r/m8, r8
            m.mrsd(lcode(v[0]), addr(v[1]), 1)      // ModRM/SIB/disp; ModRM.reg = v0
        })
    }
    // no operand form matched: the operands are invalid for SUBB
    if p.len == 0 {
        panic("invalid operands for SUBB")
    }
    return p
}
 32298  
// SUBL performs "Subtract".
//
// Mnemonic        : SUB
// Supported forms : (8 forms)
//
//    * SUBL imm32, eax
//    * SUBL imm8, r32
//    * SUBL imm32, r32
//    * SUBL r32, r32
//    * SUBL m32, r32
//    * SUBL imm8, m32
//    * SUBL imm32, m32
//    * SUBL r32, m32
//
// 32-bit variant of SUB. Immediate forms use opcode extension /5;
// isImm8Ext(v0, 4) gates the sign-extended imm8 short form (83 /5 ib)
// alongside the full imm32 form (81 /5 id). The r32,r32 form registers
// two equivalent encodings (29 /r and 2B /r with swapped direction).
// Panics if no form matches.
func (self *Program) SUBL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SUBL", 2, Operands { v0, v1 })
    // SUBL imm32, eax
    if isImm32(v0) && v1 == EAX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x2d)                    // opcode 2D id: EAX-specific short form, no ModRM
            m.imm4(toImmAny(v[0]))          // 32-bit immediate
        })
    }
    // SUBL imm8, r32
    if isImm8Ext(v0, 4) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0x83)                    // opcode 83 /5 ib: sign-extended imm8
            m.emit(0xe8 | lcode(v[1]))      // ModRM register-direct, reg = 5 (SUB), rm = v1
            m.imm1(toImmAny(v[0]))
        })
    }
    // SUBL imm32, r32
    if isImm32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0x81)                    // opcode 81 /5 id: full imm32
            m.emit(0xe8 | lcode(v[1]))      // ModRM register-direct, reg = 5 (SUB), rm = v1
            m.imm4(toImmAny(v[0]))
        })
    }
    // SUBL r32, r32
    if isReg32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        // two equivalent encodings with opposite ModRM direction
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x29)                                    // opcode 29 /r: SUB r/m32, r32
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))   // ModRM: reg = v0, rm = v1
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x2b)                                    // opcode 2B /r: SUB r32, r/m32
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: reg = v1, rm = v0
        })
    }
    // SUBL m32, r32
    if isM32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x2b)                            // opcode 2B /r: SUB r32, r/m32
            m.mrsd(lcode(v[1]), addr(v[0]), 1)      // ModRM/SIB/disp; ModRM.reg = v1
        })
    }
    // SUBL imm8, m32
    if isImm8Ext(v0, 4) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0x83)                            // opcode 83 /5 ib: sign-extended imm8
            m.mrsd(5, addr(v[1]), 1)                // ModRM/SIB/disp, ModRM.reg = 5 (SUB)
            m.imm1(toImmAny(v[0]))
        })
    }
    // SUBL imm32, m32
    if isImm32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0x81)                            // opcode 81 /5 id: full imm32
            m.mrsd(5, addr(v[1]), 1)                // ModRM/SIB/disp, ModRM.reg = 5 (SUB)
            m.imm4(toImmAny(v[0]))
        })
    }
    // SUBL r32, m32
    if isReg32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x29)                            // opcode 29 /r: SUB r/m32, r32
            m.mrsd(lcode(v[0]), addr(v[1]), 1)      // ModRM/SIB/disp; ModRM.reg = v0
        })
    }
    // no operand form matched: the operands are invalid for SUBL
    if p.len == 0 {
        panic("invalid operands for SUBL")
    }
    return p
}
 32400  
 32401  // SUBPD performs "Subtract Packed Double-Precision Floating-Point Values".
 32402  //
 32403  // Mnemonic        : SUBPD
 32404  // Supported forms : (2 forms)
 32405  //
 32406  //    * SUBPD xmm, xmm     [SSE2]
 32407  //    * SUBPD m128, xmm    [SSE2]
 32408  //
func (self *Program) SUBPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SUBPD", 2, Operands { v0, v1 })
    // Legacy SSE2 encoding: 66 0F 5C /r, destination in the ModRM reg field.
    // SUBPD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)    // mandatory prefix selecting the PD form
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // SUBPD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)    // ModRM/SIB/disp for the memory source
        })
    }
    if p.len == 0 {
        panic("invalid operands for SUBPD")
    }
    return p
}
 32440  
 32441  // SUBPS performs "Subtract Packed Single-Precision Floating-Point Values".
 32442  //
 32443  // Mnemonic        : SUBPS
 32444  // Supported forms : (2 forms)
 32445  //
 32446  //    * SUBPS xmm, xmm     [SSE]
 32447  //    * SUBPS m128, xmm    [SSE]
 32448  //
func (self *Program) SUBPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SUBPS", 2, Operands { v0, v1 })
    // Legacy SSE encoding: 0F 5C /r with no mandatory prefix (PS form).
    // SUBPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // SUBPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)    // ModRM/SIB/disp for the memory source
        })
    }
    if p.len == 0 {
        panic("invalid operands for SUBPS")
    }
    return p
}
 32478  
 32479  // SUBQ performs "Subtract".
 32480  //
 32481  // Mnemonic        : SUB
 32482  // Supported forms : (8 forms)
 32483  //
 32484  //    * SUBQ imm32, rax
 32485  //    * SUBQ imm8, r64
 32486  //    * SUBQ imm32, r64
 32487  //    * SUBQ r64, r64
 32488  //    * SUBQ m64, r64
 32489  //    * SUBQ imm8, m64
 32490  //    * SUBQ imm32, m64
 32491  //    * SUBQ r64, m64
 32492  //
func (self *Program) SUBQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SUBQ", 2, Operands { v0, v1 })
    // 64-bit forms: every encoder emits (or folds into) a REX.W prefix
    // (base byte 0x48). Immediates are at most 32 bits wide; the isImm*Ext
    // predicates presumably account for sign-extension to 64 bits.
    // SUBQ imm32, rax
    if isImm32(v0) && v1 == RAX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48)              // REX.W
            m.emit(0x2d)              // accumulator short form: no ModRM byte
            m.imm4(toImmAny(v[0]))
        })
    }
    // SUBQ imm8, r64
    if isImm8Ext(v0, 8) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))        // REX.W + B extension for r8-r15
            m.emit(0x83)                      // group-1 opcode with imm8
            m.emit(0xe8 | lcode(v[1]))        // ModRM: mod=11, reg=/5 (SUB), rm=dst
            m.imm1(toImmAny(v[0]))
        })
    }
    // SUBQ imm32, r64
    if isImm32Ext(v0, 8) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0x81)                      // group-1 opcode with imm32
            m.emit(0xe8 | lcode(v[1]))        // ModRM: mod=11, reg=/5, rm=dst
            m.imm4(toImmAny(v[0]))
        })
    }
    // SUBQ r64, r64
    // Both directions are encodable (0x29 r/m,r and 0x2b r,r/m), so two
    // alternative encoders are registered for the same operand pair.
    if isReg64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1]))    // REX.W + R/B extensions
            m.emit(0x29)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))    // ModRM: mod=11, reg=src, rm=dst
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
            m.emit(0x2b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // SUBQ m64, r64
    if isM64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))    // REX with W=1 over the memory operand
            m.emit(0x2b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // SUBQ imm8, m64
    if isImm8Ext(v0, 8) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0x83)
            m.mrsd(5, addr(v[1]), 1)    // reg field = /5 (SUB)
            m.imm1(toImmAny(v[0]))
        })
    }
    // SUBQ imm32, m64
    if isImm32Ext(v0, 8) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0x81)
            m.mrsd(5, addr(v[1]), 1)    // reg field = /5 (SUB)
            m.imm4(toImmAny(v[0]))
        })
    }
    // SUBQ r64, m64
    if isReg64(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[0]), addr(v[1]))
            m.emit(0x29)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for SUBQ")
    }
    return p
}
 32581  
 32582  // SUBSD performs "Subtract Scalar Double-Precision Floating-Point Values".
 32583  //
 32584  // Mnemonic        : SUBSD
 32585  // Supported forms : (2 forms)
 32586  //
 32587  //    * SUBSD xmm, xmm    [SSE2]
 32588  //    * SUBSD m64, xmm    [SSE2]
 32589  //
func (self *Program) SUBSD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SUBSD", 2, Operands { v0, v1 })
    // Legacy SSE2 encoding: F2 0F 5C /r, destination in the ModRM reg field.
    // SUBSD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)    // mandatory prefix selecting the SD (scalar double) form
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // SUBSD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)    // ModRM/SIB/disp for the memory source
        })
    }
    if p.len == 0 {
        panic("invalid operands for SUBSD")
    }
    return p
}
 32621  
 32622  // SUBSS performs "Subtract Scalar Single-Precision Floating-Point Values".
 32623  //
 32624  // Mnemonic        : SUBSS
 32625  // Supported forms : (2 forms)
 32626  //
 32627  //    * SUBSS xmm, xmm    [SSE]
 32628  //    * SUBSS m32, xmm    [SSE]
 32629  //
func (self *Program) SUBSS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SUBSS", 2, Operands { v0, v1 })
    // Legacy SSE encoding: F3 0F 5C /r, destination in the ModRM reg field.
    // SUBSS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)    // mandatory prefix selecting the SS (scalar single) form
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // SUBSS m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)    // ModRM/SIB/disp for the memory source
        })
    }
    if p.len == 0 {
        panic("invalid operands for SUBSS")
    }
    return p
}
 32661  
 32662  // SUBW performs "Subtract".
 32663  //
 32664  // Mnemonic        : SUB
 32665  // Supported forms : (8 forms)
 32666  //
 32667  //    * SUBW imm16, ax
 32668  //    * SUBW imm8, r16
 32669  //    * SUBW imm16, r16
 32670  //    * SUBW r16, r16
 32671  //    * SUBW m16, r16
 32672  //    * SUBW imm8, m16
 32673  //    * SUBW imm16, m16
 32674  //    * SUBW r16, m16
 32675  //
func (self *Program) SUBW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SUBW", 2, Operands { v0, v1 })
    // 16-bit forms: every encoder starts with the 0x66 operand-size
    // override prefix; otherwise identical in shape to the SUBL encoders.
    // SUBW imm16, ax
    if isImm16(v0) && v1 == AX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)              // operand-size override (16-bit)
            m.emit(0x2d)              // accumulator short form: no ModRM byte
            m.imm2(toImmAny(v[0]))    // imm16
        })
    }
    // SUBW imm8, r16
    if isImm8Ext(v0, 2) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0x83)                      // group-1 opcode with imm8
            m.emit(0xe8 | lcode(v[1]))        // ModRM: mod=11, reg=/5 (SUB), rm=dst
            m.imm1(toImmAny(v[0]))
        })
    }
    // SUBW imm16, r16
    if isImm16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0x81)                      // group-1 opcode with imm16
            m.emit(0xe8 | lcode(v[1]))        // ModRM: mod=11, reg=/5, rm=dst
            m.imm2(toImmAny(v[0]))
        })
    }
    // SUBW r16, r16
    // Both directions are encodable (0x29 r/m,r and 0x2b r,r/m), so two
    // alternative encoders are registered for the same operand pair.
    if isReg16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x29)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))    // ModRM: mod=11, reg=src, rm=dst
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x2b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // SUBW m16, r16
    if isM16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x2b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)    // ModRM/SIB/disp for the memory operand
        })
    }
    // SUBW imm8, m16
    if isImm8Ext(v0, 2) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0x83)
            m.mrsd(5, addr(v[1]), 1)    // reg field = /5 (SUB)
            m.imm1(toImmAny(v[0]))
        })
    }
    // SUBW imm16, m16
    if isImm16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0x81)
            m.mrsd(5, addr(v[1]), 1)    // reg field = /5 (SUB)
            m.imm2(toImmAny(v[0]))
        })
    }
    // SUBW r16, m16
    if isReg16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x29)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for SUBW")
    }
    return p
}
 32772  
 32773  // SYSCALL performs "Fast System Call".
 32774  //
 32775  // Mnemonic        : SYSCALL
 32776  // Supported forms : (1 form)
 32777  //
 32778  //    * SYSCALL
 32779  //
func (self *Program) SYSCALL() *Instruction {
    p := self.alloc("SYSCALL", 0, Operands {  })
    // SYSCALL
    // Single fixed two-byte encoding (0F 05); no operands, so there is no
    // form matching or validation to do.
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0x0f)
        m.emit(0x05)
    })
    return p
}
 32790  
 32791  // T1MSKC performs "Inverse Mask From Trailing Ones".
 32792  //
 32793  // Mnemonic        : T1MSKC
 32794  // Supported forms : (4 forms)
 32795  //
 32796  //    * T1MSKC r32, r32    [TBM]
 32797  //    * T1MSKC m32, r32    [TBM]
 32798  //    * T1MSKC r64, r64    [TBM]
 32799  //    * T1MSKC m64, r64    [TBM]
 32800  //
func (self *Program) T1MSKC(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("T1MSKC", 2, Operands { v0, v1 })
    // AMD TBM instruction, XOP-encoded (0x8f escape). Register forms emit
    // the 3-byte XOP prefix by hand, XOR-folding the operand bits in
    // (hcode(v[0]) -> inverted R bit, hlcode(v[1]) -> inverted vvvv field);
    // memory forms delegate the prefix to m.vex3. Opcode extension is /7
    // (see the mrsd(7, ...) calls and the 0xf8 ModRM base).
    // T1MSKC r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                          // XOP escape byte
            m.emit(0xe9 ^ (hcode(v[0]) << 5))     // XOP byte 1: map select + inverted R
            m.emit(0x78 ^ (hlcode(v[1]) << 3))    // XOP byte 2: inverted vvvv (W=0, 32-bit)
            m.emit(0x01)                          // opcode
            m.emit(0xf8 | lcode(v[0]))            // ModRM: mod=11, reg=/7, rm=src
        })
    }
    // T1MSKC m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, 0, addr(v[0]), hlcode(v[1]))
            m.emit(0x01)
            m.mrsd(7, addr(v[0]), 1)    // reg field = /7
        })
    }
    // T1MSKC r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))    // XOP byte 2: W=1 for the 64-bit form
            m.emit(0x01)
            m.emit(0xf8 | lcode(v[0]))            // ModRM: mod=11, reg=/7, rm=src
        })
    }
    // T1MSKC m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x80, 0, addr(v[0]), hlcode(v[1]))    // 0x80: W=1
            m.emit(0x01)
            m.mrsd(7, addr(v[0]), 1)    // reg field = /7
        })
    }
    if p.len == 0 {
        panic("invalid operands for T1MSKC")
    }
    return p
}
 32852  
 32853  // TESTB performs "Logical Compare".
 32854  //
 32855  // Mnemonic        : TEST
 32856  // Supported forms : (5 forms)
 32857  //
 32858  //    * TESTB imm8, al
 32859  //    * TESTB imm8, r8
 32860  //    * TESTB r8, r8
 32861  //    * TESTB imm8, m8
 32862  //    * TESTB r8, m8
 32863  //
func (self *Program) TESTB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("TESTB", 2, Operands { v0, v1 })
    // Byte forms: isReg8REX forces a REX prefix for the byte registers that
    // are only addressable with one; opcode extension for the immediate
    // forms is /0 (see mrsd(0, ...) and the 0xc0 ModRM base).
    // TESTB imm8, al
    if isImm8(v0) && v1 == AL {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xa8)              // accumulator short form: no ModRM byte
            m.imm1(toImmAny(v[0]))
        })
    }
    // TESTB imm8, r8
    if isImm8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xf6)                      // group-3 opcode with imm8
            m.emit(0xc0 | lcode(v[1]))        // ModRM: mod=11, reg=/0 (TEST), rm=dst
            m.imm1(toImmAny(v[0]))
        })
    }
    // TESTB r8, r8
    // TEST is commutative, so unlike SUB only one direction is emitted.
    if isReg8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], isReg8REX(v[0]) || isReg8REX(v[1]))
            m.emit(0x84)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))    // ModRM: mod=11, reg=src, rm=dst
        })
    }
    // TESTB imm8, m8
    if isImm8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xf6)
            m.mrsd(0, addr(v[1]), 1)    // reg field = /0 (TEST)
            m.imm1(toImmAny(v[0]))
        })
    }
    // TESTB r8, m8
    if isReg8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), isReg8REX(v[0]))
            m.emit(0x84)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for TESTB")
    }
    return p
}
 32917  
 32918  // TESTL performs "Logical Compare".
 32919  //
 32920  // Mnemonic        : TEST
 32921  // Supported forms : (5 forms)
 32922  //
 32923  //    * TESTL imm32, eax
 32924  //    * TESTL imm32, r32
 32925  //    * TESTL r32, r32
 32926  //    * TESTL imm32, m32
 32927  //    * TESTL r32, m32
 32928  //
func (self *Program) TESTL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("TESTL", 2, Operands { v0, v1 })
    // 32-bit TEST; opcode extension for the immediate forms is /0
    // (see mrsd(0, ...) and the 0xc0 ModRM base).
    // TESTL imm32, eax
    if isImm32(v0) && v1 == EAX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xa9)              // accumulator short form: no ModRM byte
            m.imm4(toImmAny(v[0]))
        })
    }
    // TESTL imm32, r32
    if isImm32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xf7)                      // group-3 opcode with imm32
            m.emit(0xc0 | lcode(v[1]))        // ModRM: mod=11, reg=/0 (TEST), rm=dst
            m.imm4(toImmAny(v[0]))
        })
    }
    // TESTL r32, r32
    // TEST is commutative, so only one register direction is emitted.
    if isReg32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x85)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))    // ModRM: mod=11, reg=src, rm=dst
        })
    }
    // TESTL imm32, m32
    if isImm32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xf7)
            m.mrsd(0, addr(v[1]), 1)    // reg field = /0 (TEST)
            m.imm4(toImmAny(v[0]))
        })
    }
    // TESTL r32, m32
    if isReg32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x85)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for TESTL")
    }
    return p
}
 32982  
 32983  // TESTQ performs "Logical Compare".
 32984  //
 32985  // Mnemonic        : TEST
 32986  // Supported forms : (5 forms)
 32987  //
 32988  //    * TESTQ imm32, rax
 32989  //    * TESTQ imm32, r64
 32990  //    * TESTQ r64, r64
 32991  //    * TESTQ imm32, m64
 32992  //    * TESTQ r64, m64
 32993  //
func (self *Program) TESTQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("TESTQ", 2, Operands { v0, v1 })
    // 64-bit TEST: every encoder carries a REX.W prefix (base 0x48, or
    // rexm with W=1 for memory operands). Immediates stay 32 bits; the
    // isImm32Ext predicate presumably accounts for sign-extension to 64.
    // TESTQ imm32, rax
    if isImm32(v0) && v1 == RAX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48)              // REX.W
            m.emit(0xa9)              // accumulator short form: no ModRM byte
            m.imm4(toImmAny(v[0]))
        })
    }
    // TESTQ imm32, r64
    if isImm32Ext(v0, 8) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))        // REX.W + B extension for r8-r15
            m.emit(0xf7)                      // group-3 opcode with imm32
            m.emit(0xc0 | lcode(v[1]))        // ModRM: mod=11, reg=/0 (TEST), rm=dst
            m.imm4(toImmAny(v[0]))
        })
    }
    // TESTQ r64, r64
    // TEST is commutative, so only one register direction is emitted.
    if isReg64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1]))    // REX.W + R/B extensions
            m.emit(0x85)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))    // ModRM: mod=11, reg=src, rm=dst
        })
    }
    // TESTQ imm32, m64
    if isImm32Ext(v0, 8) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))    // REX with W=1 over the memory operand
            m.emit(0xf7)
            m.mrsd(0, addr(v[1]), 1)    // reg field = /0 (TEST)
            m.imm4(toImmAny(v[0]))
        })
    }
    // TESTQ r64, m64
    if isReg64(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[0]), addr(v[1]))
            m.emit(0x85)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for TESTQ")
    }
    return p
}
 33048  
 33049  // TESTW performs "Logical Compare".
 33050  //
 33051  // Mnemonic        : TEST
 33052  // Supported forms : (5 forms)
 33053  //
 33054  //    * TESTW imm16, ax
 33055  //    * TESTW imm16, r16
 33056  //    * TESTW r16, r16
 33057  //    * TESTW imm16, m16
 33058  //    * TESTW r16, m16
 33059  //
func (self *Program) TESTW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("TESTW", 2, Operands { v0, v1 })
    // 16-bit TEST: every encoder starts with the 0x66 operand-size override
    // prefix; opcode extension for the immediate forms is /0.
    // TESTW imm16, ax
    if isImm16(v0) && v1 == AX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)              // operand-size override (16-bit)
            m.emit(0xa9)              // accumulator short form: no ModRM byte
            m.imm2(toImmAny(v[0]))    // imm16
        })
    }
    // TESTW imm16, r16
    if isImm16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0xf7)                      // group-3 opcode with imm16
            m.emit(0xc0 | lcode(v[1]))        // ModRM: mod=11, reg=/0 (TEST), rm=dst
            m.imm2(toImmAny(v[0]))
        })
    }
    // TESTW r16, r16
    // TEST is commutative, so only one register direction is emitted.
    if isReg16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x85)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))    // ModRM: mod=11, reg=src, rm=dst
        })
    }
    // TESTW imm16, m16
    if isImm16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0xf7)
            m.mrsd(0, addr(v[1]), 1)    // reg field = /0 (TEST)
            m.imm2(toImmAny(v[0]))
        })
    }
    // TESTW r16, m16
    if isReg16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x85)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for TESTW")
    }
    return p
}
 33118  
 33119  // TZCNTL performs "Count the Number of Trailing Zero Bits".
 33120  //
 33121  // Mnemonic        : TZCNT
 33122  // Supported forms : (2 forms)
 33123  //
 33124  //    * TZCNTL r32, r32    [BMI]
 33125  //    * TZCNTL m32, r32    [BMI]
 33126  //
func (self *Program) TZCNTL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("TZCNTL", 2, Operands { v0, v1 })
    // BMI encoding: F3 0F BC /r, destination in the ModRM reg field.
    // TZCNTL r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)    // mandatory prefix distinguishing TZCNT from BSF (0F BC)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xbc)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // TZCNTL m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xbc)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)    // ModRM/SIB/disp for the memory source
        })
    }
    if p.len == 0 {
        panic("invalid operands for TZCNTL")
    }
    return p
}
 33158  
 33159  // TZCNTQ performs "Count the Number of Trailing Zero Bits".
 33160  //
 33161  // Mnemonic        : TZCNT
 33162  // Supported forms : (2 forms)
 33163  //
 33164  //    * TZCNTQ r64, r64    [BMI]
 33165  //    * TZCNTQ m64, r64    [BMI]
 33166  //
func (self *Program) TZCNTQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("TZCNTQ", 2, Operands { v0, v1 })
    // 64-bit BMI encoding: F3 REX.W 0F BC /r (the F3 prefix must precede
    // the REX byte), destination in the ModRM reg field.
    // TZCNTQ r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))    // REX.W + R/B extensions
            m.emit(0x0f)
            m.emit(0xbc)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // TZCNTQ m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexm(1, hcode(v[1]), addr(v[0]))    // REX with W=1 over the memory operand
            m.emit(0x0f)
            m.emit(0xbc)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for TZCNTQ")
    }
    return p
}
 33198  
 33199  // TZCNTW performs "Count the Number of Trailing Zero Bits".
 33200  //
 33201  // Mnemonic        : TZCNT
 33202  // Supported forms : (2 forms)
 33203  //
 33204  //    * TZCNTW r16, r16    [BMI]
 33205  //    * TZCNTW m16, r16    [BMI]
 33206  //
func (self *Program) TZCNTW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("TZCNTW", 2, Operands { v0, v1 })
    // 16-bit BMI encoding: operand-size override (0x66) plus the mandatory
    // F3 prefix ahead of 0F BC /r, destination in the ModRM reg field.
    // TZCNTW r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)    // operand-size override (16-bit)
            m.emit(0xf3)    // mandatory prefix distinguishing TZCNT from BSF
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xbc)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // TZCNTW m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xbc)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)    // ModRM/SIB/disp for the memory source
        })
    }
    if p.len == 0 {
        panic("invalid operands for TZCNTW")
    }
    return p
}
 33240  
// TZMSK performs "Mask From Trailing Zeros".
//
// Mnemonic        : TZMSK
// Supported forms : (4 forms)
//
//    * TZMSK r32, r32    [TBM]
//    * TZMSK m32, r32    [TBM]
//    * TZMSK r64, r64    [TBM]
//    * TZMSK m64, r64    [TBM]
//
// TBM instructions use the XOP encoding (0x8f escape byte); the register
// forms below build the three XOP prefix bytes by hand, while the memory
// forms delegate to m.vex3. If no form matches, the function panics.
func (self *Program) TZMSK(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("TZMSK", 2, Operands { v0, v1 })
    // TZMSK r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                        // XOP escape byte
            m.emit(0xe9 ^ (hcode(v[0]) << 5))   // XOP byte 1; hcode is XORed in because extension bits are stored inverted
            m.emit(0x78 ^ (hlcode(v[1]) << 3))  // XOP byte 2: W=0 (32-bit), vvvv = destination register (inverted)
            m.emit(0x01)                        // opcode
            m.emit(0xe0 | lcode(v[0]))          // ModRM: mod=11, reg=/4 (opcode extension, see mrsd(4, ...) below), rm=src
        })
    }
    // TZMSK m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, 0, addr(v[0]), hlcode(v[1]))
            m.emit(0x01)
            m.mrsd(4, addr(v[0]), 1)            // reg field carries the /4 opcode extension
        })
    }
    // TZMSK r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))  // XOP byte 2: W=1 (64-bit operand size)
            m.emit(0x01)
            m.emit(0xe0 | lcode(v[0]))
        })
    }
    // TZMSK m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x80, 0, addr(v[0]), hlcode(v[1]))  // 0x80 sets W=1 for the 64-bit form
            m.emit(0x01)
            m.mrsd(4, addr(v[0]), 1)
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for TZMSK")
    }
    return p
}
 33302  
// UCOMISD performs "Unordered Compare Scalar Double-Precision Floating-Point Values and Set EFLAGS".
//
// Mnemonic        : UCOMISD
// Supported forms : (2 forms)
//
//    * UCOMISD xmm, xmm    [SSE2]
//    * UCOMISD m64, xmm    [SSE2]
//
// Every form whose operand-kind check matches appends an encoder closure
// via p.add; if no form matches, the function panics.
func (self *Program) UCOMISD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("UCOMISD", 2, Operands { v0, v1 })
    // UCOMISD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                   // mandatory prefix: UCOMISD is 66 0F 2E
            m.rexo(hcode(v[1]), v[0], false)               // optional REX for xmm8-xmm15
            m.emit(0x0f)
            m.emit(0x2e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=dst (v1), rm=src (v0)
        })
    }
    // UCOMISD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x2e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)             // ModRM/SIB/displacement for the memory operand
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for UCOMISD")
    }
    return p
}
 33342  
// UCOMISS performs "Unordered Compare Scalar Single-Precision Floating-Point Values and Set EFLAGS".
//
// Mnemonic        : UCOMISS
// Supported forms : (2 forms)
//
//    * UCOMISS xmm, xmm    [SSE]
//    * UCOMISS m32, xmm    [SSE]
//
// Every form whose operand-kind check matches appends an encoder closure
// via p.add; if no form matches, the function panics.
func (self *Program) UCOMISS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("UCOMISS", 2, Operands { v0, v1 })
    // UCOMISS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)               // no mandatory prefix: UCOMISS is 0F 2E
            m.emit(0x0f)
            m.emit(0x2e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=dst (v1), rm=src (v0)
        })
    }
    // UCOMISS m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x2e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)             // ModRM/SIB/displacement for the memory operand
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for UCOMISS")
    }
    return p
}
 33380  
 33381  // UD2 performs "Undefined Instruction".
 33382  //
 33383  // Mnemonic        : UD2
 33384  // Supported forms : (1 form)
 33385  //
 33386  //    * UD2
 33387  //
 33388  func (self *Program) UD2() *Instruction {
 33389      p := self.alloc("UD2", 0, Operands {  })
 33390      // UD2
 33391      p.domain = DomainGeneric
 33392      p.add(0, func(m *_Encoding, v []interface{}) {
 33393          m.emit(0x0f)
 33394          m.emit(0x0b)
 33395      })
 33396      return p
 33397  }
 33398  
// UNPCKHPD performs "Unpack and Interleave High Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : UNPCKHPD
// Supported forms : (2 forms)
//
//    * UNPCKHPD xmm, xmm     [SSE2]
//    * UNPCKHPD m128, xmm    [SSE2]
//
// Every form whose operand-kind check matches appends an encoder closure
// via p.add; if no form matches, the function panics.
func (self *Program) UNPCKHPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("UNPCKHPD", 2, Operands { v0, v1 })
    // UNPCKHPD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                   // mandatory prefix: UNPCKHPD is 66 0F 15
            m.rexo(hcode(v[1]), v[0], false)               // optional REX for xmm8-xmm15
            m.emit(0x0f)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=dst (v1), rm=src (v0)
        })
    }
    // UNPCKHPD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x15)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)             // ModRM/SIB/displacement for the memory operand
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for UNPCKHPD")
    }
    return p
}
 33438  
// UNPCKHPS performs "Unpack and Interleave High Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : UNPCKHPS
// Supported forms : (2 forms)
//
//    * UNPCKHPS xmm, xmm     [SSE]
//    * UNPCKHPS m128, xmm    [SSE]
//
// Every form whose operand-kind check matches appends an encoder closure
// via p.add; if no form matches, the function panics.
func (self *Program) UNPCKHPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("UNPCKHPS", 2, Operands { v0, v1 })
    // UNPCKHPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)               // no mandatory prefix: UNPCKHPS is 0F 15
            m.emit(0x0f)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=dst (v1), rm=src (v0)
        })
    }
    // UNPCKHPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x15)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)             // ModRM/SIB/displacement for the memory operand
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for UNPCKHPS")
    }
    return p
}
 33476  
// UNPCKLPD performs "Unpack and Interleave Low Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : UNPCKLPD
// Supported forms : (2 forms)
//
//    * UNPCKLPD xmm, xmm     [SSE2]
//    * UNPCKLPD m128, xmm    [SSE2]
//
// Every form whose operand-kind check matches appends an encoder closure
// via p.add; if no form matches, the function panics.
func (self *Program) UNPCKLPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("UNPCKLPD", 2, Operands { v0, v1 })
    // UNPCKLPD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                   // mandatory prefix: UNPCKLPD is 66 0F 14
            m.rexo(hcode(v[1]), v[0], false)               // optional REX for xmm8-xmm15
            m.emit(0x0f)
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=dst (v1), rm=src (v0)
        })
    }
    // UNPCKLPD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x14)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)             // ModRM/SIB/displacement for the memory operand
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for UNPCKLPD")
    }
    return p
}
 33516  
// UNPCKLPS performs "Unpack and Interleave Low Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : UNPCKLPS
// Supported forms : (2 forms)
//
//    * UNPCKLPS xmm, xmm     [SSE]
//    * UNPCKLPS m128, xmm    [SSE]
//
// Every form whose operand-kind check matches appends an encoder closure
// via p.add; if no form matches, the function panics.
func (self *Program) UNPCKLPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("UNPCKLPS", 2, Operands { v0, v1 })
    // UNPCKLPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)               // no mandatory prefix: UNPCKLPS is 0F 14
            m.emit(0x0f)
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=dst (v1), rm=src (v0)
        })
    }
    // UNPCKLPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x14)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)             // ModRM/SIB/displacement for the memory operand
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for UNPCKLPS")
    }
    return p
}
 33554  
// VADDPD performs "Add Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VADDPD
// Supported forms : (11 forms)
//
//    * VADDPD xmm, xmm, xmm                   [AVX]
//    * VADDPD m128, xmm, xmm                  [AVX]
//    * VADDPD ymm, ymm, ymm                   [AVX]
//    * VADDPD m256, ymm, ymm                  [AVX]
//    * VADDPD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VADDPD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VADDPD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VADDPD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VADDPD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VADDPD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VADDPD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// The optional fourth operand (vv) is only accepted for the {er} form.
// AVX forms are VEX-encoded via m.vex2; AVX-512 memory forms use m.evex,
// while AVX-512 register forms build the four EVEX prefix bytes (leading
// 0x62) by hand. If no form matches, the function panics.
func (self *Program) VADDPD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VADDPD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VADDPD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VADDPD takes 3 or 4 operands")
    }
    // VADDPD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))     // two-byte VEX prefix; dst = v2, first src = v1
            m.emit(0x58)                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=dst (v2), rm=second src (v0)
        })
    }
    // VADDPD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x58)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)             // ModRM/SIB/displacement for the memory operand
        })
    }
    // VADDPD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))     // selector 5 (vs 1 above) flags the 256-bit form
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VADDPD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x58)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VADDPD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x58)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)            // disp8 compressed with a 64-byte scale
        })
    }
    // VADDPD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix byte 0
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))// EVEX byte 1: register-extension bits (inverted)
            m.emit(0xfd ^ (hlcode(v[2]) << 3))                                            // EVEX byte 2: vvvv = first source (inverted)
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10) // EVEX byte 3: z, rounding mode from {er}, mask register
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VADDPD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)  // 0x40 selects the 512-bit vector length
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VADDPD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x58)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)            // disp8 compressed with a 16-byte scale
        })
    }
    // VADDPD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)  // 0x00 selects the 128-bit vector length
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VADDPD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x58)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)            // disp8 compressed with a 32-byte scale
        })
    }
    // VADDPD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)  // 0x20 selects the 256-bit vector length
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VADDPD")
    }
    return p
}
 33706  
// VADDPS performs "Add Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VADDPS
// Supported forms : (11 forms)
//
//    * VADDPS xmm, xmm, xmm                   [AVX]
//    * VADDPS m128, xmm, xmm                  [AVX]
//    * VADDPS ymm, ymm, ymm                   [AVX]
//    * VADDPS m256, ymm, ymm                  [AVX]
//    * VADDPS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VADDPS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VADDPS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VADDPS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VADDPS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VADDPS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VADDPS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// The optional fourth operand (vv) is only accepted for the {er} form.
// AVX forms are VEX-encoded via m.vex2; AVX-512 memory forms use m.evex,
// while AVX-512 register forms build the four EVEX prefix bytes (leading
// 0x62) by hand. If no form matches, the function panics.
func (self *Program) VADDPS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VADDPS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VADDPS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VADDPS takes 3 or 4 operands")
    }
    // VADDPS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), v[0], hlcode(v[1]))     // two-byte VEX prefix; dst = v2, first src = v1
            m.emit(0x58)                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=dst (v2), rm=second src (v0)
        })
    }
    // VADDPS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x58)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)             // ModRM/SIB/displacement for the memory operand
        })
    }
    // VADDPS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), v[0], hlcode(v[1]))     // selector 4 (vs 0 above) flags the 256-bit form
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VADDPS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x58)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VADDPS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x58)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)            // disp8 compressed with a 64-byte scale
        })
    }
    // VADDPS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix byte 0
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))// EVEX byte 1: register-extension bits (inverted)
            m.emit(0x7c ^ (hlcode(v[2]) << 3))                                            // EVEX byte 2: vvvv = first source (inverted)
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10) // EVEX byte 3: z, rounding mode from {er}, mask register
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VADDPS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)  // 0x40 selects the 512-bit vector length
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VADDPS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x58)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)            // disp8 compressed with a 16-byte scale
        })
    }
    // VADDPS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)  // 0x00 selects the 128-bit vector length
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VADDPS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x58)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)            // disp8 compressed with a 32-byte scale
        })
    }
    // VADDPS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)  // 0x20 selects the 256-bit vector length
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VADDPS")
    }
    return p
}
 33858  
// VADDSD performs "Add Scalar Double-Precision Floating-Point Values".
//
// Mnemonic        : VADDSD
// Supported forms : (5 forms)
//
//    * VADDSD xmm, xmm, xmm                [AVX]
//    * VADDSD m64, xmm, xmm                [AVX]
//    * VADDSD m64, xmm, xmm{k}{z}          [AVX512F]
//    * VADDSD {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VADDSD xmm, xmm, xmm{k}{z}          [AVX512F]
//
// The optional fourth operand (vv) is only accepted for the {er} form.
// AVX forms are VEX-encoded via m.vex2; the AVX-512 memory form uses
// m.evex, while the AVX-512 register forms build the four EVEX prefix
// bytes (leading 0x62) by hand. If no form matches, the function panics.
func (self *Program) VADDSD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VADDSD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VADDSD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VADDSD takes 3 or 4 operands")
    }
    // VADDSD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), v[0], hlcode(v[1]))     // two-byte VEX prefix; dst = v2, first src = v1
            m.emit(0x58)                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=dst (v2), rm=second src (v0)
        })
    }
    // VADDSD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x58)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)             // ModRM/SIB/displacement for the memory operand
        })
    }
    // VADDSD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x58)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)             // disp8 compressed with an 8-byte scale (scalar double)
        })
    }
    // VADDSD {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix byte 0
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))// EVEX byte 1: register-extension bits (inverted)
            m.emit(0xff ^ (hlcode(v[2]) << 3))                                            // EVEX byte 2: vvvv = first source (inverted)
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10) // EVEX byte 3: z, rounding mode from {er}, mask register
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VADDSD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xff ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VADDSD")
    }
    return p
}
 33938  
// VADDSS performs "Add Scalar Single-Precision Floating-Point Values".
//
// Mnemonic        : VADDSS
// Supported forms : (5 forms)
//
//    * VADDSS xmm, xmm, xmm                [AVX]
//    * VADDSS m32, xmm, xmm                [AVX]
//    * VADDSS m32, xmm, xmm{k}{z}          [AVX512F]
//    * VADDSS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VADDSS xmm, xmm, xmm{k}{z}          [AVX512F]
//
// Operands are source-first (AT&T order), destination last. Each matching
// form registers a deferred encoder closure; actual byte emission happens
// when the program is assembled.
func (self *Program) VADDSS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Only the embedded-rounding form ({er}) takes a fourth operand.
    switch len(vv) {
        case 0  : p = self.alloc("VADDSS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VADDSS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VADDSS takes 3 or 4 operands")
    }
    // VADDSS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[2]), v[0], hlcode(v[1]))      // 2-byte VEX prefix
            m.emit(0x58)                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg=dst, rm=src
        })
    }
    // VADDSS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x58)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    // VADDSS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x58)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)              // EVEX disp8 is compressed with scale 4 (32-bit element)
        })
    }
    // VADDSS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-packed 4-byte EVEX prefix: escape byte, then three payload
            // bytes built from the operands' extension/mask/rounding codes.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7e ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)  // vcode(v[0]): embedded rounding control
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VADDSS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                    // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands: fail loudly at build time.
    if p.len == 0 {
        panic("invalid operands for VADDSS")
    }
    return p
}
 34018  
// VADDSUBPD performs "Packed Double-FP Add/Subtract".
//
// Mnemonic        : VADDSUBPD
// Supported forms : (4 forms)
//
//    * VADDSUBPD xmm, xmm, xmm     [AVX]
//    * VADDSUBPD m128, xmm, xmm    [AVX]
//    * VADDSUBPD ymm, ymm, ymm     [AVX]
//    * VADDSUBPD m256, ymm, ymm    [AVX]
//
// Operands are source-first (AT&T order). All forms share opcode 0xD0 and a
// 2-byte VEX prefix; the xmm and ymm forms differ only in the vex2 selector
// (1 vs 5 — presumably the VEX.L vector-length bit; confirm in vex2).
func (self *Program) VADDSUBPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VADDSUBPD", 3, Operands { v0, v1, v2 })
    // VADDSUBPD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))      // 2-byte VEX prefix
            m.emit(0xd0)                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg=dst, rm=src
        })
    }
    // VADDSUBPD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xd0)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    // VADDSUBPD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xd0)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VADDSUBPD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xd0)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VADDSUBPD")
    }
    return p
}
 34076  
// VADDSUBPS performs "Packed Single-FP Add/Subtract".
//
// Mnemonic        : VADDSUBPS
// Supported forms : (4 forms)
//
//    * VADDSUBPS xmm, xmm, xmm     [AVX]
//    * VADDSUBPS m128, xmm, xmm    [AVX]
//    * VADDSUBPS ymm, ymm, ymm     [AVX]
//    * VADDSUBPS m256, ymm, ymm    [AVX]
//
// Identical structure to VADDSUBPD, differing only in the vex2 prefix
// selector (3/7 here vs 1/5 there); both share opcode 0xD0.
func (self *Program) VADDSUBPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VADDSUBPS", 3, Operands { v0, v1, v2 })
    // VADDSUBPS xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), v[0], hlcode(v[1]))      // 2-byte VEX prefix
            m.emit(0xd0)                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg=dst, rm=src
        })
    }
    // VADDSUBPS m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xd0)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    // VADDSUBPS ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(7, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xd0)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VADDSUBPS m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(7, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xd0)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VADDSUBPS")
    }
    return p
}
 34134  
// VAESDEC performs "Perform One Round of an AES Decryption Flow".
//
// Mnemonic        : VAESDEC
// Supported forms : (2 forms)
//
//    * VAESDEC xmm, xmm, xmm     [AES,AVX]
//    * VAESDEC m128, xmm, xmm    [AES,AVX]
//
// Requires both the AES and AVX ISA extensions; opcode 0xDE behind a
// 3-byte VEX prefix.
func (self *Program) VAESDEC(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VAESDEC", 3, Operands { v0, v1, v2 })
    // VAESDEC xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX | ISA_AES)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-packed 3-byte VEX prefix: 0xC4 escape, then two payload
            // bytes with the operands' high register bits XORed in.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xde)                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg=dst, rm=src
        })
    }
    // VAESDEC m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX | ISA_AES)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xde)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VAESDEC")
    }
    return p
}
 34172  
// VAESDECLAST performs "Perform Last Round of an AES Decryption Flow".
//
// Mnemonic        : VAESDECLAST
// Supported forms : (2 forms)
//
//    * VAESDECLAST xmm, xmm, xmm     [AES,AVX]
//    * VAESDECLAST m128, xmm, xmm    [AES,AVX]
//
// Identical encoding structure to VAESDEC, but with opcode 0xDF.
func (self *Program) VAESDECLAST(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VAESDECLAST", 3, Operands { v0, v1, v2 })
    // VAESDECLAST xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX | ISA_AES)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-packed 3-byte VEX prefix (0xC4 escape + two payload bytes).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xdf)                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg=dst, rm=src
        })
    }
    // VAESDECLAST m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX | ISA_AES)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xdf)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VAESDECLAST")
    }
    return p
}
 34210  
// VAESENC performs "Perform One Round of an AES Encryption Flow".
//
// Mnemonic        : VAESENC
// Supported forms : (2 forms)
//
//    * VAESENC xmm, xmm, xmm     [AES,AVX]
//    * VAESENC m128, xmm, xmm    [AES,AVX]
//
// Identical encoding structure to VAESDEC, but with opcode 0xDC.
func (self *Program) VAESENC(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VAESENC", 3, Operands { v0, v1, v2 })
    // VAESENC xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX | ISA_AES)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-packed 3-byte VEX prefix (0xC4 escape + two payload bytes).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xdc)                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg=dst, rm=src
        })
    }
    // VAESENC m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX | ISA_AES)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xdc)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VAESENC")
    }
    return p
}
 34248  
// VAESENCLAST performs "Perform Last Round of an AES Encryption Flow".
//
// Mnemonic        : VAESENCLAST
// Supported forms : (2 forms)
//
//    * VAESENCLAST xmm, xmm, xmm     [AES,AVX]
//    * VAESENCLAST m128, xmm, xmm    [AES,AVX]
//
// Identical encoding structure to VAESENC, but with opcode 0xDD.
func (self *Program) VAESENCLAST(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VAESENCLAST", 3, Operands { v0, v1, v2 })
    // VAESENCLAST xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX | ISA_AES)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-packed 3-byte VEX prefix (0xC4 escape + two payload bytes).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xdd)                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg=dst, rm=src
        })
    }
    // VAESENCLAST m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX | ISA_AES)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xdd)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VAESENCLAST")
    }
    return p
}
 34286  
// VAESIMC performs "Perform the AES InvMixColumn Transformation".
//
// Mnemonic        : VAESIMC
// Supported forms : (2 forms)
//
//    * VAESIMC xmm, xmm     [AES,AVX]
//    * VAESIMC m128, xmm    [AES,AVX]
//
// Two-operand (src, dst) variant of the AES group; opcode 0xDB. Unlike the
// three-operand AES forms, there is no vvvv source register, so the second
// VEX payload byte carries no hlcode term.
func (self *Program) VAESIMC(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VAESIMC", 2, Operands { v0, v1 })
    // VAESIMC xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX | ISA_AES)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-packed 3-byte VEX prefix (0xC4 escape + two payload bytes).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0xdb)                                    // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg=dst, rm=src
        })
    }
    // VAESIMC m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX | ISA_AES)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)  // vvvv = 0: no register source
            m.emit(0xdb)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VAESIMC")
    }
    return p
}
 34324  
// VAESKEYGENASSIST performs "AES Round Key Generation Assist".
//
// Mnemonic        : VAESKEYGENASSIST
// Supported forms : (2 forms)
//
//    * VAESKEYGENASSIST imm8, xmm, xmm     [AES,AVX]
//    * VAESKEYGENASSIST imm8, m128, xmm    [AES,AVX]
//
// Takes an imm8 round-constant selector as the first (AT&T-order) operand;
// it is emitted last, after the ModRM bytes. Opcode 0xDF on the 0F3A map
// (note the 0xe3 VEX payload byte vs 0xe2 in the other AES forms).
func (self *Program) VAESKEYGENASSIST(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VAESKEYGENASSIST", 3, Operands { v0, v1, v2 })
    // VAESKEYGENASSIST imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX | ISA_AES)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-packed 3-byte VEX prefix (0xC4 escape + two payload bytes).
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79)
            m.emit(0xdf)                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))   // ModRM: register-direct, reg=dst, rm=src
            m.imm1(toImmAny(v[0]))                          // trailing 8-bit immediate
        })
    }
    // VAESKEYGENASSIST imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_AVX | ISA_AES)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[2]), addr(v[1]), 0)
            m.emit(0xdf)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)              // ModRM/SIB/disp for the memory operand
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VAESKEYGENASSIST")
    }
    return p
}
 34364  
// VALIGND performs "Align Doubleword Vectors".
//
// Mnemonic        : VALIGND
// Supported forms : (6 forms)
//
//    * VALIGND imm8, m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VALIGND imm8, zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VALIGND imm8, m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VALIGND imm8, xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VALIGND imm8, m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VALIGND imm8, ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// EVEX-only instruction (opcode 0x03) with an imm8 shift count emitted
// last. Memory forms use m.evex with an explicit vector-length field and a
// disp8 scale matching the operand width (16/32/64 bytes); register forms
// hand-pack the 4-byte EVEX prefix, varying only the length bits in the
// final payload byte (0x00/0x20/0x40 for 128/256/512 — confirm in _Encoding).
func (self *Program) VALIGND(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VALIGND", 4, Operands { v0, v1, v2, v3 })
    // VALIGND imm8, m512/m32bcst, zmm, zmm{k}{z}
    if isImm8(v0) && isM512M32bcst(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x03)                                    // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 64)             // disp8 compressed with scale 64 (full zmm width)
            m.imm1(toImmAny(v[0]))                          // trailing 8-bit immediate
        })
    }
    // VALIGND imm8, zmm, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-packed 4-byte EVEX prefix: escape byte + three payload bytes.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x03)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))   // ModRM: register-direct, reg=dst, rm=src
            m.imm1(toImmAny(v[0]))
        })
    }
    // VALIGND imm8, m128/m32bcst, xmm, xmm{k}{z}
    if isImm8(v0) && isM128M32bcst(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x03)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)             // disp8 scale 16 (xmm width)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VALIGND imm8, xmm, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0x03)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VALIGND imm8, m256/m32bcst, ymm, ymm{k}{z}
    if isImm8(v0) && isM256M32bcst(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x03)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)             // disp8 scale 32 (ymm width)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VALIGND imm8, ymm, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x03)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VALIGND")
    }
    return p
}
 34459  
// VALIGNQ performs "Align Quadword Vectors".
//
// Mnemonic        : VALIGNQ
// Supported forms : (6 forms)
//
//    * VALIGNQ imm8, m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VALIGNQ imm8, zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VALIGNQ imm8, m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VALIGNQ imm8, xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VALIGNQ imm8, m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VALIGNQ imm8, ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// 64-bit-element counterpart of VALIGND: same opcode 0x03, but the EVEX
// prefix selects 64-bit granularity (evex pp/W arg 0x85 vs 0x05, and the
// 0xfd vs 0x7d payload byte in the hand-packed forms — presumably EVEX.W).
func (self *Program) VALIGNQ(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VALIGNQ", 4, Operands { v0, v1, v2, v3 })
    // VALIGNQ imm8, m512/m64bcst, zmm, zmm{k}{z}
    if isImm8(v0) && isM512M64bcst(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x03)                                    // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 64)             // disp8 compressed with scale 64 (zmm width)
            m.imm1(toImmAny(v[0]))                          // trailing 8-bit immediate
        })
    }
    // VALIGNQ imm8, zmm, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-packed 4-byte EVEX prefix: escape byte + three payload bytes.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x03)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))   // ModRM: register-direct, reg=dst, rm=src
            m.imm1(toImmAny(v[0]))
        })
    }
    // VALIGNQ imm8, m128/m64bcst, xmm, xmm{k}{z}
    if isImm8(v0) && isM128M64bcst(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x03)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)             // disp8 scale 16 (xmm width)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VALIGNQ imm8, xmm, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0x03)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VALIGNQ imm8, m256/m64bcst, ymm, ymm{k}{z}
    if isImm8(v0) && isM256M64bcst(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x03)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)             // disp8 scale 32 (ymm width)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VALIGNQ imm8, ymm, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x03)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VALIGNQ")
    }
    return p
}
 34554  
// VANDNPD performs "Bitwise Logical AND NOT of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VANDNPD
// Supported forms : (10 forms)
//
//    * VANDNPD xmm, xmm, xmm                   [AVX]
//    * VANDNPD m128, xmm, xmm                  [AVX]
//    * VANDNPD ymm, ymm, ymm                   [AVX]
//    * VANDNPD m256, ymm, ymm                  [AVX]
//    * VANDNPD m512/m64bcst, zmm, zmm{k}{z}    [AVX512DQ]
//    * VANDNPD zmm, zmm, zmm{k}{z}             [AVX512DQ]
//    * VANDNPD m128/m64bcst, xmm, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VANDNPD xmm, xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VANDNPD m256/m64bcst, ymm, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VANDNPD ymm, ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
//
// Operands are source-first (AT&T order). All forms share opcode 0x55; the
// legacy AVX forms use a 2-byte VEX prefix while the masked/broadcast forms
// use an EVEX prefix (via m.evex for memory operands, hand-packed for
// register operands). Note the VEX forms take plain XMM/YMM operands while
// the EVEX forms require isEVEXXMM/isEVEXYMM, so the two sets of predicates
// are mutually exclusive.
func (self *Program) VANDNPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VANDNPD", 3, Operands { v0, v1, v2 })
    // VANDNPD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))      // 2-byte VEX prefix
            m.emit(0x55)                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg=dst, rm=src
        })
    }
    // VANDNPD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x55)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    // VANDNPD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x55)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VANDNPD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x55)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VANDNPD m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x55)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)             // disp8 compressed with scale 64 (zmm width)
        })
    }
    // VANDNPD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-packed 4-byte EVEX prefix: escape byte + three payload bytes.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x55)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VANDNPD m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x55)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)             // disp8 scale 16 (xmm width)
        })
    }
    // VANDNPD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x55)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VANDNPD m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x55)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)             // disp8 scale 32 (ymm width)
        })
    }
    // VANDNPD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x55)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VANDNPD")
    }
    return p
}
 34687  
// VANDNPS performs "Bitwise Logical AND NOT of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VANDNPS
// Supported forms : (10 forms)
//
//    * VANDNPS xmm, xmm, xmm                   [AVX]
//    * VANDNPS m128, xmm, xmm                  [AVX]
//    * VANDNPS ymm, ymm, ymm                   [AVX]
//    * VANDNPS m256, ymm, ymm                  [AVX]
//    * VANDNPS m512/m32bcst, zmm, zmm{k}{z}    [AVX512DQ]
//    * VANDNPS zmm, zmm, zmm{k}{z}             [AVX512DQ]
//    * VANDNPS m128/m32bcst, xmm, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VANDNPS xmm, xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VANDNPS m256/m32bcst, ymm, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VANDNPS ymm, ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
//
// Operands are ordered source-first (v0, v1, v2), with v2 encoded as the
// destination (it supplies the ModRM reg field in every form below).
// Panics if the operand types match none of the supported forms.
func (self *Program) VANDNPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VANDNPS", 3, Operands { v0, v1, v2 })
    // VANDNPS xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Two-byte VEX prefix, opcode 0x55, then ModRM (mod=11, reg=v[2], rm=v[0]).
            m.vex2(0, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x55)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VANDNPS m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: ModRM/SIB/displacement emitted by mrsd (scale 1 = no disp8 compression).
            m.vex2(0, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x55)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VANDNPS ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same as the xmm form but with VEX.L set (first vex2 argument 4).
            m.vex2(4, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x55)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VANDNPS m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x55)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VANDNPS m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix built by the helper; compressed-disp8 scale 64 (full 512-bit access).
            m.evex(0b01, 0x04, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x55)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VANDNPS zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 4-byte EVEX prefix for the register-register form:
            // 0x62 escape, then three payload bytes carrying reg/mask/length bits
            // (the trailing 0x40 selects 512-bit vector length).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x55)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VANDNPS m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Compressed-disp8 scale 16 for the 128-bit memory form.
            m.evex(0b01, 0x04, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x55)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VANDNPS xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // As the zmm register form, but vector-length bits 0x00 (128-bit).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x55)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VANDNPS m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Compressed-disp8 scale 32 for the 256-bit memory form.
            m.evex(0b01, 0x04, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x55)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VANDNPS ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // As the zmm register form, but vector-length bits 0x20 (256-bit).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x55)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VANDNPS")
    }
    return p
}
 34820  
// VANDPD performs "Bitwise Logical AND of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VANDPD
// Supported forms : (10 forms)
//
//    * VANDPD xmm, xmm, xmm                   [AVX]
//    * VANDPD m128, xmm, xmm                  [AVX]
//    * VANDPD ymm, ymm, ymm                   [AVX]
//    * VANDPD m256, ymm, ymm                  [AVX]
//    * VANDPD m512/m64bcst, zmm, zmm{k}{z}    [AVX512DQ]
//    * VANDPD zmm, zmm, zmm{k}{z}             [AVX512DQ]
//    * VANDPD m128/m64bcst, xmm, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VANDPD xmm, xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VANDPD m256/m64bcst, ymm, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VANDPD ymm, ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
//
// Operands are ordered source-first (v0, v1, v2), with v2 encoded as the
// destination (it supplies the ModRM reg field in every form below).
// Panics if the operand types match none of the supported forms.
func (self *Program) VANDPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VANDPD", 3, Operands { v0, v1, v2 })
    // VANDPD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Two-byte VEX prefix (first argument 1 selects the 66 prefix for the PD variant),
            // opcode 0x54, then ModRM (mod=11, reg=v[2], rm=v[0]).
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x54)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VANDPD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: ModRM/SIB/displacement emitted by mrsd (scale 1 = no disp8 compression).
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x54)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VANDPD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit form: vex2 first argument 5 = 66 prefix with VEX.L set.
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x54)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VANDPD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x54)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VANDPD m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix built by the helper (0x85 here vs 0x04 in the PS variant: W=1 for
            // 64-bit elements); compressed-disp8 scale 64 for the 512-bit memory form.
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x54)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VANDPD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 4-byte EVEX prefix for the register-register form
            // (0x62 escape; trailing 0x40 selects 512-bit vector length).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x54)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VANDPD m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Compressed-disp8 scale 16 for the 128-bit memory form.
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x54)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VANDPD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // As the zmm register form, but vector-length bits 0x00 (128-bit).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x54)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VANDPD m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Compressed-disp8 scale 32 for the 256-bit memory form.
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x54)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VANDPD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // As the zmm register form, but vector-length bits 0x20 (256-bit).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x54)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VANDPD")
    }
    return p
}
 34953  
// VANDPS performs "Bitwise Logical AND of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VANDPS
// Supported forms : (10 forms)
//
//    * VANDPS xmm, xmm, xmm                   [AVX]
//    * VANDPS m128, xmm, xmm                  [AVX]
//    * VANDPS ymm, ymm, ymm                   [AVX]
//    * VANDPS m256, ymm, ymm                  [AVX]
//    * VANDPS m512/m32bcst, zmm, zmm{k}{z}    [AVX512DQ]
//    * VANDPS zmm, zmm, zmm{k}{z}             [AVX512DQ]
//    * VANDPS m128/m32bcst, xmm, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VANDPS xmm, xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VANDPS m256/m32bcst, ymm, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VANDPS ymm, ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
//
// Operands are ordered source-first (v0, v1, v2), with v2 encoded as the
// destination (it supplies the ModRM reg field in every form below).
// Panics if the operand types match none of the supported forms.
func (self *Program) VANDPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VANDPS", 3, Operands { v0, v1, v2 })
    // VANDPS xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Two-byte VEX prefix, opcode 0x54, then ModRM (mod=11, reg=v[2], rm=v[0]).
            m.vex2(0, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x54)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VANDPS m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: ModRM/SIB/displacement emitted by mrsd (scale 1 = no disp8 compression).
            m.vex2(0, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x54)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VANDPS ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same as the xmm form but with VEX.L set (first vex2 argument 4).
            m.vex2(4, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x54)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VANDPS m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x54)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VANDPS m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix built by the helper; compressed-disp8 scale 64 (full 512-bit access).
            m.evex(0b01, 0x04, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x54)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VANDPS zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 4-byte EVEX prefix for the register-register form
            // (0x62 escape; trailing 0x40 selects 512-bit vector length).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x54)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VANDPS m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Compressed-disp8 scale 16 for the 128-bit memory form.
            m.evex(0b01, 0x04, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x54)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VANDPS xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // As the zmm register form, but vector-length bits 0x00 (128-bit).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x54)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VANDPS m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Compressed-disp8 scale 32 for the 256-bit memory form.
            m.evex(0b01, 0x04, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x54)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VANDPS ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // As the zmm register form, but vector-length bits 0x20 (256-bit).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x54)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VANDPS")
    }
    return p
}
 35086  
// VBLENDMPD performs "Blend Packed Double-Precision Floating-Point Vectors Using an OpMask Control".
//
// Mnemonic        : VBLENDMPD
// Supported forms : (6 forms)
//
//    * VBLENDMPD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VBLENDMPD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VBLENDMPD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VBLENDMPD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VBLENDMPD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VBLENDMPD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// EVEX-only instruction (no VEX forms). Operands are ordered source-first
// (v0, v1, v2), with v2 encoded as the destination. Panics if the operand
// types match none of the supported forms.
func (self *Program) VBLENDMPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VBLENDMPD", 3, Operands { v0, v1, v2 })
    // VBLENDMPD m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix built by the helper (opcode-map selector 0b10 here, vs 0b01 for
            // VANDPD); compressed-disp8 scale 64 for the 512-bit memory form.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x65)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VBLENDMPD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 4-byte EVEX prefix for the register-register form
            // (0x62 escape; trailing 0x40 selects 512-bit vector length).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x65)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VBLENDMPD m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Compressed-disp8 scale 16 for the 128-bit memory form.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x65)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VBLENDMPD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // As the zmm register form, but vector-length bits 0x00 (128-bit).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x65)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VBLENDMPD m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Compressed-disp8 scale 32 for the 256-bit memory form.
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x65)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VBLENDMPD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // As the zmm register form, but vector-length bits 0x20 (256-bit).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x65)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VBLENDMPD")
    }
    return p
}
 35175  
// VBLENDMPS performs "Blend Packed Single-Precision Floating-Point Vectors Using an OpMask Control".
//
// Mnemonic        : VBLENDMPS
// Supported forms : (6 forms)
//
//    * VBLENDMPS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VBLENDMPS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VBLENDMPS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VBLENDMPS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VBLENDMPS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VBLENDMPS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// EVEX-only instruction (no VEX forms). Operands are ordered source-first
// (v0, v1, v2), with v2 encoded as the destination. Panics if the operand
// types match none of the supported forms.
func (self *Program) VBLENDMPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VBLENDMPS", 3, Operands { v0, v1, v2 })
    // VBLENDMPS m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix built by the helper (0x05 here vs 0x85 in the PD variant:
            // W=0 for 32-bit elements); compressed-disp8 scale 64.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x65)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VBLENDMPS zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 4-byte EVEX prefix for the register-register form
            // (0x62 escape; trailing 0x40 selects 512-bit vector length).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x65)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VBLENDMPS m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Compressed-disp8 scale 16 for the 128-bit memory form.
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x65)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VBLENDMPS xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // As the zmm register form, but vector-length bits 0x00 (128-bit).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x65)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VBLENDMPS m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Compressed-disp8 scale 32 for the 256-bit memory form.
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x65)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VBLENDMPS ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // As the zmm register form, but vector-length bits 0x20 (256-bit).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x65)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VBLENDMPS")
    }
    return p
}
 35264  
// VBLENDPD performs "Blend Packed Double Precision Floating-Point Values".
//
// Mnemonic        : VBLENDPD
// Supported forms : (4 forms)
//
//    * VBLENDPD imm8, xmm, xmm, xmm     [AVX]
//    * VBLENDPD imm8, m128, xmm, xmm    [AVX]
//    * VBLENDPD imm8, ymm, ymm, ymm     [AVX]
//    * VBLENDPD imm8, m256, ymm, ymm    [AVX]
//
// Four-operand form: v0 is the imm8 blend-control byte, v1/v2 are sources,
// and v3 is encoded as the destination (ModRM reg field). Panics if the
// operand types match none of the supported forms.
func (self *Program) VBLENDPD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VBLENDPD", 4, Operands { v0, v1, v2, v3 })
    // VBLENDPD imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix (0xc4 escape), opcode 0x0d,
            // ModRM (mod=11, reg=v[3], rm=v[1]), then the trailing imm8.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x0d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VBLENDPD imm8, m128, xmm, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: VEX3 prefix via helper, then ModRM/SIB/displacement and imm8.
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x0d)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VBLENDPD imm8, ymm, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form: third VEX byte 0x7d instead of 0x79 (VEX.L set).
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit(0x0d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VBLENDPD imm8, m256, ymm, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form: vex3 pp/L selector 0x05 instead of 0x01.
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x0d)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VBLENDPD")
    }
    return p
}
 35330  
// VBLENDPS performs "Blend Packed Single Precision Floating-Point Values".
 35332  //
 35333  // Mnemonic        : VBLENDPS
 35334  // Supported forms : (4 forms)
 35335  //
 35336  //    * VBLENDPS imm8, xmm, xmm, xmm     [AVX]
 35337  //    * VBLENDPS imm8, m128, xmm, xmm    [AVX]
 35338  //    * VBLENDPS imm8, ymm, ymm, ymm     [AVX]
 35339  //    * VBLENDPS imm8, m256, ymm, ymm    [AVX]
 35340  //
func (self *Program) VBLENDPS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Same encoding scheme as VBLENDPD, but with opcode 0x0C.
    // v0 = imm8 selector, v1 = first source (reg/mem), v2 = second source
    // register, v3 = destination register.
    p := self.alloc("VBLENDPS", 4, Operands { v0, v1, v2, v3 })
    // VBLENDPS imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // three-byte VEX prefix
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))  // P0: inverted R/B bits, 0F3A map
            m.emit(0x79 ^ (hlcode(v[2]) << 3))                      // P1: inverted vvvv = v[2], 128-bit, pp=01
            m.emit(0x0c)                                            // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))           // ModRM: register-direct, reg=v[3], rm=v[1]
            m.imm1(toImmAny(v[0]))                                  // trailing imm8 blend mask
        })
    }
    // VBLENDPS imm8, m128, xmm, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))  // 128-bit memory form
            m.emit(0x0c)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VBLENDPS imm8, ymm, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))  // L=1 (256-bit)
            m.emit(0x0c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VBLENDPS imm8, m256, ymm, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))  // 256-bit memory form
            m.emit(0x0c)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched: this operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VBLENDPS")
    }
    return p
}
 35396  
// VBLENDVPD performs "Variable Blend Packed Double Precision Floating-Point Values".
 35398  //
 35399  // Mnemonic        : VBLENDVPD
 35400  // Supported forms : (4 forms)
 35401  //
 35402  //    * VBLENDVPD xmm, xmm, xmm, xmm     [AVX]
 35403  //    * VBLENDVPD xmm, m128, xmm, xmm    [AVX]
 35404  //    * VBLENDVPD ymm, ymm, ymm, ymm     [AVX]
 35405  //    * VBLENDVPD ymm, m256, ymm, ymm    [AVX]
 35406  //
func (self *Program) VBLENDVPD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Variable blend: v0 is the mask register (not an imm8), encoded in the
    // high nibble of a trailing "is4" byte. v1 = first source (reg/mem),
    // v2 = second source register, v3 = destination register.
    p := self.alloc("VBLENDVPD", 4, Operands { v0, v1, v2, v3 })
    // VBLENDVPD xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // three-byte VEX prefix
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))  // P0: inverted R/B bits, 0F3A map
            m.emit(0x79 ^ (hlcode(v[2]) << 3))                      // P1: inverted vvvv = v[2], 128-bit, pp=01
            m.emit(0x4b)                                            // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))           // ModRM: register-direct, reg=v[3], rm=v[1]
            m.emit(hlcode(v[0]) << 4)                               // is4 byte: mask register v[0] in bits 7:4
        })
    }
    // VBLENDVPD xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))  // 128-bit memory form
            m.emit(0x4b)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VBLENDVPD ymm, ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))  // L=1 (256-bit)
            m.emit(0x4b)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VBLENDVPD ymm, m256, ymm, ymm
    if isYMM(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))  // 256-bit memory form
            m.emit(0x4b)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched: this operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VBLENDVPD")
    }
    return p
}
 35462  
// VBLENDVPS performs "Variable Blend Packed Single Precision Floating-Point Values".
 35464  //
 35465  // Mnemonic        : VBLENDVPS
 35466  // Supported forms : (4 forms)
 35467  //
 35468  //    * VBLENDVPS xmm, xmm, xmm, xmm     [AVX]
 35469  //    * VBLENDVPS xmm, m128, xmm, xmm    [AVX]
 35470  //    * VBLENDVPS ymm, ymm, ymm, ymm     [AVX]
 35471  //    * VBLENDVPS ymm, m256, ymm, ymm    [AVX]
 35472  //
func (self *Program) VBLENDVPS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Same encoding scheme as VBLENDVPD, but with opcode 0x4A.
    // v0 = mask register (encoded in the trailing is4 byte), v1 = first
    // source (reg/mem), v2 = second source register, v3 = destination.
    p := self.alloc("VBLENDVPS", 4, Operands { v0, v1, v2, v3 })
    // VBLENDVPS xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // three-byte VEX prefix
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))  // P0: inverted R/B bits, 0F3A map
            m.emit(0x79 ^ (hlcode(v[2]) << 3))                      // P1: inverted vvvv = v[2], 128-bit, pp=01
            m.emit(0x4a)                                            // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))           // ModRM: register-direct, reg=v[3], rm=v[1]
            m.emit(hlcode(v[0]) << 4)                               // is4 byte: mask register v[0] in bits 7:4
        })
    }
    // VBLENDVPS xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))  // 128-bit memory form
            m.emit(0x4a)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VBLENDVPS ymm, ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))  // L=1 (256-bit)
            m.emit(0x4a)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VBLENDVPS ymm, m256, ymm, ymm
    if isYMM(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))  // 256-bit memory form
            m.emit(0x4a)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched: this operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VBLENDVPS")
    }
    return p
}
 35528  
 35529  // VBROADCASTF128 performs "Broadcast 128 Bit of Floating-Point Data".
 35530  //
 35531  // Mnemonic        : VBROADCASTF128
 35532  // Supported forms : (1 form)
 35533  //
 35534  //    * VBROADCASTF128 m128, ymm    [AVX]
 35535  //
func (self *Program) VBROADCASTF128(v0 interface{}, v1 interface{}) *Instruction {
    // v0 = 128-bit memory source, v1 = destination ymm register.
    p := self.alloc("VBROADCASTF128", 2, Operands { v0, v1 })
    // VBROADCASTF128 m128, ymm
    if isM128(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)  // VEX prefix: 0F38 map, 256-bit, no vvvv source
            m.emit(0x1a)                                          // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                    // ModRM/SIB/disp for the memory operand
        })
    }
    // No form matched: this operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VBROADCASTF128")
    }
    return p
}
 35553  
 35554  // VBROADCASTF32X2 performs "Broadcast Two Single-Precision Floating-Point Elements".
 35555  //
 35556  // Mnemonic        : VBROADCASTF32X2
 35557  // Supported forms : (4 forms)
 35558  //
 35559  //    * VBROADCASTF32X2 xmm, zmm{k}{z}    [AVX512DQ]
 35560  //    * VBROADCASTF32X2 m64, zmm{k}{z}    [AVX512DQ]
 35561  //    * VBROADCASTF32X2 xmm, ymm{k}{z}    [AVX512DQ,AVX512VL]
 35562  //    * VBROADCASTF32X2 m64, ymm{k}{z}    [AVX512DQ,AVX512VL]
 35563  //
func (self *Program) VBROADCASTF32X2(v0 interface{}, v1 interface{}) *Instruction {
    // AVX-512 only. v0 = 64-bit source (low xmm lanes or m64), v1 =
    // destination vector register with optional {k} masking / {z} zeroing.
    p := self.alloc("VBROADCASTF32X2", 2, Operands { v0, v1 })
    // VBROADCASTF32X2 xmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))  // P0: inverted R/B/R' extension bits, 0F38 map
            m.emit(0x7d)                                                                // P1: W=0, vvvv unused, pp=01 (0x66)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                             // P2: z bit, aaa mask, L'L=10 (512-bit)
            m.emit(0x19)                                                                // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                               // ModRM: register-direct, reg=v[1], rm=v[0]
        })
    }
    // VBROADCASTF32X2 m64, zmm{k}{z}
    if isM64(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)  // EVEX prefix: 0F38 map, 512-bit, mask/zeroing bits
            m.emit(0x19)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)                                                  // memory operand; disp8 compression scale 8
        })
    }
    // VBROADCASTF32X2 xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)  // P2: L'L=01 (256-bit)
            m.emit(0x19)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VBROADCASTF32X2 m64, ymm{k}{z}
    if isM64(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)  // 256-bit memory form
            m.emit(0x19)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // No form matched: this operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VBROADCASTF32X2")
    }
    return p
}
 35617  
 35618  // VBROADCASTF32X4 performs "Broadcast Four Single-Precision Floating-Point Elements".
 35619  //
 35620  // Mnemonic        : VBROADCASTF32X4
 35621  // Supported forms : (2 forms)
 35622  //
 35623  //    * VBROADCASTF32X4 m128, zmm{k}{z}    [AVX512F]
 35624  //    * VBROADCASTF32X4 m128, ymm{k}{z}    [AVX512F,AVX512VL]
 35625  //
func (self *Program) VBROADCASTF32X4(v0 interface{}, v1 interface{}) *Instruction {
    // AVX-512 only, memory source only. v0 = m128, v1 = masked destination.
    p := self.alloc("VBROADCASTF32X4", 2, Operands { v0, v1 })
    // VBROADCASTF32X4 m128, zmm{k}{z}
    if isM128(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)  // EVEX prefix: 0F38 map, 512-bit
            m.emit(0x1a)                                                                        // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 16)                                                 // memory operand; disp8 compression scale 16
        })
    }
    // VBROADCASTF32X4 m128, ymm{k}{z}
    if isM128(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)  // 256-bit form
            m.emit(0x1a)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // No form matched: this operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VBROADCASTF32X4")
    }
    return p
}
 35653  
 35654  // VBROADCASTF32X8 performs "Broadcast Eight Single-Precision Floating-Point Elements".
 35655  //
 35656  // Mnemonic        : VBROADCASTF32X8
 35657  // Supported forms : (1 form)
 35658  //
 35659  //    * VBROADCASTF32X8 m256, zmm{k}{z}    [AVX512DQ]
 35660  //
func (self *Program) VBROADCASTF32X8(v0 interface{}, v1 interface{}) *Instruction {
    // AVX-512 only, single form. v0 = m256 source, v1 = masked zmm destination.
    p := self.alloc("VBROADCASTF32X8", 2, Operands { v0, v1 })
    // VBROADCASTF32X8 m256, zmm{k}{z}
    if isM256(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)  // EVEX prefix: 0F38 map, 512-bit
            m.emit(0x1b)                                                                        // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 32)                                                 // memory operand; disp8 compression scale 32
        })
    }
    // No form matched: this operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VBROADCASTF32X8")
    }
    return p
}
 35678  
 35679  // VBROADCASTF64X2 performs "Broadcast Two Double-Precision Floating-Point Elements".
 35680  //
 35681  // Mnemonic        : VBROADCASTF64X2
 35682  // Supported forms : (2 forms)
 35683  //
 35684  //    * VBROADCASTF64X2 m128, zmm{k}{z}    [AVX512DQ]
 35685  //    * VBROADCASTF64X2 m128, ymm{k}{z}    [AVX512DQ,AVX512VL]
 35686  //
func (self *Program) VBROADCASTF64X2(v0 interface{}, v1 interface{}) *Instruction {
    // AVX-512 only, memory source only. v0 = m128, v1 = masked destination.
    // Note the 0x85 prefix argument (vs 0x05 in the F32 variants): W=1,
    // selecting 64-bit elements.
    p := self.alloc("VBROADCASTF64X2", 2, Operands { v0, v1 })
    // VBROADCASTF64X2 m128, zmm{k}{z}
    if isM128(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)  // EVEX prefix: 0F38 map, W=1, 512-bit
            m.emit(0x1a)                                                                        // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 16)                                                 // memory operand; disp8 compression scale 16
        })
    }
    // VBROADCASTF64X2 m128, ymm{k}{z}
    if isM128(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)  // 256-bit form
            m.emit(0x1a)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // No form matched: this operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VBROADCASTF64X2")
    }
    return p
}
 35714  
 35715  // VBROADCASTF64X4 performs "Broadcast Four Double-Precision Floating-Point Elements".
 35716  //
 35717  // Mnemonic        : VBROADCASTF64X4
 35718  // Supported forms : (1 form)
 35719  //
 35720  //    * VBROADCASTF64X4 m256, zmm{k}{z}    [AVX512F]
 35721  //
func (self *Program) VBROADCASTF64X4(v0 interface{}, v1 interface{}) *Instruction {
    // AVX-512 only, single form. v0 = m256 source, v1 = masked zmm
    // destination. W=1 (0x85): 64-bit elements.
    p := self.alloc("VBROADCASTF64X4", 2, Operands { v0, v1 })
    // VBROADCASTF64X4 m256, zmm{k}{z}
    if isM256(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)  // EVEX prefix: 0F38 map, W=1, 512-bit
            m.emit(0x1b)                                                                        // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 32)                                                 // memory operand; disp8 compression scale 32
        })
    }
    // No form matched: this operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VBROADCASTF64X4")
    }
    return p
}
 35739  
 35740  // VBROADCASTI128 performs "Broadcast 128 Bits of Integer Data".
 35741  //
 35742  // Mnemonic        : VBROADCASTI128
 35743  // Supported forms : (1 form)
 35744  //
 35745  //    * VBROADCASTI128 m128, ymm    [AVX2]
 35746  //
func (self *Program) VBROADCASTI128(v0 interface{}, v1 interface{}) *Instruction {
    // Integer counterpart of VBROADCASTF128 (opcode 0x5A, AVX2).
    // v0 = 128-bit memory source, v1 = destination ymm register.
    p := self.alloc("VBROADCASTI128", 2, Operands { v0, v1 })
    // VBROADCASTI128 m128, ymm
    if isM128(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)  // VEX prefix: 0F38 map, 256-bit, no vvvv source
            m.emit(0x5a)                                          // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                    // ModRM/SIB/disp for the memory operand
        })
    }
    // No form matched: this operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VBROADCASTI128")
    }
    return p
}
 35764  
 35765  // VBROADCASTI32X2 performs "Broadcast Two Doubleword Elements".
 35766  //
 35767  // Mnemonic        : VBROADCASTI32X2
 35768  // Supported forms : (6 forms)
 35769  //
 35770  //    * VBROADCASTI32X2 xmm, zmm{k}{z}    [AVX512DQ]
 35771  //    * VBROADCASTI32X2 m64, zmm{k}{z}    [AVX512DQ]
 35772  //    * VBROADCASTI32X2 xmm, xmm{k}{z}    [AVX512DQ,AVX512VL]
 35773  //    * VBROADCASTI32X2 xmm, ymm{k}{z}    [AVX512DQ,AVX512VL]
 35774  //    * VBROADCASTI32X2 m64, xmm{k}{z}    [AVX512DQ,AVX512VL]
 35775  //    * VBROADCASTI32X2 m64, ymm{k}{z}    [AVX512DQ,AVX512VL]
 35776  //
func (self *Program) VBROADCASTI32X2(v0 interface{}, v1 interface{}) *Instruction {
    // AVX-512 only (opcode 0x59). v0 = 64-bit source (low xmm lanes or m64),
    // v1 = destination vector register with optional {k}/{z} masking.
    // Unlike the F32X2 variant this also supports an xmm destination.
    p := self.alloc("VBROADCASTI32X2", 2, Operands { v0, v1 })
    // VBROADCASTI32X2 xmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))  // P0: inverted R/B/R' bits, 0F38 map
            m.emit(0x7d)                                                                // P1: W=0, vvvv unused, pp=01
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                             // P2: z bit, aaa mask, L'L=10 (512-bit)
            m.emit(0x59)                                                                // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                               // ModRM: register-direct, reg=v[1], rm=v[0]
        })
    }
    // VBROADCASTI32X2 m64, zmm{k}{z}
    if isM64(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)  // EVEX prefix: 0F38 map, 512-bit
            m.emit(0x59)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)                                                  // memory operand; disp8 compression scale 8
        })
    }
    // VBROADCASTI32X2 xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)  // P2: L'L=00 (128-bit)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VBROADCASTI32X2 xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)  // P2: L'L=01 (256-bit)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VBROADCASTI32X2 m64, xmm{k}{z}
    if isM64(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)  // 128-bit memory form
            m.emit(0x59)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VBROADCASTI32X2 m64, ymm{k}{z}
    if isM64(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)  // 256-bit memory form
            m.emit(0x59)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // No form matched: this operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VBROADCASTI32X2")
    }
    return p
}
 35853  
 35854  // VBROADCASTI32X4 performs "Broadcast Four Doubleword Elements".
 35855  //
 35856  // Mnemonic        : VBROADCASTI32X4
 35857  // Supported forms : (2 forms)
 35858  //
 35859  //    * VBROADCASTI32X4 m128, zmm{k}{z}    [AVX512F]
 35860  //    * VBROADCASTI32X4 m128, ymm{k}{z}    [AVX512F,AVX512VL]
 35861  //
func (self *Program) VBROADCASTI32X4(v0 interface{}, v1 interface{}) *Instruction {
    // Integer counterpart of VBROADCASTF32X4 (opcode 0x5A). v0 = m128,
    // v1 = masked destination.
    p := self.alloc("VBROADCASTI32X4", 2, Operands { v0, v1 })
    // VBROADCASTI32X4 m128, zmm{k}{z}
    if isM128(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)  // EVEX prefix: 0F38 map, 512-bit
            m.emit(0x5a)                                                                        // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 16)                                                 // memory operand; disp8 compression scale 16
        })
    }
    // VBROADCASTI32X4 m128, ymm{k}{z}
    if isM128(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)  // 256-bit form
            m.emit(0x5a)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // No form matched: this operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VBROADCASTI32X4")
    }
    return p
}
 35889  
 35890  // VBROADCASTI32X8 performs "Broadcast Eight Doubleword Elements".
 35891  //
 35892  // Mnemonic        : VBROADCASTI32X8
 35893  // Supported forms : (1 form)
 35894  //
 35895  //    * VBROADCASTI32X8 m256, zmm{k}{z}    [AVX512DQ]
 35896  //
func (self *Program) VBROADCASTI32X8(v0 interface{}, v1 interface{}) *Instruction {
    // Integer counterpart of VBROADCASTF32X8 (opcode 0x5B), single form.
    // v0 = m256 source, v1 = masked zmm destination.
    p := self.alloc("VBROADCASTI32X8", 2, Operands { v0, v1 })
    // VBROADCASTI32X8 m256, zmm{k}{z}
    if isM256(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)  // EVEX prefix: 0F38 map, 512-bit
            m.emit(0x5b)                                                                        // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 32)                                                 // memory operand; disp8 compression scale 32
        })
    }
    // No form matched: this operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VBROADCASTI32X8")
    }
    return p
}
 35914  
 35915  // VBROADCASTI64X2 performs "Broadcast Two Quadword Elements".
 35916  //
 35917  // Mnemonic        : VBROADCASTI64X2
 35918  // Supported forms : (2 forms)
 35919  //
 35920  //    * VBROADCASTI64X2 m128, zmm{k}{z}    [AVX512DQ]
 35921  //    * VBROADCASTI64X2 m128, ymm{k}{z}    [AVX512DQ,AVX512VL]
 35922  //
func (self *Program) VBROADCASTI64X2(v0 interface{}, v1 interface{}) *Instruction {
    // Integer counterpart of VBROADCASTF64X2 (opcode 0x5A, W=1 via 0x85).
    // v0 = m128 source, v1 = masked destination.
    p := self.alloc("VBROADCASTI64X2", 2, Operands { v0, v1 })
    // VBROADCASTI64X2 m128, zmm{k}{z}
    if isM128(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)  // EVEX prefix: 0F38 map, W=1, 512-bit
            m.emit(0x5a)                                                                        // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 16)                                                 // memory operand; disp8 compression scale 16
        })
    }
    // VBROADCASTI64X2 m128, ymm{k}{z}
    if isM128(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)  // 256-bit form
            m.emit(0x5a)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // No form matched: this operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VBROADCASTI64X2")
    }
    return p
}
 35950  
 35951  // VBROADCASTI64X4 performs "Broadcast Four Quadword Elements".
 35952  //
 35953  // Mnemonic        : VBROADCASTI64X4
 35954  // Supported forms : (1 form)
 35955  //
 35956  //    * VBROADCASTI64X4 m256, zmm{k}{z}    [AVX512F]
 35957  //
func (self *Program) VBROADCASTI64X4(v0 interface{}, v1 interface{}) *Instruction {
    // Integer counterpart of VBROADCASTF64X4 (opcode 0x5B, W=1 via 0x85),
    // single form. v0 = m256 source, v1 = masked zmm destination.
    p := self.alloc("VBROADCASTI64X4", 2, Operands { v0, v1 })
    // VBROADCASTI64X4 m256, zmm{k}{z}
    if isM256(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)  // EVEX prefix: 0F38 map, W=1, 512-bit
            m.emit(0x5b)                                                                        // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 32)                                                 // memory operand; disp8 compression scale 32
        })
    }
    // No form matched: this operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VBROADCASTI64X4")
    }
    return p
}
 35975  
 35976  // VBROADCASTSD performs "Broadcast Double-Precision Floating-Point Element".
 35977  //
 35978  // Mnemonic        : VBROADCASTSD
 35979  // Supported forms : (6 forms)
 35980  //
 35981  //    * VBROADCASTSD m64, ymm          [AVX]
 35982  //    * VBROADCASTSD xmm, ymm          [AVX2]
 35983  //    * VBROADCASTSD xmm, zmm{k}{z}    [AVX512F]
 35984  //    * VBROADCASTSD m64, zmm{k}{z}    [AVX512F]
 35985  //    * VBROADCASTSD xmm, ymm{k}{z}    [AVX512F,AVX512VL]
 35986  //    * VBROADCASTSD m64, ymm{k}{z}    [AVX512F,AVX512VL]
 35987  //
func (self *Program) VBROADCASTSD(v0 interface{}, v1 interface{}) *Instruction {
    // Scalar-double broadcast (opcode 0x19). v0 = 64-bit source (m64, or the
    // low lane of an xmm), v1 = destination vector register; the AVX-512
    // forms support {k}/{z} masking. Note: the legacy AVX form accepts only
    // a memory source — the register source requires AVX2.
    p := self.alloc("VBROADCASTSD", 2, Operands { v0, v1 })
    // VBROADCASTSD m64, ymm
    if isM64(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)  // VEX prefix: 0F38 map, 256-bit
            m.emit(0x19)                                          // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                    // ModRM/SIB/disp for the memory operand
        })
    }
    // VBROADCASTSD xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // three-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))  // P0: inverted R/B bits, 0F38 map
            m.emit(0x7d)                                            // P1: vvvv unused, L=1 (256-bit), pp=01
            m.emit(0x19)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))           // ModRM: register-direct, reg=v[1], rm=v[0]
        })
    }
    // VBROADCASTSD xmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))  // P0: inverted R/B/R' bits, 0F38 map
            m.emit(0xfd)                                                                // P1: W=1 (64-bit element), pp=01
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                             // P2: z bit, aaa mask, L'L=10 (512-bit)
            m.emit(0x19)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VBROADCASTSD m64, zmm{k}{z}
    if isM64(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)  // EVEX prefix: 0F38 map, W=1, 512-bit
            m.emit(0x19)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)                                                  // memory operand; disp8 compression scale 8
        })
    }
    // VBROADCASTSD xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)  // P2: L'L=01 (256-bit)
            m.emit(0x19)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VBROADCASTSD m64, ymm{k}{z}
    if isM64(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)  // 256-bit memory form
            m.emit(0x19)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // No form matched: this operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VBROADCASTSD")
    }
    return p
}
 36063  
// VBROADCASTSS performs "Broadcast Single-Precision Floating-Point Element".
//
// Mnemonic        : VBROADCASTSS
// Supported forms : (8 forms)
//
//    * VBROADCASTSS m32, xmm          [AVX]
//    * VBROADCASTSS m32, ymm          [AVX]
//    * VBROADCASTSS xmm, xmm          [AVX2]
//    * VBROADCASTSS xmm, ymm          [AVX2]
//    * VBROADCASTSS xmm, zmm{k}{z}    [AVX512F]
//    * VBROADCASTSS m32, zmm{k}{z}    [AVX512F]
//    * VBROADCASTSS xmm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VBROADCASTSS m32, ymm{k}{z}    [AVX512F,AVX512VL]
//
func (self *Program) VBROADCASTSS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VBROADCASTSS", 2, Operands { v0, v1 })
    // Each matching operand-form check below appends one candidate encoder
    // to p (p.len counts them); if none matches, the final guard panics.
    // VBROADCASTSS m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Three-byte VEX prefix, then opcode 0x18, then ModRM + memory operand.
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x18)
            // Last argument is the displacement scale (1 for VEX-encoded memory forms).
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VBROADCASTSS m32, ymm
    if isM32(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x18)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VBROADCASTSS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built three-byte VEX prefix (0xc4), with the registers'
            // high bits XOR-folded into the second prefix byte.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0x18)
            // ModRM byte: 0xc0 selects register-direct mode (mod = 0b11).
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VBROADCASTSS xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)
            m.emit(0x18)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VBROADCASTSS xmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built four-byte EVEX prefix (0x62); the extended register
            // bits, zeroing flag and mask-register number are folded in below.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x18)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VBROADCASTSS m32, zmm{k}{z}
    if isM32(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x18)
            // Scale 4 matches the 32-bit element size here — presumably the
            // EVEX disp8*N compressed-displacement factor; confirm in mrsd.
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VBROADCASTSS xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x18)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VBROADCASTSS m32, ymm{k}{z}
    if isM32(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x18)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // No operand form matched the supplied argument types.
    if p.len == 0 {
        panic("invalid operands for VBROADCASTSS")
    }
    return p
}
 36175  
// VCMPPD performs "Compare Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VCMPPD
// Supported forms : (11 forms)
//
//    * VCMPPD imm8, xmm, xmm, xmm              [AVX]
//    * VCMPPD imm8, m128, xmm, xmm             [AVX]
//    * VCMPPD imm8, ymm, ymm, ymm              [AVX]
//    * VCMPPD imm8, m256, ymm, ymm             [AVX]
//    * VCMPPD imm8, m512/m64bcst, zmm, k{k}    [AVX512F]
//    * VCMPPD imm8, {sae}, zmm, zmm, k{k}      [AVX512F]
//    * VCMPPD imm8, zmm, zmm, k{k}             [AVX512F]
//    * VCMPPD imm8, m128/m64bcst, xmm, k{k}    [AVX512F,AVX512VL]
//    * VCMPPD imm8, xmm, xmm, k{k}             [AVX512F,AVX512VL]
//    * VCMPPD imm8, m256/m64bcst, ymm, k{k}    [AVX512F,AVX512VL]
//    * VCMPPD imm8, ymm, ymm, k{k}             [AVX512F,AVX512VL]
//
// NOTE(review): per the form list, operands run comparison-predicate first,
// destination last; the optional fifth operand exists only for the {sae} form.
func (self *Program) VCMPPD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The instruction takes 4 operands, or 5 when the {sae} broadcast form is used.
    switch len(vv) {
        case 0  : p = self.alloc("VCMPPD", 4, Operands { v0, v1, v2, v3 })
        case 1  : p = self.alloc("VCMPPD", 5, Operands { v0, v1, v2, v3, vv[0] })
        default : panic("instruction VCMPPD takes 4 or 5 operands")
    }
    // Each matching operand-form check below appends one candidate encoder to p.
    // VCMPPD imm8, xmm, xmm, xmm
    if len(vv) == 0 && isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Two-byte VEX prefix, opcode 0xc2, ModRM (register-direct), imm8 predicate.
            m.vex2(1, hcode(v[3]), v[1], hlcode(v[2]))
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPD imm8, m128, xmm, xmm
    if len(vv) == 0 && isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xc2)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPD imm8, ymm, ymm, ymm
    if len(vv) == 0 && isImm8(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[3]), v[1], hlcode(v[2]))
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPD imm8, m256, ymm, ymm
    if len(vv) == 0 && isImm8(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xc2)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPD imm8, m512/m64bcst, zmm, k{k}
    if len(vv) == 0 && isImm8(v0) && isM512M64bcst(v1) && isZMM(v2) && isKk(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded memory form; bcode(v[1]) carries the broadcast bit.
            m.evex(0b01, 0x85, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, bcode(v[1]))
            m.emit(0xc2)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPD imm8, {sae}, zmm, zmm, k{k}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isZMM(v2) && isZMM(v3) && isKk(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built four-byte EVEX prefix (0x62) for the
            // suppress-all-exceptions variant.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[4]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[4]) << 4)))
            m.emit(0xfd ^ (hlcode(v[3]) << 3))
            m.emit((0x08 ^ (ecode(v[3]) << 3)) | kcode(v[4]) | 0x10)
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPD imm8, zmm, zmm, k{k}
    if len(vv) == 0 && isImm8(v0) && isZMM(v1) && isZMM(v2) && isKk(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPD imm8, m128/m64bcst, xmm, k{k}
    if len(vv) == 0 && isImm8(v0) && isM128M64bcst(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, bcode(v[1]))
            m.emit(0xc2)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPD imm8, xmm, xmm, k{k}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPD imm8, m256/m64bcst, ymm, k{k}
    if len(vv) == 0 && isImm8(v0) && isM256M64bcst(v1) && isEVEXYMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, bcode(v[1]))
            m.emit(0xc2)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPD imm8, ymm, ymm, k{k}
    if len(vv) == 0 && isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No operand form matched the supplied argument types.
    if p.len == 0 {
        panic("invalid operands for VCMPPD")
    }
    return p
}
 36338  
// VCMPPS performs "Compare Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VCMPPS
// Supported forms : (11 forms)
//
//    * VCMPPS imm8, xmm, xmm, xmm              [AVX]
//    * VCMPPS imm8, m128, xmm, xmm             [AVX]
//    * VCMPPS imm8, ymm, ymm, ymm              [AVX]
//    * VCMPPS imm8, m256, ymm, ymm             [AVX]
//    * VCMPPS imm8, m512/m32bcst, zmm, k{k}    [AVX512F]
//    * VCMPPS imm8, {sae}, zmm, zmm, k{k}      [AVX512F]
//    * VCMPPS imm8, zmm, zmm, k{k}             [AVX512F]
//    * VCMPPS imm8, m128/m32bcst, xmm, k{k}    [AVX512F,AVX512VL]
//    * VCMPPS imm8, xmm, xmm, k{k}             [AVX512F,AVX512VL]
//    * VCMPPS imm8, m256/m32bcst, ymm, k{k}    [AVX512F,AVX512VL]
//    * VCMPPS imm8, ymm, ymm, k{k}             [AVX512F,AVX512VL]
//
// Single-precision counterpart of VCMPPD; the structure below mirrors it with
// different prefix constants and 32-bit element displacement scales.
func (self *Program) VCMPPS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // 4 operands normally; 5 only for the {sae} form.
    switch len(vv) {
        case 0  : p = self.alloc("VCMPPS", 4, Operands { v0, v1, v2, v3 })
        case 1  : p = self.alloc("VCMPPS", 5, Operands { v0, v1, v2, v3, vv[0] })
        default : panic("instruction VCMPPS takes 4 or 5 operands")
    }
    // Each matching operand-form check below appends one candidate encoder to p.
    // VCMPPS imm8, xmm, xmm, xmm
    if len(vv) == 0 && isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Two-byte VEX prefix, opcode 0xc2, ModRM (register-direct), imm8 predicate.
            m.vex2(0, hcode(v[3]), v[1], hlcode(v[2]))
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPS imm8, m128, xmm, xmm
    if len(vv) == 0 && isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xc2)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPS imm8, ymm, ymm, ymm
    if len(vv) == 0 && isImm8(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[3]), v[1], hlcode(v[2]))
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPS imm8, m256, ymm, ymm
    if len(vv) == 0 && isImm8(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xc2)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPS imm8, m512/m32bcst, zmm, k{k}
    if len(vv) == 0 && isImm8(v0) && isM512M32bcst(v1) && isZMM(v2) && isKk(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded memory form; bcode(v[1]) carries the broadcast bit.
            m.evex(0b01, 0x04, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, bcode(v[1]))
            m.emit(0xc2)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPS imm8, {sae}, zmm, zmm, k{k}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isZMM(v2) && isZMM(v3) && isKk(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built four-byte EVEX prefix (0x62) for the
            // suppress-all-exceptions variant.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[4]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[4]) << 4)))
            m.emit(0x7c ^ (hlcode(v[3]) << 3))
            m.emit((0x08 ^ (ecode(v[3]) << 3)) | kcode(v[4]) | 0x10)
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPS imm8, zmm, zmm, k{k}
    if len(vv) == 0 && isImm8(v0) && isZMM(v1) && isZMM(v2) && isKk(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7c ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPS imm8, m128/m32bcst, xmm, k{k}
    if len(vv) == 0 && isImm8(v0) && isM128M32bcst(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, bcode(v[1]))
            m.emit(0xc2)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPS imm8, xmm, xmm, k{k}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7c ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPS imm8, m256/m32bcst, ymm, k{k}
    if len(vv) == 0 && isImm8(v0) && isM256M32bcst(v1) && isEVEXYMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, bcode(v[1]))
            m.emit(0xc2)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPS imm8, ymm, ymm, k{k}
    if len(vv) == 0 && isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7c ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No operand form matched the supplied argument types.
    if p.len == 0 {
        panic("invalid operands for VCMPPS")
    }
    return p
}
 36501  
// VCMPSD performs "Compare Scalar Double-Precision Floating-Point Values".
//
// Mnemonic        : VCMPSD
// Supported forms : (5 forms)
//
//    * VCMPSD imm8, xmm, xmm, xmm            [AVX]
//    * VCMPSD imm8, m64, xmm, xmm            [AVX]
//    * VCMPSD imm8, m64, xmm, k{k}           [AVX512F]
//    * VCMPSD imm8, {sae}, xmm, xmm, k{k}    [AVX512F]
//    * VCMPSD imm8, xmm, xmm, k{k}           [AVX512F]
//
// Scalar (single 64-bit element) variant of VCMPPD; same comparison-predicate-
// first, destination-last operand layout.
func (self *Program) VCMPSD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // 4 operands normally; 5 only for the {sae} form.
    switch len(vv) {
        case 0  : p = self.alloc("VCMPSD", 4, Operands { v0, v1, v2, v3 })
        case 1  : p = self.alloc("VCMPSD", 5, Operands { v0, v1, v2, v3, vv[0] })
        default : panic("instruction VCMPSD takes 4 or 5 operands")
    }
    // Each matching operand-form check below appends one candidate encoder to p.
    // VCMPSD imm8, xmm, xmm, xmm
    if len(vv) == 0 && isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Two-byte VEX prefix, opcode 0xc2, ModRM (register-direct), imm8 predicate.
            m.vex2(3, hcode(v[3]), v[1], hlcode(v[2]))
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPSD imm8, m64, xmm, xmm
    if len(vv) == 0 && isImm8(v0) && isM64(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xc2)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPSD imm8, m64, xmm, k{k}
    if len(vv) == 0 && isImm8(v0) && isM64(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded memory form; displacement scale 8 matches the 64-bit element.
            m.evex(0b01, 0x87, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, 0)
            m.emit(0xc2)
            m.mrsd(lcode(v[3]), addr(v[1]), 8)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPSD imm8, {sae}, xmm, xmm, k{k}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) && isKk(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built four-byte EVEX prefix (0x62) for the
            // suppress-all-exceptions variant.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[4]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[4]) << 4)))
            m.emit(0xff ^ (hlcode(v[3]) << 3))
            m.emit((0x08 ^ (ecode(v[3]) << 3)) | kcode(v[4]) | 0x10)
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPSD imm8, xmm, xmm, k{k}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xff ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No operand form matched the supplied argument types.
    if p.len == 0 {
        panic("invalid operands for VCMPSD")
    }
    return p
}
 36586  
// VCMPSS performs "Compare Scalar Single-Precision Floating-Point Values".
//
// Mnemonic        : VCMPSS
// Supported forms : (5 forms)
//
//    * VCMPSS imm8, xmm, xmm, xmm            [AVX]
//    * VCMPSS imm8, m32, xmm, xmm            [AVX]
//    * VCMPSS imm8, m32, xmm, k{k}           [AVX512F]
//    * VCMPSS imm8, {sae}, xmm, xmm, k{k}    [AVX512F]
//    * VCMPSS imm8, xmm, xmm, k{k}           [AVX512F]
//
// Scalar (single 32-bit element) variant of VCMPPS; structure mirrors VCMPSD
// with different prefix constants and a 4-byte displacement scale.
func (self *Program) VCMPSS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // 4 operands normally; 5 only for the {sae} form.
    switch len(vv) {
        case 0  : p = self.alloc("VCMPSS", 4, Operands { v0, v1, v2, v3 })
        case 1  : p = self.alloc("VCMPSS", 5, Operands { v0, v1, v2, v3, vv[0] })
        default : panic("instruction VCMPSS takes 4 or 5 operands")
    }
    // Each matching operand-form check below appends one candidate encoder to p.
    // VCMPSS imm8, xmm, xmm, xmm
    if len(vv) == 0 && isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Two-byte VEX prefix, opcode 0xc2, ModRM (register-direct), imm8 predicate.
            m.vex2(2, hcode(v[3]), v[1], hlcode(v[2]))
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPSS imm8, m32, xmm, xmm
    if len(vv) == 0 && isImm8(v0) && isM32(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xc2)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPSS imm8, m32, xmm, k{k}
    if len(vv) == 0 && isImm8(v0) && isM32(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded memory form; displacement scale 4 matches the 32-bit element.
            m.evex(0b01, 0x06, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, 0)
            m.emit(0xc2)
            m.mrsd(lcode(v[3]), addr(v[1]), 4)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPSS imm8, {sae}, xmm, xmm, k{k}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) && isKk(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built four-byte EVEX prefix (0x62) for the
            // suppress-all-exceptions variant.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[4]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[4]) << 4)))
            m.emit(0x7e ^ (hlcode(v[3]) << 3))
            m.emit((0x08 ^ (ecode(v[3]) << 3)) | kcode(v[4]) | 0x10)
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPSS imm8, xmm, xmm, k{k}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7e ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No operand form matched the supplied argument types.
    if p.len == 0 {
        panic("invalid operands for VCMPSS")
    }
    return p
}
 36671  
// VCOMISD performs "Compare Scalar Ordered Double-Precision Floating-Point Values and Set EFLAGS".
//
// Mnemonic        : VCOMISD
// Supported forms : (5 forms)
//
//    * VCOMISD xmm, xmm           [AVX]
//    * VCOMISD m64, xmm           [AVX]
//    * VCOMISD m64, xmm           [AVX512F]
//    * VCOMISD {sae}, xmm, xmm    [AVX512F]
//    * VCOMISD xmm, xmm           [AVX512F]
//
// NOTE(review): the AVX and AVX512F register/memory forms share operand
// shapes (isXMM vs isEVEXXMM, etc.), so several candidates may be added for
// the same arguments; the encoder presumably selects among them — confirm
// in Instruction/encoding selection logic.
func (self *Program) VCOMISD(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // 2 operands normally; 3 only for the {sae} form.
    switch len(vv) {
        case 0  : p = self.alloc("VCOMISD", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCOMISD", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCOMISD takes 2 or 3 operands")
    }
    // VCOMISD xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Two-byte VEX prefix, opcode 0x2f, ModRM (register-direct).
            m.vex2(1, hcode(v[1]), v[0], 0)
            m.emit(0x2f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCOMISD m64, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x2f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCOMISD m64, xmm
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded memory form; displacement scale 8 matches the 64-bit element.
            m.evex(0b01, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x2f)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VCOMISD {sae}, xmm, xmm
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isEVEXXMM(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built four-byte EVEX prefix (0x62) for the
            // suppress-all-exceptions variant.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit(0x18)
            m.emit(0x2f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCOMISD xmm, xmm
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit(0x48)
            m.emit(0x2f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No operand form matched the supplied argument types.
    if p.len == 0 {
        panic("invalid operands for VCOMISD")
    }
    return p
}
 36751  
// VCOMISS performs "Compare Scalar Ordered Single-Precision Floating-Point Values and Set EFLAGS".
//
// Mnemonic        : VCOMISS
// Supported forms : (5 forms)
//
//    * VCOMISS xmm, xmm           [AVX]
//    * VCOMISS m32, xmm           [AVX]
//    * VCOMISS m32, xmm           [AVX512F]
//    * VCOMISS {sae}, xmm, xmm    [AVX512F]
//    * VCOMISS xmm, xmm           [AVX512F]
//
func (self *Program) VCOMISS(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    // The optional third operand carries the {sae} modifier for the AVX-512
    // suppress-all-exceptions form; any other arity is rejected.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VCOMISS", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCOMISS", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCOMISS takes 2 or 3 operands")
    }
    // VCOMISS xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // 2-byte VEX prefix, then opcode and register-direct ModRM (mod=11).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[1]), v[0], 0)
            m.emit(0x2f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCOMISS m32, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX memory form: displacement scale 1 (no disp8 compression under VEX).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x2f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCOMISS m32, xmm
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX memory form via helper; disp8 is compressed with a 4-byte scale.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x2f)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VCOMISS {sae}, xmm, xmm
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isEVEXXMM(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX prefix emitted byte-by-byte: 0x62 escape, P0 (XOR inverts the
        // register-extension bits), P1 (map/prefix/W), P2 (0x18 requests {sae}),
        // then opcode and register-direct ModRM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c)
            m.emit(0x18)
            m.emit(0x2f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCOMISS xmm, xmm
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX register-direct form (P2 = 0x48; no masking, no {sae}).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit(0x48)
            m.emit(0x2f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No encoder matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VCOMISS")
    }
    return p
}
 36831  
// VCOMPRESSPD performs "Store Sparse Packed Double-Precision Floating-Point Values into Dense Memory/Register".
//
// Mnemonic        : VCOMPRESSPD
// Supported forms : (6 forms)
//
//    * VCOMPRESSPD zmm, zmm{k}{z}     [AVX512F]
//    * VCOMPRESSPD zmm, m512{k}{z}    [AVX512F]
//    * VCOMPRESSPD xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VCOMPRESSPD xmm, m128{k}{z}    [AVX512F,AVX512VL]
//    * VCOMPRESSPD ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VCOMPRESSPD ymm, m256{k}{z}    [AVX512F,AVX512VL]
//
func (self *Program) VCOMPRESSPD(v0 interface{}, v1 interface{}) *Instruction {
    // All forms are EVEX-only; the destination (v1) may carry {k} masking and
    // {z} zeroing, folded into the prefix via kcode/zcode below.
    p := self.alloc("VCOMPRESSPD", 2, Operands { v0, v1 })
    // VCOMPRESSPD zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX prefix emitted byte-by-byte: 0x62 escape, P0 (XOR inverts the
        // register-extension bits), P1, P2 (z bit, mask register, 0x48 length bits).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x8a)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VCOMPRESSPD zmm, m512{k}{z}
    if isZMM(v0) && isM512kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX memory form via helper; disp8 compressed with an 8-byte scale
        // (element size, per the compressed-displacement rules for this op).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x8a)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VCOMPRESSPD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // 128-bit register form (P2 length bits = 0x08).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x8a)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VCOMPRESSPD xmm, m128{k}{z}
    if isEVEXXMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x8a)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VCOMPRESSPD ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // 256-bit register form (P2 length bits = 0x28).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x8a)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VCOMPRESSPD ymm, m256{k}{z}
    if isEVEXYMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x8a)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // No encoder matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VCOMPRESSPD")
    }
    return p
}
 36920  
// VCOMPRESSPS performs "Store Sparse Packed Single-Precision Floating-Point Values into Dense Memory/Register".
//
// Mnemonic        : VCOMPRESSPS
// Supported forms : (6 forms)
//
//    * VCOMPRESSPS zmm, zmm{k}{z}     [AVX512F]
//    * VCOMPRESSPS zmm, m512{k}{z}    [AVX512F]
//    * VCOMPRESSPS xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VCOMPRESSPS xmm, m128{k}{z}    [AVX512F,AVX512VL]
//    * VCOMPRESSPS ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VCOMPRESSPS ymm, m256{k}{z}    [AVX512F,AVX512VL]
//
func (self *Program) VCOMPRESSPS(v0 interface{}, v1 interface{}) *Instruction {
    // Single-precision counterpart of VCOMPRESSPD: same EVEX-only structure but
    // with W=0 prefix bytes and a 4-byte disp8 compression scale.
    p := self.alloc("VCOMPRESSPS", 2, Operands { v0, v1 })
    // VCOMPRESSPS zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX prefix emitted byte-by-byte: 0x62 escape, P0 (XOR inverts the
        // register-extension bits), P1, P2 (z bit, mask register, 0x48 length bits).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x8a)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VCOMPRESSPS zmm, m512{k}{z}
    if isZMM(v0) && isM512kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX memory form via helper; disp8 compressed with a 4-byte scale.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x8a)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // VCOMPRESSPS xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // 128-bit register form (P2 length bits = 0x08).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x8a)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VCOMPRESSPS xmm, m128{k}{z}
    if isEVEXXMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x8a)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // VCOMPRESSPS ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // 256-bit register form (P2 length bits = 0x28).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x8a)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VCOMPRESSPS ymm, m256{k}{z}
    if isEVEXYMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x8a)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // No encoder matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VCOMPRESSPS")
    }
    return p
}
 37009  
// VCVTDQ2PD performs "Convert Packed Dword Integers to Packed Double-Precision FP Values".
//
// Mnemonic        : VCVTDQ2PD
// Supported forms : (10 forms)
//
//    * VCVTDQ2PD xmm, xmm                   [AVX]
//    * VCVTDQ2PD m64, xmm                   [AVX]
//    * VCVTDQ2PD xmm, ymm                   [AVX]
//    * VCVTDQ2PD m128, ymm                  [AVX]
//    * VCVTDQ2PD m256/m32bcst, zmm{k}{z}    [AVX512F]
//    * VCVTDQ2PD ymm, zmm{k}{z}             [AVX512F]
//    * VCVTDQ2PD m64/m32bcst, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VCVTDQ2PD m128/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTDQ2PD xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VCVTDQ2PD xmm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VCVTDQ2PD(v0 interface{}, v1 interface{}) *Instruction {
    // Each matching form below registers one encoder closure; broadcast memory
    // operands fold their bcst flag into the EVEX prefix via bcode.
    p := self.alloc("VCVTDQ2PD", 2, Operands { v0, v1 })
    // VCVTDQ2PD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // 2-byte VEX prefix, then opcode and register-direct ModRM (mod=11).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[1]), v[0], 0)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTDQ2PD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX memory form: displacement scale 1 (no disp8 compression under VEX).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[1]), addr(v[0]), 0)
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTDQ2PD xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // 256-bit VEX register form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(6, hcode(v[1]), v[0], 0)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTDQ2PD m128, ymm
    if isM128(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(6, hcode(v[1]), addr(v[0]), 0)
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTDQ2PD m256/m32bcst, zmm{k}{z}
    if isM256M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX memory form via helper; disp8 compressed with a 32-byte scale
        // for the full-width (non-broadcast) memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTDQ2PD ymm, zmm{k}{z}
    if isEVEXYMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX prefix emitted byte-by-byte: 0x62 escape, P0 (XOR inverts the
        // register-extension bits), P1, P2 (z bit, mask register, 0x48 length bits).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTDQ2PD m64/m32bcst, xmm{k}{z}
    if isM64M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VCVTDQ2PD m128/m32bcst, ymm{k}{z}
    if isM128M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTDQ2PD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // 128-bit EVEX register form (P2 length bits = 0x08).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTDQ2PD xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // 256-bit EVEX register form (P2 length bits = 0x28).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No encoder matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VCVTDQ2PD")
    }
    return p
}
 37142  
// VCVTDQ2PS performs "Convert Packed Dword Integers to Packed Single-Precision FP Values".
//
// Mnemonic        : VCVTDQ2PS
// Supported forms : (11 forms)
//
//    * VCVTDQ2PS xmm, xmm                   [AVX]
//    * VCVTDQ2PS m128, xmm                  [AVX]
//    * VCVTDQ2PS ymm, ymm                   [AVX]
//    * VCVTDQ2PS m256, ymm                  [AVX]
//    * VCVTDQ2PS m512/m32bcst, zmm{k}{z}    [AVX512F]
//    * VCVTDQ2PS {er}, zmm, zmm{k}{z}       [AVX512F]
//    * VCVTDQ2PS zmm, zmm{k}{z}             [AVX512F]
//    * VCVTDQ2PS m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTDQ2PS m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTDQ2PS xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VCVTDQ2PS ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VCVTDQ2PS(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    // The optional third operand carries the {er} embedded-rounding modifier
    // for the AVX-512 3-operand form; any other arity is rejected.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VCVTDQ2PS", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTDQ2PS", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTDQ2PS takes 2 or 3 operands")
    }
    // VCVTDQ2PS xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // 2-byte VEX prefix, then opcode and register-direct ModRM (mod=11).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[1]), v[0], 0)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTDQ2PS m128, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX memory form: displacement scale 1 (no disp8 compression under VEX).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTDQ2PS ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // 256-bit VEX register form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[1]), v[0], 0)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTDQ2PS m256, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTDQ2PS m512/m32bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX memory form via helper; disp8 compressed with a 64-byte scale
        // for the full-width (non-broadcast) memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VCVTDQ2PS {er}, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // Embedded-rounding form: vcode(v[0]) supplies the rounding-control
        // bits and 0x18 sets EVEX.b in the P2 byte.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[2]) << 7) | (vcode(v[0]) << 5) | kcode(v[2]) | 0x18)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTDQ2PS zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // 512-bit EVEX register form (P2 length bits = 0x48).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTDQ2PS m128/m32bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTDQ2PS m256/m32bcst, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTDQ2PS xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // 128-bit EVEX register form (P2 length bits = 0x08).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTDQ2PS ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // 256-bit EVEX register form (P2 length bits = 0x28).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No encoder matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VCVTDQ2PS")
    }
    return p
}
 37294  
// VCVTPD2DQ performs "Convert Packed Double-Precision FP Values to Packed Dword Integers".
//
// Mnemonic        : VCVTPD2DQ
// Supported forms : (11 forms)
//
//    * VCVTPD2DQ xmm, xmm                   [AVX]
//    * VCVTPD2DQ ymm, xmm                   [AVX]
//    * VCVTPD2DQ m128, xmm                  [AVX]
//    * VCVTPD2DQ m256, xmm                  [AVX]
//    * VCVTPD2DQ m512/m64bcst, ymm{k}{z}    [AVX512F]
//    * VCVTPD2DQ {er}, zmm, ymm{k}{z}       [AVX512F]
//    * VCVTPD2DQ zmm, ymm{k}{z}             [AVX512F]
//    * VCVTPD2DQ m128/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTPD2DQ m256/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTPD2DQ xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VCVTPD2DQ ymm, xmm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VCVTPD2DQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    // The optional third operand carries the {er} embedded-rounding modifier
    // for the AVX-512 3-operand form; any other arity is rejected. Note the
    // destination is always narrower than the source (doubles -> dwords).
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VCVTPD2DQ", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTPD2DQ", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTPD2DQ takes 2 or 3 operands")
    }
    // VCVTPD2DQ xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // 2-byte VEX prefix, then opcode and register-direct ModRM (mod=11).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[1]), v[0], 0)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPD2DQ ymm, xmm
    if len(vv) == 0 && isYMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // 256-bit source VEX register form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(7, hcode(v[1]), v[0], 0)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPD2DQ m128, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX memory form: displacement scale 1 (no disp8 compression under VEX).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[1]), addr(v[0]), 0)
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTPD2DQ m256, xmm
    if len(vv) == 0 && isM256(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(7, hcode(v[1]), addr(v[0]), 0)
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTPD2DQ m512/m64bcst, ymm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX memory form via helper; disp8 compressed with a 64-byte scale
        // for the full-width (non-broadcast) memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VCVTPD2DQ {er}, zmm, ymm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isYMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // Embedded-rounding form: vcode(v[0]) supplies the rounding-control
        // bits and 0x18 sets EVEX.b in the P2 byte.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xff)
            m.emit((zcode(v[2]) << 7) | (vcode(v[0]) << 5) | kcode(v[2]) | 0x18)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTPD2DQ zmm, ymm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // 512-bit source EVEX register form (P2 length bits = 0x48).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xff)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPD2DQ m128/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTPD2DQ m256/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTPD2DQ xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // 128-bit EVEX register form (P2 length bits = 0x08).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xff)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPD2DQ ymm, xmm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // 256-bit source EVEX register form (P2 length bits = 0x28).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xff)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No encoder matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VCVTPD2DQ")
    }
    return p
}
 37446  
// VCVTPD2PS performs "Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values".
//
// Mnemonic        : VCVTPD2PS
// Supported forms : (11 forms)
//
//    * VCVTPD2PS xmm, xmm                   [AVX]
//    * VCVTPD2PS ymm, xmm                   [AVX]
//    * VCVTPD2PS m128, xmm                  [AVX]
//    * VCVTPD2PS m256, xmm                  [AVX]
//    * VCVTPD2PS m512/m64bcst, ymm{k}{z}    [AVX512F]
//    * VCVTPD2PS {er}, zmm, ymm{k}{z}       [AVX512F]
//    * VCVTPD2PS zmm, ymm{k}{z}             [AVX512F]
//    * VCVTPD2PS m128/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTPD2PS m256/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTPD2PS xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VCVTPD2PS ymm, xmm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VCVTPD2PS(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate the instruction by arity: the optional third operand is only
    // present for the {er} (embedded-rounding) form below.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTPD2PS", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTPD2PS", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTPD2PS takes 2 or 3 operands")
    }
    // Each operand-form check registers a candidate encoder via p.add().
    // Register-only EVEX forms emit the 4-byte EVEX prefix inline (escape
    // byte 0x62 followed by hand-assembled R/X/B/map, W/pp and z/L'L/b/aaa
    // bytes); memory forms go through m.evex()/m.vex2() and m.mrsd(), whose
    // final argument tracks the memory operand width (EVEX disp8*N
    // compressed-displacement scaling).
    // VCVTPD2PS xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[1]), v[0], 0)
            m.emit(0x5a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPD2PS ymm, xmm
    if len(vv) == 0 && isYMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[1]), v[0], 0)
            m.emit(0x5a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPD2PS m128, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x5a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTPD2PS m256, xmm
    if len(vv) == 0 && isM256(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x5a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTPD2PS m512/m64bcst, ymm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x5a)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VCVTPD2PS {er}, zmm, ymm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isYMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // Operand order here is v[0]={er} rounding mode, v[1]=source zmm,
        // v[2]=destination, so the register codes index differently from
        // the two-operand forms.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | (vcode(v[0]) << 5) | kcode(v[2]) | 0x18)
            m.emit(0x5a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTPD2PS zmm, ymm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x5a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPD2PS m128/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x5a)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTPD2PS m256/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x5a)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTPD2PS xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x5a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPD2PS ymm, xmm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x5a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered: the operands match no supported form.
    if p.len == 0 {
        panic("invalid operands for VCVTPD2PS")
    }
    return p
}
 37598  
// VCVTPD2QQ performs "Convert Packed Double-Precision Floating-Point Values to Packed Quadword Integers".
//
// Mnemonic        : VCVTPD2QQ
// Supported forms : (7 forms)
//
//    * VCVTPD2QQ m512/m64bcst, zmm{k}{z}    [AVX512DQ]
//    * VCVTPD2QQ {er}, zmm, zmm{k}{z}       [AVX512DQ]
//    * VCVTPD2QQ zmm, zmm{k}{z}             [AVX512DQ]
//    * VCVTPD2QQ m128/m64bcst, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VCVTPD2QQ m256/m64bcst, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VCVTPD2QQ xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VCVTPD2QQ ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
//
func (self *Program) VCVTPD2QQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate by arity: a third operand appears only in the {er}
    // (embedded-rounding) form.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTPD2QQ", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTPD2QQ", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTPD2QQ takes 2 or 3 operands")
    }
    // Each matching form registers an encoder via p.add(). Register forms
    // emit the EVEX prefix inline (0x62 escape plus hand-assembled bytes);
    // memory forms use m.evex()/m.mrsd(), the last mrsd argument being the
    // operand width used for EVEX disp8*N compressed-displacement scaling.
    // VCVTPD2QQ m512/m64bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7b)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VCVTPD2QQ {er}, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        // v[0]={er} rounding mode, v[1]=source zmm, v[2]=destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | (vcode(v[0]) << 5) | kcode(v[2]) | 0x18)
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTPD2QQ zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPD2QQ m128/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7b)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTPD2QQ m256/m64bcst, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7b)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTPD2QQ xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPD2QQ ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered: the operands match no supported form.
    if p.len == 0 {
        panic("invalid operands for VCVTPD2QQ")
    }
    return p
}
 37706  
// VCVTPD2UDQ performs "Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers".
//
// Mnemonic        : VCVTPD2UDQ
// Supported forms : (7 forms)
//
//    * VCVTPD2UDQ m512/m64bcst, ymm{k}{z}    [AVX512F]
//    * VCVTPD2UDQ {er}, zmm, ymm{k}{z}       [AVX512F]
//    * VCVTPD2UDQ zmm, ymm{k}{z}             [AVX512F]
//    * VCVTPD2UDQ m128/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTPD2UDQ m256/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTPD2UDQ xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VCVTPD2UDQ ymm, xmm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VCVTPD2UDQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate by arity: a third operand appears only in the {er}
    // (embedded-rounding) form.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTPD2UDQ", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTPD2UDQ", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTPD2UDQ takes 2 or 3 operands")
    }
    // Each matching form registers an encoder via p.add(). Register forms
    // emit the EVEX prefix inline (0x62 escape plus hand-assembled bytes);
    // memory forms use m.evex()/m.mrsd(), the last mrsd argument being the
    // operand width used for EVEX disp8*N compressed-displacement scaling.
    // VCVTPD2UDQ m512/m64bcst, ymm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x84, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VCVTPD2UDQ {er}, zmm, ymm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isYMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // v[0]={er} rounding mode, v[1]=source zmm, v[2]=destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfc)
            m.emit((zcode(v[2]) << 7) | (vcode(v[0]) << 5) | kcode(v[2]) | 0x18)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTPD2UDQ zmm, ymm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfc)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPD2UDQ m128/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x84, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTPD2UDQ m256/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x84, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTPD2UDQ xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfc)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPD2UDQ ymm, xmm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfc)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered: the operands match no supported form.
    if p.len == 0 {
        panic("invalid operands for VCVTPD2UDQ")
    }
    return p
}
 37814  
// VCVTPD2UQQ performs "Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers".
//
// Mnemonic        : VCVTPD2UQQ
// Supported forms : (7 forms)
//
//    * VCVTPD2UQQ m512/m64bcst, zmm{k}{z}    [AVX512DQ]
//    * VCVTPD2UQQ {er}, zmm, zmm{k}{z}       [AVX512DQ]
//    * VCVTPD2UQQ zmm, zmm{k}{z}             [AVX512DQ]
//    * VCVTPD2UQQ m128/m64bcst, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VCVTPD2UQQ m256/m64bcst, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VCVTPD2UQQ xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VCVTPD2UQQ ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
//
func (self *Program) VCVTPD2UQQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate by arity: a third operand appears only in the {er}
    // (embedded-rounding) form.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTPD2UQQ", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTPD2UQQ", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTPD2UQQ takes 2 or 3 operands")
    }
    // Each matching form registers an encoder via p.add(). Register forms
    // emit the EVEX prefix inline (0x62 escape plus hand-assembled bytes);
    // memory forms use m.evex()/m.mrsd(), the last mrsd argument being the
    // operand width used for EVEX disp8*N compressed-displacement scaling.
    // VCVTPD2UQQ m512/m64bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VCVTPD2UQQ {er}, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        // v[0]={er} rounding mode, v[1]=source zmm, v[2]=destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | (vcode(v[0]) << 5) | kcode(v[2]) | 0x18)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTPD2UQQ zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPD2UQQ m128/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTPD2UQQ m256/m64bcst, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTPD2UQQ xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPD2UQQ ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered: the operands match no supported form.
    if p.len == 0 {
        panic("invalid operands for VCVTPD2UQQ")
    }
    return p
}
 37922  
// VCVTPH2PS performs "Convert Half-Precision FP Values to Single-Precision FP Values".
//
// Mnemonic        : VCVTPH2PS
// Supported forms : (11 forms)
//
//    * VCVTPH2PS xmm, xmm                 [F16C]
//    * VCVTPH2PS m64, xmm                 [F16C]
//    * VCVTPH2PS xmm, ymm                 [F16C]
//    * VCVTPH2PS m128, ymm                [F16C]
//    * VCVTPH2PS m256, zmm{k}{z}          [AVX512F]
//    * VCVTPH2PS {sae}, ymm, zmm{k}{z}    [AVX512F]
//    * VCVTPH2PS ymm, zmm{k}{z}           [AVX512F]
//    * VCVTPH2PS xmm, xmm{k}{z}           [AVX512F,AVX512VL]
//    * VCVTPH2PS xmm, ymm{k}{z}           [AVX512F,AVX512VL]
//    * VCVTPH2PS m64, xmm{k}{z}           [AVX512F,AVX512VL]
//    * VCVTPH2PS m128, ymm{k}{z}          [AVX512F,AVX512VL]
//
func (self *Program) VCVTPH2PS(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate by arity: a third operand appears only in the {sae}
    // (suppress-all-exceptions) form.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTPH2PS", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTPH2PS", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTPH2PS takes 2 or 3 operands")
    }
    // Each matching form registers an encoder via p.add(). F16C register
    // forms emit a 3-byte VEX prefix inline (0xc4 escape); EVEX register
    // forms emit the 4-byte EVEX prefix inline (0x62 escape). Memory forms
    // use m.vex3()/m.evex() with m.mrsd(), whose last argument is the
    // operand width used for EVEX disp8*N compressed-displacement scaling.
    // VCVTPH2PS xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) {
        self.require(ISA_F16C)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0x13)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPH2PS m64, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) {
        self.require(ISA_F16C)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x13)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTPH2PS xmm, ymm
    if len(vv) == 0 && isXMM(v0) && isYMM(v1) {
        self.require(ISA_F16C)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)
            m.emit(0x13)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPH2PS m128, ymm
    if len(vv) == 0 && isM128(v0) && isYMM(v1) {
        self.require(ISA_F16C)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x13)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTPH2PS m256, zmm{k}{z}
    if len(vv) == 0 && isM256(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x13)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTPH2PS {sae}, ymm, zmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isEVEXYMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // v[0]={sae} modifier, v[1]=source ymm, v[2]=destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x18)
            m.emit(0x13)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTPH2PS ymm, zmm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x13)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPH2PS xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x13)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPH2PS xmm, ymm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x13)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPH2PS m64, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x13)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VCVTPH2PS m128, ymm{k}{z}
    if len(vv) == 0 && isM128(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x13)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // No encoder was registered: the operands match no supported form.
    if p.len == 0 {
        panic("invalid operands for VCVTPH2PS")
    }
    return p
}
 38078  
// VCVTPS2DQ performs "Convert Packed Single-Precision FP Values to Packed Dword Integers".
//
// Mnemonic        : VCVTPS2DQ
// Supported forms : (11 forms)
//
//    * VCVTPS2DQ xmm, xmm                   [AVX]
//    * VCVTPS2DQ m128, xmm                  [AVX]
//    * VCVTPS2DQ ymm, ymm                   [AVX]
//    * VCVTPS2DQ m256, ymm                  [AVX]
//    * VCVTPS2DQ m512/m32bcst, zmm{k}{z}    [AVX512F]
//    * VCVTPS2DQ {er}, zmm, zmm{k}{z}       [AVX512F]
//    * VCVTPS2DQ zmm, zmm{k}{z}             [AVX512F]
//    * VCVTPS2DQ m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTPS2DQ m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTPS2DQ xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VCVTPS2DQ ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VCVTPS2DQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional trailing operand (vv[0]) is only used by the
    // embedded-rounding {er} form; all other forms take two operands.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTPS2DQ", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTPS2DQ", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTPS2DQ takes 2 or 3 operands")
    }
    // Each matching form below registers a candidate encoder via p.add;
    // more than one form may match the same operand combination.
    // VCVTPS2DQ xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[1]), v[0], 0)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPS2DQ m128, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTPS2DQ ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[1]), v[0], 0)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPS2DQ m256, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTPS2DQ m512/m32bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VCVTPS2DQ {er}, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62 escape followed by the payload
            // bytes; the XOR folds in the register-extension bits, which the
            // EVEX format stores inverted.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | (vcode(v[0]) << 5) | kcode(v[2]) | 0x18)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTPS2DQ zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPS2DQ m128/m32bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTPS2DQ m256/m32bcst, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTPS2DQ xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPS2DQ ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VCVTPS2DQ")
    }
    return p
}
 38230  
// VCVTPS2PD performs "Convert Packed Single-Precision FP Values to Packed Double-Precision FP Values".
//
// Mnemonic        : VCVTPS2PD
// Supported forms : (11 forms)
//
//    * VCVTPS2PD xmm, xmm                   [AVX]
//    * VCVTPS2PD m64, xmm                   [AVX]
//    * VCVTPS2PD xmm, ymm                   [AVX]
//    * VCVTPS2PD m128, ymm                  [AVX]
//    * VCVTPS2PD m256/m32bcst, zmm{k}{z}    [AVX512F]
//    * VCVTPS2PD {sae}, ymm, zmm{k}{z}      [AVX512F]
//    * VCVTPS2PD ymm, zmm{k}{z}             [AVX512F]
//    * VCVTPS2PD m64/m32bcst, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VCVTPS2PD m128/m32bcst, ymm{k}{z}    [AVX512VL]
//    * VCVTPS2PD xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VCVTPS2PD xmm, ymm{k}{z}             [AVX512VL]
//
func (self *Program) VCVTPS2PD(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional trailing operand (vv[0]) is only used by the
    // suppress-all-exceptions {sae} form; all other forms take two operands.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTPS2PD", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTPS2PD", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTPS2PD takes 2 or 3 operands")
    }
    // Each matching form below registers a candidate encoder via p.add;
    // more than one form may match the same operand combination.
    // VCVTPS2PD xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[1]), v[0], 0)
            m.emit(0x5a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPS2PD m64, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x5a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTPS2PD xmm, ymm
    if len(vv) == 0 && isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[1]), v[0], 0)
            m.emit(0x5a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPS2PD m128, ymm
    if len(vv) == 0 && isM128(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x5a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTPS2PD m256/m32bcst, zmm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x5a)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTPS2PD {sae}, ymm, zmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isEVEXYMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62 escape followed by the payload
            // bytes; the XOR folds in the register-extension bits, which the
            // EVEX format stores inverted.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x18)
            m.emit(0x5a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTPS2PD ymm, zmm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x5a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPS2PD m64/m32bcst, xmm{k}{z}
    if len(vv) == 0 && isM64M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x5a)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VCVTPS2PD m128/m32bcst, ymm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x5a)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTPS2PD xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x5a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPS2PD xmm, ymm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x5a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VCVTPS2PD")
    }
    return p
}
 38382  
// VCVTPS2PH performs "Convert Single-Precision FP value to Half-Precision FP value".
//
// Mnemonic        : VCVTPS2PH
// Supported forms : (11 forms)
//
//    * VCVTPS2PH imm8, xmm, xmm                 [F16C]
//    * VCVTPS2PH imm8, ymm, xmm                 [F16C]
//    * VCVTPS2PH imm8, xmm, m64                 [F16C]
//    * VCVTPS2PH imm8, ymm, m128                [F16C]
//    * VCVTPS2PH imm8, zmm, m256{k}{z}          [AVX512F]
//    * VCVTPS2PH imm8, {sae}, zmm, ymm{k}{z}    [AVX512F]
//    * VCVTPS2PH imm8, zmm, ymm{k}{z}           [AVX512F]
//    * VCVTPS2PH imm8, xmm, xmm{k}{z}           [AVX512F,AVX512VL]
//    * VCVTPS2PH imm8, xmm, m64{k}{z}           [AVX512F,AVX512VL]
//    * VCVTPS2PH imm8, ymm, xmm{k}{z}           [AVX512F,AVX512VL]
//    * VCVTPS2PH imm8, ymm, m128{k}{z}          [AVX512F,AVX512VL]
//
func (self *Program) VCVTPS2PH(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Unlike the other conversions, this instruction always carries a leading
    // rounding-control imm8; the optional fourth operand (vv[0]) is only used
    // by the suppress-all-exceptions {sae} form.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTPS2PH", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VCVTPS2PH", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VCVTPS2PH takes 3 or 4 operands")
    }
    // Each matching form below registers a candidate encoder via p.add;
    // more than one form may match the same operand combination.
    // VCVTPS2PH imm8, xmm, xmm
    if len(vv) == 0 && isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_F16C)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled three-byte VEX prefix (0xc4 escape); the XOR folds
            // in the register-extension bits, which VEX stores inverted.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[1]) << 7) ^ (hcode(v[2]) << 5))
            m.emit(0x79)
            m.emit(0x1d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCVTPS2PH imm8, ymm, xmm
    if len(vv) == 0 && isImm8(v0) && isYMM(v1) && isXMM(v2) {
        self.require(ISA_F16C)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[1]) << 7) ^ (hcode(v[2]) << 5))
            m.emit(0x7d)
            m.emit(0x1d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCVTPS2PH imm8, xmm, m64
    if len(vv) == 0 && isImm8(v0) && isXMM(v1) && isM64(v2) {
        self.require(ISA_F16C)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[1]), addr(v[2]), 0)
            m.emit(0x1d)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCVTPS2PH imm8, ymm, m128
    if len(vv) == 0 && isImm8(v0) && isYMM(v1) && isM128(v2) {
        self.require(ISA_F16C)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[1]), addr(v[2]), 0)
            m.emit(0x1d)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCVTPS2PH imm8, zmm, m256{k}{z}
    if len(vv) == 0 && isImm8(v0) && isZMM(v1) && isM256kz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b10, ehcode(v[1]), addr(v[2]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x1d)
            m.mrsd(lcode(v[1]), addr(v[2]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCVTPS2PH imm8, {sae}, zmm, ymm{k}{z}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isZMM(v2) && isYMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Note: v[2] is the source register (ModRM.reg) and v[3] the
            // destination (ModRM.rm) — this instruction stores to the rm
            // operand.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[3]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[3]) << 7) | kcode(v[3]) | 0x18)
            m.emit(0x1d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[3]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCVTPS2PH imm8, zmm, ymm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isZMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)
            m.emit(0x1d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCVTPS2PH imm8, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x08)
            m.emit(0x1d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCVTPS2PH imm8, xmm, m64{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isM64kz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b00, ehcode(v[1]), addr(v[2]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x1d)
            m.mrsd(lcode(v[1]), addr(v[2]), 8)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCVTPS2PH imm8, ymm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXYMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x28)
            m.emit(0x1d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCVTPS2PH imm8, ymm, m128{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXYMM(v1) && isM128kz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b01, ehcode(v[1]), addr(v[2]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x1d)
            m.mrsd(lcode(v[1]), addr(v[2]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VCVTPS2PH")
    }
    return p
}
 38549  
// VCVTPS2QQ performs "Convert Packed Single Precision Floating-Point Values to Packed Signed Quadword Integer Values".
//
// Mnemonic        : VCVTPS2QQ
// Supported forms : (7 forms)
//
//    * VCVTPS2QQ m256/m32bcst, zmm{k}{z}    [AVX512DQ]
//    * VCVTPS2QQ {er}, ymm, zmm{k}{z}       [AVX512DQ]
//    * VCVTPS2QQ ymm, zmm{k}{z}             [AVX512DQ]
//    * VCVTPS2QQ m64/m32bcst, xmm{k}{z}     [AVX512DQ,AVX512VL]
//    * VCVTPS2QQ m128/m32bcst, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VCVTPS2QQ xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VCVTPS2QQ xmm, ymm{k}{z}             [AVX512DQ,AVX512VL]
//
func (self *Program) VCVTPS2QQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional trailing operand (vv[0]) is only used by the
    // embedded-rounding {er} form; all other forms take two operands.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTPS2QQ", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTPS2QQ", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTPS2QQ takes 2 or 3 operands")
    }
    // Each matching form below registers a candidate encoder via p.add;
    // more than one form may match the same operand combination.
    // VCVTPS2QQ m256/m32bcst, zmm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7b)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTPS2QQ {er}, ymm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXYMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62 escape followed by the payload
            // bytes; the XOR folds in the register-extension bits, which the
            // EVEX format stores inverted.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | (vcode(v[0]) << 5) | kcode(v[2]) | 0x18)
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTPS2QQ ymm, zmm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPS2QQ m64/m32bcst, xmm{k}{z}
    if len(vv) == 0 && isM64M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7b)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VCVTPS2QQ m128/m32bcst, ymm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7b)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTPS2QQ xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPS2QQ xmm, ymm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VCVTPS2QQ")
    }
    return p
}
 38657  
// VCVTPS2UDQ performs "Convert Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values".
//
// Mnemonic        : VCVTPS2UDQ
// Supported forms : (7 forms)
//
//    * VCVTPS2UDQ m512/m32bcst, zmm{k}{z}    [AVX512F]
//    * VCVTPS2UDQ {er}, zmm, zmm{k}{z}       [AVX512F]
//    * VCVTPS2UDQ zmm, zmm{k}{z}             [AVX512F]
//    * VCVTPS2UDQ m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTPS2UDQ m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTPS2UDQ xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VCVTPS2UDQ ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VCVTPS2UDQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional trailing operand (vv[0]) is only used by the
    // embedded-rounding {er} form; all other forms take two operands.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTPS2UDQ", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTPS2UDQ", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTPS2UDQ takes 2 or 3 operands")
    }
    // Each matching form below registers a candidate encoder via p.add;
    // more than one form may match the same operand combination.
    // VCVTPS2UDQ m512/m32bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VCVTPS2UDQ {er}, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62 escape followed by the payload
            // bytes; the XOR folds in the register-extension bits, which the
            // EVEX format stores inverted.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[2]) << 7) | (vcode(v[0]) << 5) | kcode(v[2]) | 0x18)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTPS2UDQ zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPS2UDQ m128/m32bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTPS2UDQ m256/m32bcst, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTPS2UDQ xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPS2UDQ ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VCVTPS2UDQ")
    }
    return p
}
 38765  
// VCVTPS2UQQ performs "Convert Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values".
//
// Mnemonic        : VCVTPS2UQQ
// Supported forms : (7 forms)
//
//    * VCVTPS2UQQ m256/m32bcst, zmm{k}{z}    [AVX512DQ]
//    * VCVTPS2UQQ {er}, ymm, zmm{k}{z}       [AVX512DQ]
//    * VCVTPS2UQQ ymm, zmm{k}{z}             [AVX512DQ]
//    * VCVTPS2UQQ m64/m32bcst, xmm{k}{z}     [AVX512DQ,AVX512VL]
//    * VCVTPS2UQQ m128/m32bcst, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VCVTPS2UQQ xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VCVTPS2UQQ xmm, ymm{k}{z}             [AVX512DQ,AVX512VL]
//
func (self *Program) VCVTPS2UQQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional third operand is only present for the {er} (embedded-rounding) form.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTPS2UQQ", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTPS2UQQ", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTPS2UQQ takes 2 or 3 operands")
    }
    // Each operand-pattern check below registers an encoder via p.add when the
    // actual operands match that form; if no pattern matches, the p.len check
    // at the end panics.
    // VCVTPS2UQQ m256/m32bcst, zmm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: m.evex builds the EVEX prefix; the last argument of
            // mrsd is the disp8 compression scale (memory operand width in bytes).
            m.evex(0b01, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTPS2UQQ {er}, ymm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXYMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix: 0x62 escape, P0 (inverted register
            // extension bits), P1 (map/pp/W), P2. v[0] is the rounding mode,
            // folded into the rounding-control bits (vcode(v[0]) << 5) with the
            // 0x18 constant setting EVEX.b for static rounding.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | (vcode(v[0]) << 5) | kcode(v[2]) | 0x18)
            m.emit(0x79)
            // ModRM: mod=11 (register direct), reg=destination, rm=source.
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTPS2UQQ ymm, zmm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form; the 0x08/0x28/0x48 constants below differ only in
            // the vector-length (L'L) bits: 0x48 selects 512-bit.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPS2UQQ m64/m32bcst, xmm{k}{z}
    if len(vv) == 0 && isM64M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // disp8 scale 8 matches the 8-byte (m64) memory operand.
            m.evex(0b01, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VCVTPS2UQQ m128/m32bcst, ymm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTPS2UQQ xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit register form (length bits 0x08).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPS2UQQ xmm, ymm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form (length bits 0x28).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VCVTPS2UQQ")
    }
    return p
}
 38873  
// VCVTQQ2PD performs "Convert Packed Quadword Integers to Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VCVTQQ2PD
// Supported forms : (7 forms)
//
//    * VCVTQQ2PD m512/m64bcst, zmm{k}{z}    [AVX512DQ]
//    * VCVTQQ2PD {er}, zmm, zmm{k}{z}       [AVX512DQ]
//    * VCVTQQ2PD zmm, zmm{k}{z}             [AVX512DQ]
//    * VCVTQQ2PD m128/m64bcst, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VCVTQQ2PD m256/m64bcst, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VCVTQQ2PD xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VCVTQQ2PD ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
//
func (self *Program) VCVTQQ2PD(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional third operand is only present for the {er} (embedded-rounding) form.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTQQ2PD", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTQQ2PD", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTQQ2PD takes 2 or 3 operands")
    }
    // Each operand-pattern check below registers an encoder via p.add when the
    // actual operands match that form; if no pattern matches, the p.len check
    // at the end panics. Opcode is 0xe6 in every form.
    // VCVTQQ2PD m512/m64bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: m.evex builds the EVEX prefix; the last argument of
            // mrsd is the disp8 compression scale (memory operand width in bytes).
            m.evex(0b01, 0x86, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VCVTQQ2PD {er}, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62 escape, P0 (inverted register
            // extension bits), P1, P2. v[0] is the rounding mode, folded into
            // the rounding-control bits (vcode(v[0]) << 5) with 0x18 setting EVEX.b.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfe)
            m.emit((zcode(v[2]) << 7) | (vcode(v[0]) << 5) | kcode(v[2]) | 0x18)
            m.emit(0xe6)
            // ModRM: mod=11 (register direct), reg=destination, rm=source.
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTQQ2PD zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form; the 0x08/0x28/0x48 constants below differ only in
            // the vector-length (L'L) bits: 0x48 selects 512-bit.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTQQ2PD m128/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x86, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTQQ2PD m256/m64bcst, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x86, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTQQ2PD xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit register form (length bits 0x08).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTQQ2PD ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form (length bits 0x28).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VCVTQQ2PD")
    }
    return p
}
 38981  
// VCVTQQ2PS performs "Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VCVTQQ2PS
// Supported forms : (7 forms)
//
//    * VCVTQQ2PS m512/m64bcst, ymm{k}{z}    [AVX512DQ]
//    * VCVTQQ2PS {er}, zmm, ymm{k}{z}       [AVX512DQ]
//    * VCVTQQ2PS zmm, ymm{k}{z}             [AVX512DQ]
//    * VCVTQQ2PS m128/m64bcst, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VCVTQQ2PS m256/m64bcst, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VCVTQQ2PS xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VCVTQQ2PS ymm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//
func (self *Program) VCVTQQ2PS(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional third operand is only present for the {er} (embedded-rounding) form.
    // Note the destination is one width class narrower than the source (qword
    // lanes narrow to dword lanes), hence zmm -> ymm, ymm -> xmm pairings.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTQQ2PS", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTQQ2PS", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTQQ2PS takes 2 or 3 operands")
    }
    // Each operand-pattern check below registers an encoder via p.add when the
    // actual operands match that form; if no pattern matches, the p.len check
    // at the end panics. Opcode is 0x5b in every form.
    // VCVTQQ2PS m512/m64bcst, ymm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: m.evex builds the EVEX prefix; the last argument of
            // mrsd is the disp8 compression scale (memory operand width in bytes).
            m.evex(0b01, 0x84, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VCVTQQ2PS {er}, zmm, ymm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isYMMkz(vv[0]) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62 escape, P0 (inverted register
            // extension bits), P1, P2. v[0] is the rounding mode, folded into
            // the rounding-control bits (vcode(v[0]) << 5) with 0x18 setting EVEX.b.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfc)
            m.emit((zcode(v[2]) << 7) | (vcode(v[0]) << 5) | kcode(v[2]) | 0x18)
            m.emit(0x5b)
            // ModRM: mod=11 (register direct), reg=destination, rm=source.
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTQQ2PS zmm, ymm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form; the 0x08/0x28/0x48 constants below differ only in
            // the vector-length (L'L) bits: 0x48 selects the 512-bit source length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfc)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTQQ2PS m128/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x84, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTQQ2PS m256/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x84, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTQQ2PS xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit register form (length bits 0x08).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfc)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTQQ2PS ymm, xmm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form (length bits 0x28).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfc)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VCVTQQ2PS")
    }
    return p
}
 39089  
// VCVTSD2SI performs "Convert Scalar Double-Precision FP Value to Integer".
//
// Mnemonic        : VCVTSD2SI
// Supported forms : (10 forms)
//
//    * VCVTSD2SI xmm, r32          [AVX]
//    * VCVTSD2SI m64, r32          [AVX]
//    * VCVTSD2SI xmm, r64          [AVX]
//    * VCVTSD2SI m64, r64          [AVX]
//    * VCVTSD2SI m64, r32          [AVX512F]
//    * VCVTSD2SI m64, r64          [AVX512F]
//    * VCVTSD2SI {er}, xmm, r32    [AVX512F]
//    * VCVTSD2SI {er}, xmm, r64    [AVX512F]
//    * VCVTSD2SI xmm, r32          [AVX512F]
//    * VCVTSD2SI xmm, r64          [AVX512F]
//
func (self *Program) VCVTSD2SI(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional third operand is only present for the {er} (embedded-rounding)
    // forms. Some operand patterns (e.g. m64, r32) are matched by both an AVX
    // (VEX) and an AVX-512 (EVEX) encoder; both get registered via p.add.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTSD2SI", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTSD2SI", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTSD2SI takes 2 or 3 operands")
    }
    // VCVTSD2SI xmm, r32
    if len(vv) == 0 && isXMM(v0) && isReg32(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // AVX form: 2-byte VEX prefix, opcode 0x2d, register-direct ModRM.
            m.vex2(3, hcode(v[1]), v[0], 0)
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTSD2SI m64, r32
    if len(vv) == 0 && isM64(v0) && isReg32(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x2d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTSD2SI xmm, r64
    if len(vv) == 0 && isXMM(v0) && isReg64(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix (0xc4): the 64-bit destination needs
            // VEX.W=1 (carried in the 0xfb byte), which 2-byte VEX cannot express.
            m.emit(0xc4)
            m.emit(0xe1 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfb)
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTSD2SI m64, r64
    if len(vv) == 0 && isM64(v0) && isReg64(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b1, 0x83, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x2d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTSD2SI m64, r32
    if len(vv) == 0 && isM64(v0) && isReg32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; GPR destination, so mask/zero/broadcast are 0.
            // The trailing 8 of mrsd is the disp8 compression scale (m64 operand).
            m.evex(0b01, 0x07, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x2d)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VCVTSD2SI m64, r64
    if len(vv) == 0 && isM64(v0) && isReg64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x2d)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VCVTSD2SI {er}, xmm, r32
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isReg32(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix; v[0] is the rounding mode, folded into the
            // rounding-control bits (vcode(v[0]) << 5) with 0x18 setting EVEX.b.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7f)
            m.emit((vcode(v[0]) << 5) | 0x18)
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTSD2SI {er}, xmm, r64
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isReg64(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same as the r32 {er} form except P1 is 0xff (W=1, 64-bit destination).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xff)
            m.emit((vcode(v[0]) << 5) | 0x18)
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTSD2SI xmm, r32
    if len(vv) == 0 && isEVEXXMM(v0) && isReg32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7f)
            m.emit(0x48)
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTSD2SI xmm, r64
    if len(vv) == 0 && isEVEXXMM(v0) && isReg64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xff)
            m.emit(0x48)
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VCVTSD2SI")
    }
    return p
}
 39232  
// VCVTSD2SS performs "Convert Scalar Double-Precision FP Value to Scalar Single-Precision FP Value".
//
// Mnemonic        : VCVTSD2SS
// Supported forms : (5 forms)
//
//    * VCVTSD2SS xmm, xmm, xmm                [AVX]
//    * VCVTSD2SS m64, xmm, xmm                [AVX]
//    * VCVTSD2SS m64, xmm, xmm{k}{z}          [AVX512F]
//    * VCVTSD2SS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VCVTSD2SS xmm, xmm, xmm{k}{z}          [AVX512F]
//
func (self *Program) VCVTSD2SS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Three-operand instruction; the optional fourth operand is only present
    // for the {er} (embedded-rounding) form.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTSD2SS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VCVTSD2SS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VCVTSD2SS takes 3 or 4 operands")
    }
    // Each matching form registers an encoder via p.add; opcode is 0x5a in
    // every form. The first source register travels in the VEX/EVEX vvvv
    // field (hlcode of the middle operand).
    // VCVTSD2SS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // AVX form: 2-byte VEX prefix, register-direct ModRM.
            m.vex2(3, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VCVTSD2SS m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5a)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VCVTSD2SS m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; the trailing 8 of mrsd is the disp8 compression
            // scale (m64 operand).
            m.evex(0b01, 0x87, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x5a)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VCVTSD2SS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix; v[0] is the rounding mode, folded into
            // the rounding-control bits (vcode(v[0]) << 5) with 0x10 setting
            // EVEX.b. vvvv (and its high bit via ecode) carries the first source.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xff ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x5a)
            // ModRM: mod=11 (register direct), reg=destination, rm=second source.
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VCVTSD2SS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xff ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x5a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VCVTSD2SS")
    }
    return p
}
 39312  
// VCVTSD2USI performs "Convert Scalar Double-Precision Floating-Point Value to Unsigned Doubleword Integer".
//
// Mnemonic        : VCVTSD2USI
// Supported forms : (6 forms)
//
//    * VCVTSD2USI m64, r32          [AVX512F]
//    * VCVTSD2USI m64, r64          [AVX512F]
//    * VCVTSD2USI {er}, xmm, r32    [AVX512F]
//    * VCVTSD2USI {er}, xmm, r64    [AVX512F]
//    * VCVTSD2USI xmm, r32          [AVX512F]
//    * VCVTSD2USI xmm, r64          [AVX512F]
//
func (self *Program) VCVTSD2USI(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // EVEX-only instruction (no VEX/AVX forms). The optional third operand is
    // only present for the {er} (embedded-rounding) forms.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTSD2USI", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTSD2USI", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTSD2USI takes 2 or 3 operands")
    }
    // Each matching form registers an encoder via p.add; opcode is 0x79 in
    // every form. The r32/r64 pairs differ only in the EVEX.W bit (P1 byte
    // 0x7f vs 0xff, or evex() mode 0x07 vs 0x87).
    // VCVTSD2USI m64, r32
    if len(vv) == 0 && isM64(v0) && isReg32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form; GPR destination, so mask/zero/broadcast are 0. The
            // trailing 8 of mrsd is the disp8 compression scale (m64 operand).
            m.evex(0b01, 0x07, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VCVTSD2USI m64, r64
    if len(vv) == 0 && isM64(v0) && isReg64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VCVTSD2USI {er}, xmm, r32
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isReg32(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix; v[0] is the rounding mode, folded into the
            // rounding-control bits (vcode(v[0]) << 5) with 0x18 setting EVEX.b.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7f)
            m.emit((vcode(v[0]) << 5) | 0x18)
            m.emit(0x79)
            // ModRM: mod=11 (register direct), reg=destination GPR, rm=source xmm.
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTSD2USI {er}, xmm, r64
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isReg64(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xff)
            m.emit((vcode(v[0]) << 5) | 0x18)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTSD2USI xmm, r32
    if len(vv) == 0 && isEVEXXMM(v0) && isReg32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7f)
            m.emit(0x48)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTSD2USI xmm, r64
    if len(vv) == 0 && isEVEXXMM(v0) && isReg64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xff)
            m.emit(0x48)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VCVTSD2USI")
    }
    return p
}
 39409  
 39410  // VCVTSI2SD performs "Convert Dword Integer to Scalar Double-Precision FP Value".
 39411  //
 39412  // Mnemonic        : VCVTSI2SD
 39413  // Supported forms : (9 forms)
 39414  //
 39415  //    * VCVTSI2SD r32, xmm, xmm          [AVX]
 39416  //    * VCVTSI2SD r64, xmm, xmm          [AVX]
 39417  //    * VCVTSI2SD m32, xmm, xmm          [AVX]
 39418  //    * VCVTSI2SD m64, xmm, xmm          [AVX]
 39419  //    * VCVTSI2SD r32, xmm, xmm          [AVX512F]
 39420  //    * VCVTSI2SD m32, xmm, xmm          [AVX512F]
 39421  //    * VCVTSI2SD m64, xmm, xmm          [AVX512F]
 39422  //    * VCVTSI2SD {er}, r64, xmm, xmm    [AVX512F]
 39423  //    * VCVTSI2SD r64, xmm, xmm          [AVX512F]
 39424  //
func (self *Program) VCVTSI2SD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional trailing operand is only used by the 4-operand {er}
    // (embedded rounding) form; any other arity is rejected up front.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTSI2SD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VCVTSI2SD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VCVTSI2SD takes 3 or 4 operands")
    }
    // Each branch below matches one documented operand form (source first,
    // destination last) and registers an encoder closure that emits the raw
    // instruction bytes for that form.
    // VCVTSI2SD r32, xmm, xmm
    if len(vv) == 0 && isReg32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x2a)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: register-direct, reg=dst, rm=src
        })
    }
    // VCVTSI2SD r64, xmm, xmm
    if len(vv) == 0 && isReg64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix: the REX.W-equivalent bit needed
            // by the 64-bit source cannot be expressed in the 2-byte VEX form.
            m.emit(0xc4)
            m.emit(0xe1 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfb ^ (hlcode(v[1]) << 3))
            m.emit(0x2a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VCVTSI2SD m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x2a)
            m.mrsd(lcode(v[2]), addr(v[0]), 1) // VEX forms: displacement scale factor 1
        })
    }
    // VCVTSI2SD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b1, 0x83, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x2a)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VCVTSI2SD r32, xmm, xmm
    if len(vv) == 0 && isReg32(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62) // EVEX prefix escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7f ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | 0x00)
            m.emit(0x2a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VCVTSI2SD m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x07, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), 0, 0, 0)
            m.emit(0x2a)
            m.mrsd(lcode(v[2]), addr(v[0]), 4) // EVEX disp8 compressed by operand size (4 bytes)
        })
    }
    // VCVTSI2SD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), 0, 0, 0)
            m.emit(0x2a)
            m.mrsd(lcode(v[2]), addr(v[0]), 8) // EVEX disp8 compressed by operand size (8 bytes)
        })
    }
    // VCVTSI2SD {er}, r64, xmm, xmm
    if len(vv) == 1 && isER(v0) && isReg64(v1) && isEVEXXMM(v2) && isEVEXXMM(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xff ^ (hlcode(v[2]) << 3))
            m.emit((vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | 0x10) // rounding mode from {er}, b-bit set
            m.emit(0x2a)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VCVTSI2SD r64, xmm, xmm
    if len(vv) == 0 && isReg64(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xff ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | 0x40)
            m.emit(0x2a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No branch matched: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VCVTSI2SD")
    }
    return p
}
 39538  
 39539  // VCVTSI2SS performs "Convert Dword Integer to Scalar Single-Precision FP Value".
 39540  //
 39541  // Mnemonic        : VCVTSI2SS
 39542  // Supported forms : (10 forms)
 39543  //
 39544  //    * VCVTSI2SS r32, xmm, xmm          [AVX]
 39545  //    * VCVTSI2SS r64, xmm, xmm          [AVX]
 39546  //    * VCVTSI2SS m32, xmm, xmm          [AVX]
 39547  //    * VCVTSI2SS m64, xmm, xmm          [AVX]
 39548  //    * VCVTSI2SS m32, xmm, xmm          [AVX512F]
 39549  //    * VCVTSI2SS m64, xmm, xmm          [AVX512F]
 39550  //    * VCVTSI2SS {er}, r32, xmm, xmm    [AVX512F]
 39551  //    * VCVTSI2SS {er}, r64, xmm, xmm    [AVX512F]
 39552  //    * VCVTSI2SS r32, xmm, xmm          [AVX512F]
 39553  //    * VCVTSI2SS r64, xmm, xmm          [AVX512F]
 39554  //
func (self *Program) VCVTSI2SS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional trailing operand is only used by the 4-operand {er}
    // (embedded rounding) forms; any other arity is rejected up front.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTSI2SS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VCVTSI2SS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VCVTSI2SS takes 3 or 4 operands")
    }
    // Each branch below matches one documented operand form and registers an
    // encoder closure that emits the raw instruction bytes for that form.
    // VCVTSI2SS r32, xmm, xmm
    if len(vv) == 0 && isReg32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x2a)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: register-direct, reg=dst, rm=src
        })
    }
    // VCVTSI2SS r64, xmm, xmm
    if len(vv) == 0 && isReg64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix: the REX.W-equivalent bit needed
            // by the 64-bit source cannot be expressed in the 2-byte VEX form.
            m.emit(0xc4)
            m.emit(0xe1 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfa ^ (hlcode(v[1]) << 3))
            m.emit(0x2a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VCVTSI2SS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x2a)
            m.mrsd(lcode(v[2]), addr(v[0]), 1) // VEX forms: displacement scale factor 1
        })
    }
    // VCVTSI2SS m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b1, 0x82, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x2a)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VCVTSI2SS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), 0, 0, 0)
            m.emit(0x2a)
            m.mrsd(lcode(v[2]), addr(v[0]), 4) // EVEX disp8 compressed by operand size (4 bytes)
        })
    }
    // VCVTSI2SS m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x86, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), 0, 0, 0)
            m.emit(0x2a)
            m.mrsd(lcode(v[2]), addr(v[0]), 8) // EVEX disp8 compressed by operand size (8 bytes)
        })
    }
    // VCVTSI2SS {er}, r32, xmm, xmm
    if len(vv) == 1 && isER(v0) && isReg32(v1) && isEVEXXMM(v2) && isEVEXXMM(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62) // EVEX prefix escape byte
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7e ^ (hlcode(v[2]) << 3))
            m.emit((vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | 0x10) // rounding mode from {er}, b-bit set
            m.emit(0x2a)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VCVTSI2SS {er}, r64, xmm, xmm
    if len(vv) == 1 && isER(v0) && isReg64(v1) && isEVEXXMM(v2) && isEVEXXMM(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfe ^ (hlcode(v[2]) << 3))
            m.emit((vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | 0x10)
            m.emit(0x2a)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VCVTSI2SS r32, xmm, xmm
    if len(vv) == 0 && isReg32(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | 0x40)
            m.emit(0x2a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VCVTSI2SS r64, xmm, xmm
    if len(vv) == 0 && isReg64(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfe ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | 0x40)
            m.emit(0x2a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No branch matched: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VCVTSI2SS")
    }
    return p
}
 39681  
 39682  // VCVTSS2SD performs "Convert Scalar Single-Precision FP Value to Scalar Double-Precision FP Value".
 39683  //
 39684  // Mnemonic        : VCVTSS2SD
 39685  // Supported forms : (5 forms)
 39686  //
 39687  //    * VCVTSS2SD xmm, xmm, xmm                 [AVX]
 39688  //    * VCVTSS2SD m32, xmm, xmm                 [AVX]
 39689  //    * VCVTSS2SD m32, xmm, xmm{k}{z}           [AVX512F]
 39690  //    * VCVTSS2SD {sae}, xmm, xmm, xmm{k}{z}    [AVX512F]
 39691  //    * VCVTSS2SD xmm, xmm, xmm{k}{z}           [AVX512F]
 39692  //
func (self *Program) VCVTSS2SD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional trailing operand is only used by the 4-operand {sae}
    // (suppress-all-exceptions) form; any other arity is rejected up front.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTSS2SD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VCVTSS2SD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VCVTSS2SD takes 3 or 4 operands")
    }
    // Each branch below matches one documented operand form and registers an
    // encoder closure that emits the raw instruction bytes for that form.
    // VCVTSS2SD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5a)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: register-direct, reg=dst, rm=src
        })
    }
    // VCVTSS2SD m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5a)
            m.mrsd(lcode(v[2]), addr(v[0]), 1) // VEX forms: displacement scale factor 1
        })
    }
    // VCVTSS2SD m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // kcode/zcode carry the destination's opmask and zeroing selectors.
            m.evex(0b01, 0x06, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x5a)
            m.mrsd(lcode(v[2]), addr(v[0]), 4) // EVEX disp8 compressed by operand size (4 bytes)
        })
    }
    // VCVTSS2SD {sae}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62) // EVEX prefix escape byte
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7e ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10) // b-bit set for {sae}
            m.emit(0x5a)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VCVTSS2SD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x5a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No branch matched: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VCVTSS2SD")
    }
    return p
}
 39761  
 39762  // VCVTSS2SI performs "Convert Scalar Single-Precision FP Value to Dword Integer".
 39763  //
 39764  // Mnemonic        : VCVTSS2SI
 39765  // Supported forms : (10 forms)
 39766  //
 39767  //    * VCVTSS2SI xmm, r32          [AVX]
 39768  //    * VCVTSS2SI m32, r32          [AVX]
 39769  //    * VCVTSS2SI xmm, r64          [AVX]
 39770  //    * VCVTSS2SI m32, r64          [AVX]
 39771  //    * VCVTSS2SI m32, r32          [AVX512F]
 39772  //    * VCVTSS2SI m32, r64          [AVX512F]
 39773  //    * VCVTSS2SI {er}, xmm, r32    [AVX512F]
 39774  //    * VCVTSS2SI {er}, xmm, r64    [AVX512F]
 39775  //    * VCVTSS2SI xmm, r32          [AVX512F]
 39776  //    * VCVTSS2SI xmm, r64          [AVX512F]
 39777  //
func (self *Program) VCVTSS2SI(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional trailing operand is only used by the 3-operand {er}
    // (embedded rounding) forms; any other arity is rejected up front.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTSS2SI", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTSS2SI", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTSS2SI takes 2 or 3 operands")
    }
    // Each branch below matches one documented operand form and registers an
    // encoder closure that emits the raw instruction bytes for that form.
    // VCVTSS2SI xmm, r32
    if len(vv) == 0 && isXMM(v0) && isReg32(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[1]), v[0], 0)
            m.emit(0x2d)                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: register-direct, reg=dst, rm=src
        })
    }
    // VCVTSS2SI m32, r32
    if len(vv) == 0 && isM32(v0) && isReg32(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x2d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1) // VEX forms: displacement scale factor 1
        })
    }
    // VCVTSS2SI xmm, r64
    if len(vv) == 0 && isXMM(v0) && isReg64(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix: the REX.W-equivalent bit needed
            // by the 64-bit destination cannot be expressed in 2-byte VEX.
            m.emit(0xc4)
            m.emit(0xe1 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfa)
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTSS2SI m32, r64
    if len(vv) == 0 && isM32(v0) && isReg64(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b1, 0x82, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x2d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTSS2SI m32, r32
    if len(vv) == 0 && isM32(v0) && isReg32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x2d)
            m.mrsd(lcode(v[1]), addr(v[0]), 4) // EVEX disp8 compressed by memory operand size (4 bytes)
        })
    }
    // VCVTSS2SI m32, r64
    if len(vv) == 0 && isM32(v0) && isReg64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x86, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x2d)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VCVTSS2SI {er}, xmm, r32
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isReg32(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62) // EVEX prefix escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e)
            m.emit((vcode(v[0]) << 5) | 0x18) // rounding mode from {er}, b-bit set
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTSS2SI {er}, xmm, r64
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isReg64(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfe)
            m.emit((vcode(v[0]) << 5) | 0x18)
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTSS2SI xmm, r32
    if len(vv) == 0 && isEVEXXMM(v0) && isReg32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit(0x48)
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTSS2SI xmm, r64
    if len(vv) == 0 && isEVEXXMM(v0) && isReg64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit(0x48)
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No branch matched: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VCVTSS2SI")
    }
    return p
}
 39904  
 39905  // VCVTSS2USI performs "Convert Scalar Single-Precision Floating-Point Value to Unsigned Doubleword Integer".
 39906  //
 39907  // Mnemonic        : VCVTSS2USI
 39908  // Supported forms : (6 forms)
 39909  //
 39910  //    * VCVTSS2USI m32, r32          [AVX512F]
 39911  //    * VCVTSS2USI m32, r64          [AVX512F]
 39912  //    * VCVTSS2USI {er}, xmm, r32    [AVX512F]
 39913  //    * VCVTSS2USI {er}, xmm, r64    [AVX512F]
 39914  //    * VCVTSS2USI xmm, r32          [AVX512F]
 39915  //    * VCVTSS2USI xmm, r64          [AVX512F]
 39916  //
func (self *Program) VCVTSS2USI(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional trailing operand is only used by the 3-operand {er}
    // (embedded rounding) forms; any other arity is rejected up front.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTSS2USI", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTSS2USI", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTSS2USI takes 2 or 3 operands")
    }
    // Every form of this instruction is AVX-512 only (EVEX-encoded);
    // each branch registers an encoder closure emitting the raw bytes.
    // VCVTSS2USI m32, r32
    if len(vv) == 0 && isM32(v0) && isReg32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x79) // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 4) // EVEX disp8 compressed by memory operand size (4 bytes)
        })
    }
    // VCVTSS2USI m32, r64
    if len(vv) == 0 && isM32(v0) && isReg64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x86, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VCVTSS2USI {er}, xmm, r32
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isReg32(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62) // EVEX prefix escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e)
            m.emit((vcode(v[0]) << 5) | 0x18) // rounding mode from {er}, b-bit set
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1])) // ModRM: register-direct, reg=dst, rm=src
        })
    }
    // VCVTSS2USI {er}, xmm, r64
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isReg64(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfe)
            m.emit((vcode(v[0]) << 5) | 0x18)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTSS2USI xmm, r32
    if len(vv) == 0 && isEVEXXMM(v0) && isReg32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit(0x48)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTSS2USI xmm, r64
    if len(vv) == 0 && isEVEXXMM(v0) && isReg64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit(0x48)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No branch matched: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VCVTSS2USI")
    }
    return p
}
 40001  
 40002  // VCVTTPD2DQ performs "Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers".
 40003  //
 40004  // Mnemonic        : VCVTTPD2DQ
 40005  // Supported forms : (11 forms)
 40006  //
 40007  //    * VCVTTPD2DQ xmm, xmm                   [AVX]
 40008  //    * VCVTTPD2DQ ymm, xmm                   [AVX]
 40009  //    * VCVTTPD2DQ m128, xmm                  [AVX]
 40010  //    * VCVTTPD2DQ m256, xmm                  [AVX]
 40011  //    * VCVTTPD2DQ m512/m64bcst, ymm{k}{z}    [AVX512F]
 40012  //    * VCVTTPD2DQ {sae}, zmm, ymm{k}{z}      [AVX512F]
 40013  //    * VCVTTPD2DQ zmm, ymm{k}{z}             [AVX512F]
 40014  //    * VCVTTPD2DQ m128/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
 40015  //    * VCVTTPD2DQ m256/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
 40016  //    * VCVTTPD2DQ xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 40017  //    * VCVTTPD2DQ ymm, xmm{k}{z}             [AVX512F,AVX512VL]
 40018  //
func (self *Program) VCVTTPD2DQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional trailing operand is only used by the 3-operand {sae}
    // (suppress-all-exceptions) form; any other arity is rejected up front.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTTPD2DQ", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTTPD2DQ", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTTPD2DQ takes 2 or 3 operands")
    }
    // Each branch below matches one documented operand form and registers an
    // encoder closure that emits the raw instruction bytes for that form.
    // VCVTTPD2DQ xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[1]), v[0], 0)
            m.emit(0xe6)                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: register-direct, reg=dst, rm=src
        })
    }
    // VCVTTPD2DQ ymm, xmm
    if len(vv) == 0 && isYMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[1]), v[0], 0) // first arg 5 (vs 1) selects the 256-bit source length
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTPD2DQ m128, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[1]), addr(v[0]), 0)
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 1) // VEX forms: displacement scale factor 1
        })
    }
    // VCVTTPD2DQ m256, xmm
    if len(vv) == 0 && isM256(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[1]), addr(v[0]), 0)
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTTPD2DQ m512/m64bcst, ymm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // bcode carries the broadcast flag for the m64bcst variant;
            // kcode/zcode carry the destination's opmask and zeroing selectors.
            m.evex(0b01, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 64) // EVEX disp8 compressed by vector size (64 bytes)
        })
    }
    // VCVTTPD2DQ {sae}, zmm, ymm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isZMM(v1) && isYMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62) // EVEX prefix escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x18) // b-bit set for {sae}
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTTPD2DQ zmm, ymm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTPD2DQ m128/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 16) // EVEX disp8 compressed by vector size (16 bytes)
        })
    }
    // VCVTTPD2DQ m256/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 32) // EVEX disp8 compressed by vector size (32 bytes)
        })
    }
    // VCVTTPD2DQ xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTPD2DQ ymm, xmm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No branch matched: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VCVTTPD2DQ")
    }
    return p
}
 40153  
 40154  // VCVTTPD2QQ performs "Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Quadword Integers".
 40155  //
 40156  // Mnemonic        : VCVTTPD2QQ
 40157  // Supported forms : (7 forms)
 40158  //
 40159  //    * VCVTTPD2QQ m512/m64bcst, zmm{k}{z}    [AVX512DQ]
 40160  //    * VCVTTPD2QQ {sae}, zmm, zmm{k}{z}      [AVX512DQ]
 40161  //    * VCVTTPD2QQ zmm, zmm{k}{z}             [AVX512DQ]
 40162  //    * VCVTTPD2QQ m128/m64bcst, xmm{k}{z}    [AVX512DQ,AVX512VL]
 40163  //    * VCVTTPD2QQ m256/m64bcst, ymm{k}{z}    [AVX512DQ,AVX512VL]
 40164  //    * VCVTTPD2QQ xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
 40165  //    * VCVTTPD2QQ ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
 40166  //
 40167  func (self *Program) VCVTTPD2QQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
 40168      var p *Instruction
 40169      switch len(vv) {
 40170          case 0  : p = self.alloc("VCVTTPD2QQ", 2, Operands { v0, v1 })
 40171          case 1  : p = self.alloc("VCVTTPD2QQ", 3, Operands { v0, v1, vv[0] })
 40172          default : panic("instruction VCVTTPD2QQ takes 2 or 3 operands")
 40173      }
            // Each matching operand pattern below registers a candidate encoder
            // via p.add; if nothing matched, p.len stays 0 and we panic below.
 40174      // VCVTTPD2QQ m512/m64bcst, zmm{k}{z}
 40175      if len(vv) == 0 && isM512M64bcst(v0) && isZMMkz(v1) {
 40176          self.require(ISA_AVX512DQ)
 40177          p.domain = DomainAVX
 40178          p.add(0, func(m *_Encoding, v []interface{}) {
                    // Memory-source form: evex() emits the EVEX prefix, mrsd emits
                    // ModRM/SIB + displacement. The trailing 64 is presumably the
                    // disp8 compression scale (operand width in bytes) — confirm in mrsd.
 40179              m.evex(0b01, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
 40180              m.emit(0x7a)
 40181              m.mrsd(lcode(v[1]), addr(v[0]), 64)
 40182          })
 40183      }
 40184      // VCVTTPD2QQ {sae}, zmm, zmm{k}{z}
 40185      if len(vv) == 1 && isSAE(v0) && isZMM(v1) && isZMMkz(vv[0]) {
 40186          self.require(ISA_AVX512DQ)
 40187          p.domain = DomainAVX
 40188          p.add(0, func(m *_Encoding, v []interface{}) {
                    // {sae} form: operand order is shifted — v[1] is the source
                    // register, v[2] the destination. 0x62 opens a hand-rolled
                    // 4-byte EVEX prefix; 0x18 presumably selects the SAE
                    // control bits — verify against the EVEX spec.
 40189              m.emit(0x62)
 40190              m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
 40191              m.emit(0xfd)
 40192              m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x18)
 40193              m.emit(0x7a)
 40194              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
 40195          })
 40196      }
 40197      // VCVTTPD2QQ zmm, zmm{k}{z}
 40198      if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
 40199          self.require(ISA_AVX512DQ)
 40200          p.domain = DomainAVX
 40201          p.add(0, func(m *_Encoding, v []interface{}) {
                    // Register-source form: same prefix layout as the {sae} form
                    // above, but with 0x48 in the control byte (0x28/0x08 in the
                    // narrower forms below — presumably the vector-length bits).
 40202              m.emit(0x62)
 40203              m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
 40204              m.emit(0xfd)
 40205              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
 40206              m.emit(0x7a)
 40207              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 40208          })
 40209      }
 40210      // VCVTTPD2QQ m128/m64bcst, xmm{k}{z}
 40211      if len(vv) == 0 && isM128M64bcst(v0) && isXMMkz(v1) {
 40212          self.require(ISA_AVX512VL | ISA_AVX512DQ)
 40213          p.domain = DomainAVX
 40214          p.add(0, func(m *_Encoding, v []interface{}) {
 40215              m.evex(0b01, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
 40216              m.emit(0x7a)
 40217              m.mrsd(lcode(v[1]), addr(v[0]), 16)
 40218          })
 40219      }
 40220      // VCVTTPD2QQ m256/m64bcst, ymm{k}{z}
 40221      if len(vv) == 0 && isM256M64bcst(v0) && isYMMkz(v1) {
 40222          self.require(ISA_AVX512VL | ISA_AVX512DQ)
 40223          p.domain = DomainAVX
 40224          p.add(0, func(m *_Encoding, v []interface{}) {
 40225              m.evex(0b01, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
 40226              m.emit(0x7a)
 40227              m.mrsd(lcode(v[1]), addr(v[0]), 32)
 40228          })
 40229      }
 40230      // VCVTTPD2QQ xmm, xmm{k}{z}
 40231      if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
 40232          self.require(ISA_AVX512VL | ISA_AVX512DQ)
 40233          p.domain = DomainAVX
 40234          p.add(0, func(m *_Encoding, v []interface{}) {
 40235              m.emit(0x62)
 40236              m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
 40237              m.emit(0xfd)
 40238              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
 40239              m.emit(0x7a)
 40240              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 40241          })
 40242      }
 40243      // VCVTTPD2QQ ymm, ymm{k}{z}
 40244      if len(vv) == 0 && isEVEXYMM(v0) && isYMMkz(v1) {
 40245          self.require(ISA_AVX512VL | ISA_AVX512DQ)
 40246          p.domain = DomainAVX
 40247          p.add(0, func(m *_Encoding, v []interface{}) {
 40248              m.emit(0x62)
 40249              m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
 40250              m.emit(0xfd)
 40251              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
 40252              m.emit(0x7a)
 40253              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 40254          })
 40255      }
 40256      if p.len == 0 {
 40257          panic("invalid operands for VCVTTPD2QQ")
 40258      }
 40259      return p
 40260  }
 40261  
 40262  // VCVTTPD2UDQ performs "Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers".
 40263  //
 40264  // Mnemonic        : VCVTTPD2UDQ
 40265  // Supported forms : (7 forms)
 40266  //
 40267  //    * VCVTTPD2UDQ m512/m64bcst, ymm{k}{z}    [AVX512F]
 40268  //    * VCVTTPD2UDQ {sae}, zmm, ymm{k}{z}      [AVX512F]
 40269  //    * VCVTTPD2UDQ zmm, ymm{k}{z}             [AVX512F]
 40270  //    * VCVTTPD2UDQ m128/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
 40271  //    * VCVTTPD2UDQ m256/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
 40272  //    * VCVTTPD2UDQ xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 40273  //    * VCVTTPD2UDQ ymm, xmm{k}{z}             [AVX512F,AVX512VL]
 40274  //
 40275  func (self *Program) VCVTTPD2UDQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
 40276      var p *Instruction
 40277      switch len(vv) {
 40278          case 0  : p = self.alloc("VCVTTPD2UDQ", 2, Operands { v0, v1 })
 40279          case 1  : p = self.alloc("VCVTTPD2UDQ", 3, Operands { v0, v1, vv[0] })
 40280          default : panic("instruction VCVTTPD2UDQ takes 2 or 3 operands")
 40281      }
            // Each matching operand pattern below registers a candidate encoder
            // via p.add; if nothing matched, p.len stays 0 and we panic below.
            // Note the destination narrows (zmm -> ymm, ymm -> xmm): this
            // instruction converts doubles to dword integers.
 40282      // VCVTTPD2UDQ m512/m64bcst, ymm{k}{z}
 40283      if len(vv) == 0 && isM512M64bcst(v0) && isYMMkz(v1) {
 40284          self.require(ISA_AVX512F)
 40285          p.domain = DomainAVX
 40286          p.add(0, func(m *_Encoding, v []interface{}) {
                    // Memory-source form: evex() emits the EVEX prefix, mrsd emits
                    // ModRM/SIB + displacement. The trailing 64 is presumably the
                    // disp8 compression scale (operand width in bytes) — confirm in mrsd.
 40287              m.evex(0b01, 0x84, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
 40288              m.emit(0x78)
 40289              m.mrsd(lcode(v[1]), addr(v[0]), 64)
 40290          })
 40291      }
 40292      // VCVTTPD2UDQ {sae}, zmm, ymm{k}{z}
 40293      if len(vv) == 1 && isSAE(v0) && isZMM(v1) && isYMMkz(vv[0]) {
 40294          self.require(ISA_AVX512F)
 40295          p.domain = DomainAVX
 40296          p.add(0, func(m *_Encoding, v []interface{}) {
                    // {sae} form: operand order is shifted — v[1] is the source
                    // register, v[2] the destination. 0x62 opens a hand-rolled
                    // 4-byte EVEX prefix; 0x18 presumably selects the SAE
                    // control bits — verify against the EVEX spec.
 40297              m.emit(0x62)
 40298              m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
 40299              m.emit(0xfc)
 40300              m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x18)
 40301              m.emit(0x78)
 40302              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
 40303          })
 40304      }
 40305      // VCVTTPD2UDQ zmm, ymm{k}{z}
 40306      if len(vv) == 0 && isZMM(v0) && isYMMkz(v1) {
 40307          self.require(ISA_AVX512F)
 40308          p.domain = DomainAVX
 40309          p.add(0, func(m *_Encoding, v []interface{}) {
                    // Register-source form: same prefix layout as the {sae} form
                    // above, but with 0x48 in the control byte (0x28/0x08 in the
                    // narrower forms below — presumably the vector-length bits).
 40310              m.emit(0x62)
 40311              m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
 40312              m.emit(0xfc)
 40313              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
 40314              m.emit(0x78)
 40315              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 40316          })
 40317      }
 40318      // VCVTTPD2UDQ m128/m64bcst, xmm{k}{z}
 40319      if len(vv) == 0 && isM128M64bcst(v0) && isXMMkz(v1) {
 40320          self.require(ISA_AVX512VL | ISA_AVX512F)
 40321          p.domain = DomainAVX
 40322          p.add(0, func(m *_Encoding, v []interface{}) {
 40323              m.evex(0b01, 0x84, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
 40324              m.emit(0x78)
 40325              m.mrsd(lcode(v[1]), addr(v[0]), 16)
 40326          })
 40327      }
 40328      // VCVTTPD2UDQ m256/m64bcst, xmm{k}{z}
 40329      if len(vv) == 0 && isM256M64bcst(v0) && isXMMkz(v1) {
 40330          self.require(ISA_AVX512VL | ISA_AVX512F)
 40331          p.domain = DomainAVX
 40332          p.add(0, func(m *_Encoding, v []interface{}) {
 40333              m.evex(0b01, 0x84, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
 40334              m.emit(0x78)
 40335              m.mrsd(lcode(v[1]), addr(v[0]), 32)
 40336          })
 40337      }
 40338      // VCVTTPD2UDQ xmm, xmm{k}{z}
 40339      if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
 40340          self.require(ISA_AVX512VL | ISA_AVX512F)
 40341          p.domain = DomainAVX
 40342          p.add(0, func(m *_Encoding, v []interface{}) {
 40343              m.emit(0x62)
 40344              m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
 40345              m.emit(0xfc)
 40346              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
 40347              m.emit(0x78)
 40348              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 40349          })
 40350      }
 40351      // VCVTTPD2UDQ ymm, xmm{k}{z}
 40352      if len(vv) == 0 && isEVEXYMM(v0) && isXMMkz(v1) {
 40353          self.require(ISA_AVX512VL | ISA_AVX512F)
 40354          p.domain = DomainAVX
 40355          p.add(0, func(m *_Encoding, v []interface{}) {
 40356              m.emit(0x62)
 40357              m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
 40358              m.emit(0xfc)
 40359              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
 40360              m.emit(0x78)
 40361              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 40362          })
 40363      }
 40364      if p.len == 0 {
 40365          panic("invalid operands for VCVTTPD2UDQ")
 40366      }
 40367      return p
 40368  }
 40369  
 40370  // VCVTTPD2UQQ performs "Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers".
 40371  //
 40372  // Mnemonic        : VCVTTPD2UQQ
 40373  // Supported forms : (7 forms)
 40374  //
 40375  //    * VCVTTPD2UQQ m512/m64bcst, zmm{k}{z}    [AVX512DQ]
 40376  //    * VCVTTPD2UQQ {sae}, zmm, zmm{k}{z}      [AVX512DQ]
 40377  //    * VCVTTPD2UQQ zmm, zmm{k}{z}             [AVX512DQ]
 40378  //    * VCVTTPD2UQQ m128/m64bcst, xmm{k}{z}    [AVX512DQ,AVX512VL]
 40379  //    * VCVTTPD2UQQ m256/m64bcst, ymm{k}{z}    [AVX512DQ,AVX512VL]
 40380  //    * VCVTTPD2UQQ xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
 40381  //    * VCVTTPD2UQQ ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
 40382  //
 40383  func (self *Program) VCVTTPD2UQQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
 40384      var p *Instruction
 40385      switch len(vv) {
 40386          case 0  : p = self.alloc("VCVTTPD2UQQ", 2, Operands { v0, v1 })
 40387          case 1  : p = self.alloc("VCVTTPD2UQQ", 3, Operands { v0, v1, vv[0] })
 40388          default : panic("instruction VCVTTPD2UQQ takes 2 or 3 operands")
 40389      }
            // Each matching operand pattern below registers a candidate encoder
            // via p.add; if nothing matched, p.len stays 0 and we panic below.
 40390      // VCVTTPD2UQQ m512/m64bcst, zmm{k}{z}
 40391      if len(vv) == 0 && isM512M64bcst(v0) && isZMMkz(v1) {
 40392          self.require(ISA_AVX512DQ)
 40393          p.domain = DomainAVX
 40394          p.add(0, func(m *_Encoding, v []interface{}) {
                    // Memory-source form: evex() emits the EVEX prefix, mrsd emits
                    // ModRM/SIB + displacement. The trailing 64 is presumably the
                    // disp8 compression scale (operand width in bytes) — confirm in mrsd.
 40395              m.evex(0b01, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
 40396              m.emit(0x78)
 40397              m.mrsd(lcode(v[1]), addr(v[0]), 64)
 40398          })
 40399      }
 40400      // VCVTTPD2UQQ {sae}, zmm, zmm{k}{z}
 40401      if len(vv) == 1 && isSAE(v0) && isZMM(v1) && isZMMkz(vv[0]) {
 40402          self.require(ISA_AVX512DQ)
 40403          p.domain = DomainAVX
 40404          p.add(0, func(m *_Encoding, v []interface{}) {
                    // {sae} form: operand order is shifted — v[1] is the source
                    // register, v[2] the destination. 0x62 opens a hand-rolled
                    // 4-byte EVEX prefix; 0x18 presumably selects the SAE
                    // control bits — verify against the EVEX spec.
 40405              m.emit(0x62)
 40406              m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
 40407              m.emit(0xfd)
 40408              m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x18)
 40409              m.emit(0x78)
 40410              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
 40411          })
 40412      }
 40413      // VCVTTPD2UQQ zmm, zmm{k}{z}
 40414      if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
 40415          self.require(ISA_AVX512DQ)
 40416          p.domain = DomainAVX
 40417          p.add(0, func(m *_Encoding, v []interface{}) {
                    // Register-source form: same prefix layout as the {sae} form
                    // above, but with 0x48 in the control byte (0x28/0x08 in the
                    // narrower forms below — presumably the vector-length bits).
 40418              m.emit(0x62)
 40419              m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
 40420              m.emit(0xfd)
 40421              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
 40422              m.emit(0x78)
 40423              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 40424          })
 40425      }
 40426      // VCVTTPD2UQQ m128/m64bcst, xmm{k}{z}
 40427      if len(vv) == 0 && isM128M64bcst(v0) && isXMMkz(v1) {
 40428          self.require(ISA_AVX512VL | ISA_AVX512DQ)
 40429          p.domain = DomainAVX
 40430          p.add(0, func(m *_Encoding, v []interface{}) {
 40431              m.evex(0b01, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
 40432              m.emit(0x78)
 40433              m.mrsd(lcode(v[1]), addr(v[0]), 16)
 40434          })
 40435      }
 40436      // VCVTTPD2UQQ m256/m64bcst, ymm{k}{z}
 40437      if len(vv) == 0 && isM256M64bcst(v0) && isYMMkz(v1) {
 40438          self.require(ISA_AVX512VL | ISA_AVX512DQ)
 40439          p.domain = DomainAVX
 40440          p.add(0, func(m *_Encoding, v []interface{}) {
 40441              m.evex(0b01, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
 40442              m.emit(0x78)
 40443              m.mrsd(lcode(v[1]), addr(v[0]), 32)
 40444          })
 40445      }
 40446      // VCVTTPD2UQQ xmm, xmm{k}{z}
 40447      if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
 40448          self.require(ISA_AVX512VL | ISA_AVX512DQ)
 40449          p.domain = DomainAVX
 40450          p.add(0, func(m *_Encoding, v []interface{}) {
 40451              m.emit(0x62)
 40452              m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
 40453              m.emit(0xfd)
 40454              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
 40455              m.emit(0x78)
 40456              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 40457          })
 40458      }
 40459      // VCVTTPD2UQQ ymm, ymm{k}{z}
 40460      if len(vv) == 0 && isEVEXYMM(v0) && isYMMkz(v1) {
 40461          self.require(ISA_AVX512VL | ISA_AVX512DQ)
 40462          p.domain = DomainAVX
 40463          p.add(0, func(m *_Encoding, v []interface{}) {
 40464              m.emit(0x62)
 40465              m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
 40466              m.emit(0xfd)
 40467              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
 40468              m.emit(0x78)
 40469              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 40470          })
 40471      }
 40472      if p.len == 0 {
 40473          panic("invalid operands for VCVTTPD2UQQ")
 40474      }
 40475      return p
 40476  }
 40477  
 40478  // VCVTTPS2DQ performs "Convert with Truncation Packed Single-Precision FP Values to Packed Dword Integers".
 40479  //
 40480  // Mnemonic        : VCVTTPS2DQ
 40481  // Supported forms : (11 forms)
 40482  //
 40483  //    * VCVTTPS2DQ xmm, xmm                   [AVX]
 40484  //    * VCVTTPS2DQ m128, xmm                  [AVX]
 40485  //    * VCVTTPS2DQ ymm, ymm                   [AVX]
 40486  //    * VCVTTPS2DQ m256, ymm                  [AVX]
 40487  //    * VCVTTPS2DQ m512/m32bcst, zmm{k}{z}    [AVX512F]
 40488  //    * VCVTTPS2DQ {sae}, zmm, zmm{k}{z}      [AVX512F]
 40489  //    * VCVTTPS2DQ zmm, zmm{k}{z}             [AVX512F]
 40490  //    * VCVTTPS2DQ m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
 40491  //    * VCVTTPS2DQ m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
 40492  //    * VCVTTPS2DQ xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 40493  //    * VCVTTPS2DQ ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 40494  //
 40495  func (self *Program) VCVTTPS2DQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
 40496      var p *Instruction
 40497      switch len(vv) {
 40498          case 0  : p = self.alloc("VCVTTPS2DQ", 2, Operands { v0, v1 })
 40499          case 1  : p = self.alloc("VCVTTPS2DQ", 3, Operands { v0, v1, vv[0] })
 40500          default : panic("instruction VCVTTPS2DQ takes 2 or 3 operands")
 40501      }
            // Each matching operand pattern below registers a candidate encoder
            // via p.add; if nothing matched, p.len stays 0 and we panic below.
            // Unlike the PD2*QQ variants, this instruction also has legacy
            // AVX (VEX-encoded) forms, listed first.
 40502      // VCVTTPS2DQ xmm, xmm
 40503      if len(vv) == 0 && isXMM(v0) && isXMM(v1) {
 40504          self.require(ISA_AVX)
 40505          p.domain = DomainAVX
 40506          p.add(0, func(m *_Encoding, v []interface{}) {
                    // AVX form: vex2 emits the VEX prefix (the leading 2/6
                    // presumably folds the prefix/length selector — confirm
                    // in vex2).
 40507              m.vex2(2, hcode(v[1]), v[0], 0)
 40508              m.emit(0x5b)
 40509              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 40510          })
 40511      }
 40512      // VCVTTPS2DQ m128, xmm
 40513      if len(vv) == 0 && isM128(v0) && isXMM(v1) {
 40514          self.require(ISA_AVX)
 40515          p.domain = DomainAVX
 40516          p.add(0, func(m *_Encoding, v []interface{}) {
 40517              m.vex2(2, hcode(v[1]), addr(v[0]), 0)
 40518              m.emit(0x5b)
 40519              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 40520          })
 40521      }
 40522      // VCVTTPS2DQ ymm, ymm
 40523      if len(vv) == 0 && isYMM(v0) && isYMM(v1) {
 40524          self.require(ISA_AVX)
 40525          p.domain = DomainAVX
 40526          p.add(0, func(m *_Encoding, v []interface{}) {
 40527              m.vex2(6, hcode(v[1]), v[0], 0)
 40528              m.emit(0x5b)
 40529              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 40530          })
 40531      }
 40532      // VCVTTPS2DQ m256, ymm
 40533      if len(vv) == 0 && isM256(v0) && isYMM(v1) {
 40534          self.require(ISA_AVX)
 40535          p.domain = DomainAVX
 40536          p.add(0, func(m *_Encoding, v []interface{}) {
 40537              m.vex2(6, hcode(v[1]), addr(v[0]), 0)
 40538              m.emit(0x5b)
 40539              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 40540          })
 40541      }
 40542      // VCVTTPS2DQ m512/m32bcst, zmm{k}{z}
 40543      if len(vv) == 0 && isM512M32bcst(v0) && isZMMkz(v1) {
 40544          self.require(ISA_AVX512F)
 40545          p.domain = DomainAVX
 40546          p.add(0, func(m *_Encoding, v []interface{}) {
                    // EVEX memory-source form: mrsd's trailing 64 is presumably
                    // the disp8 compression scale (operand width in bytes).
 40547              m.evex(0b01, 0x06, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
 40548              m.emit(0x5b)
 40549              m.mrsd(lcode(v[1]), addr(v[0]), 64)
 40550          })
 40551      }
 40552      // VCVTTPS2DQ {sae}, zmm, zmm{k}{z}
 40553      if len(vv) == 1 && isSAE(v0) && isZMM(v1) && isZMMkz(vv[0]) {
 40554          self.require(ISA_AVX512F)
 40555          p.domain = DomainAVX
 40556          p.add(0, func(m *_Encoding, v []interface{}) {
                    // {sae} form: operand order is shifted — v[1] is the source
                    // register, v[2] the destination. 0x62 opens a hand-rolled
                    // 4-byte EVEX prefix; 0x18 presumably selects the SAE
                    // control bits — verify against the EVEX spec.
 40557              m.emit(0x62)
 40558              m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
 40559              m.emit(0x7e)
 40560              m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x18)
 40561              m.emit(0x5b)
 40562              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
 40563          })
 40564      }
 40565      // VCVTTPS2DQ zmm, zmm{k}{z}
 40566      if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
 40567          self.require(ISA_AVX512F)
 40568          p.domain = DomainAVX
 40569          p.add(0, func(m *_Encoding, v []interface{}) {
                    // Register-source form: same prefix layout as the {sae} form
                    // above, but with 0x48 in the control byte (0x28/0x08 in the
                    // narrower forms below — presumably the vector-length bits).
 40570              m.emit(0x62)
 40571              m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
 40572              m.emit(0x7e)
 40573              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
 40574              m.emit(0x5b)
 40575              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 40576          })
 40577      }
 40578      // VCVTTPS2DQ m128/m32bcst, xmm{k}{z}
 40579      if len(vv) == 0 && isM128M32bcst(v0) && isXMMkz(v1) {
 40580          self.require(ISA_AVX512VL | ISA_AVX512F)
 40581          p.domain = DomainAVX
 40582          p.add(0, func(m *_Encoding, v []interface{}) {
 40583              m.evex(0b01, 0x06, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
 40584              m.emit(0x5b)
 40585              m.mrsd(lcode(v[1]), addr(v[0]), 16)
 40586          })
 40587      }
 40588      // VCVTTPS2DQ m256/m32bcst, ymm{k}{z}
 40589      if len(vv) == 0 && isM256M32bcst(v0) && isYMMkz(v1) {
 40590          self.require(ISA_AVX512VL | ISA_AVX512F)
 40591          p.domain = DomainAVX
 40592          p.add(0, func(m *_Encoding, v []interface{}) {
 40593              m.evex(0b01, 0x06, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
 40594              m.emit(0x5b)
 40595              m.mrsd(lcode(v[1]), addr(v[0]), 32)
 40596          })
 40597      }
 40598      // VCVTTPS2DQ xmm, xmm{k}{z}
 40599      if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
 40600          self.require(ISA_AVX512VL | ISA_AVX512F)
 40601          p.domain = DomainAVX
 40602          p.add(0, func(m *_Encoding, v []interface{}) {
 40603              m.emit(0x62)
 40604              m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
 40605              m.emit(0x7e)
 40606              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
 40607              m.emit(0x5b)
 40608              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 40609          })
 40610      }
 40611      // VCVTTPS2DQ ymm, ymm{k}{z}
 40612      if len(vv) == 0 && isEVEXYMM(v0) && isYMMkz(v1) {
 40613          self.require(ISA_AVX512VL | ISA_AVX512F)
 40614          p.domain = DomainAVX
 40615          p.add(0, func(m *_Encoding, v []interface{}) {
 40616              m.emit(0x62)
 40617              m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
 40618              m.emit(0x7e)
 40619              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
 40620              m.emit(0x5b)
 40621              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 40622          })
 40623      }
 40624      if p.len == 0 {
 40625          panic("invalid operands for VCVTTPS2DQ")
 40626      }
 40627      return p
 40628  }
 40629  
 40630  // VCVTTPS2QQ performs "Convert with Truncation Packed Single Precision Floating-Point Values to Packed Signed Quadword Integer Values".
 40631  //
 40632  // Mnemonic        : VCVTTPS2QQ
 40633  // Supported forms : (7 forms)
 40634  //
 40635  //    * VCVTTPS2QQ m256/m32bcst, zmm{k}{z}    [AVX512DQ]
 40636  //    * VCVTTPS2QQ {sae}, ymm, zmm{k}{z}      [AVX512DQ]
 40637  //    * VCVTTPS2QQ ymm, zmm{k}{z}             [AVX512DQ]
 40638  //    * VCVTTPS2QQ m64/m32bcst, xmm{k}{z}     [AVX512DQ,AVX512VL]
 40639  //    * VCVTTPS2QQ m128/m32bcst, ymm{k}{z}    [AVX512DQ,AVX512VL]
 40640  //    * VCVTTPS2QQ xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
 40641  //    * VCVTTPS2QQ xmm, ymm{k}{z}             [AVX512DQ,AVX512VL]
 40642  //
 40643  func (self *Program) VCVTTPS2QQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
 40644      var p *Instruction
 40645      switch len(vv) {
 40646          case 0  : p = self.alloc("VCVTTPS2QQ", 2, Operands { v0, v1 })
 40647          case 1  : p = self.alloc("VCVTTPS2QQ", 3, Operands { v0, v1, vv[0] })
 40648          default : panic("instruction VCVTTPS2QQ takes 2 or 3 operands")
 40649      }
            // Each matching operand pattern below registers a candidate encoder
            // via p.add; if nothing matched, p.len stays 0 and we panic below.
            // Note the destination widens (ymm -> zmm, xmm -> ymm): this
            // instruction converts singles to quadword integers.
 40650      // VCVTTPS2QQ m256/m32bcst, zmm{k}{z}
 40651      if len(vv) == 0 && isM256M32bcst(v0) && isZMMkz(v1) {
 40652          self.require(ISA_AVX512DQ)
 40653          p.domain = DomainAVX
 40654          p.add(0, func(m *_Encoding, v []interface{}) {
                    // Memory-source form: evex() emits the EVEX prefix, mrsd emits
                    // ModRM/SIB + displacement. The trailing 32 is presumably the
                    // disp8 compression scale (operand width in bytes) — confirm in mrsd.
 40655              m.evex(0b01, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
 40656              m.emit(0x7a)
 40657              m.mrsd(lcode(v[1]), addr(v[0]), 32)
 40658          })
 40659      }
 40660      // VCVTTPS2QQ {sae}, ymm, zmm{k}{z}
 40661      if len(vv) == 1 && isSAE(v0) && isEVEXYMM(v1) && isZMMkz(vv[0]) {
 40662          self.require(ISA_AVX512DQ)
 40663          p.domain = DomainAVX
 40664          p.add(0, func(m *_Encoding, v []interface{}) {
                    // {sae} form: operand order is shifted — v[1] is the source
                    // register, v[2] the destination. 0x62 opens a hand-rolled
                    // 4-byte EVEX prefix; 0x18 presumably selects the SAE
                    // control bits — verify against the EVEX spec.
 40665              m.emit(0x62)
 40666              m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
 40667              m.emit(0x7d)
 40668              m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x18)
 40669              m.emit(0x7a)
 40670              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
 40671          })
 40672      }
 40673      // VCVTTPS2QQ ymm, zmm{k}{z}
 40674      if len(vv) == 0 && isEVEXYMM(v0) && isZMMkz(v1) {
 40675          self.require(ISA_AVX512DQ)
 40676          p.domain = DomainAVX
 40677          p.add(0, func(m *_Encoding, v []interface{}) {
                    // Register-source form: same prefix layout as the {sae} form
                    // above, but with 0x48 in the control byte (0x28/0x08 in the
                    // narrower forms below — presumably the vector-length bits).
 40678              m.emit(0x62)
 40679              m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
 40680              m.emit(0x7d)
 40681              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
 40682              m.emit(0x7a)
 40683              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 40684          })
 40685      }
 40686      // VCVTTPS2QQ m64/m32bcst, xmm{k}{z}
 40687      if len(vv) == 0 && isM64M32bcst(v0) && isXMMkz(v1) {
 40688          self.require(ISA_AVX512VL | ISA_AVX512DQ)
 40689          p.domain = DomainAVX
 40690          p.add(0, func(m *_Encoding, v []interface{}) {
 40691              m.evex(0b01, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
 40692              m.emit(0x7a)
 40693              m.mrsd(lcode(v[1]), addr(v[0]), 8)
 40694          })
 40695      }
 40696      // VCVTTPS2QQ m128/m32bcst, ymm{k}{z}
 40697      if len(vv) == 0 && isM128M32bcst(v0) && isYMMkz(v1) {
 40698          self.require(ISA_AVX512VL | ISA_AVX512DQ)
 40699          p.domain = DomainAVX
 40700          p.add(0, func(m *_Encoding, v []interface{}) {
 40701              m.evex(0b01, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
 40702              m.emit(0x7a)
 40703              m.mrsd(lcode(v[1]), addr(v[0]), 16)
 40704          })
 40705      }
 40706      // VCVTTPS2QQ xmm, xmm{k}{z}
 40707      if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
 40708          self.require(ISA_AVX512VL | ISA_AVX512DQ)
 40709          p.domain = DomainAVX
 40710          p.add(0, func(m *_Encoding, v []interface{}) {
 40711              m.emit(0x62)
 40712              m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
 40713              m.emit(0x7d)
 40714              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
 40715              m.emit(0x7a)
 40716              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 40717          })
 40718      }
 40719      // VCVTTPS2QQ xmm, ymm{k}{z}
 40720      if len(vv) == 0 && isEVEXXMM(v0) && isYMMkz(v1) {
 40721          self.require(ISA_AVX512VL | ISA_AVX512DQ)
 40722          p.domain = DomainAVX
 40723          p.add(0, func(m *_Encoding, v []interface{}) {
 40724              m.emit(0x62)
 40725              m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
 40726              m.emit(0x7d)
 40727              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
 40728              m.emit(0x7a)
 40729              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 40730          })
 40731      }
 40732      if p.len == 0 {
 40733          panic("invalid operands for VCVTTPS2QQ")
 40734      }
 40735      return p
 40736  }
 40737  
 40738  // VCVTTPS2UDQ performs "Convert with Truncation Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values".
 40739  //
 40740  // Mnemonic        : VCVTTPS2UDQ
 40741  // Supported forms : (7 forms)
 40742  //
 40743  //    * VCVTTPS2UDQ m512/m32bcst, zmm{k}{z}    [AVX512F]
 40744  //    * VCVTTPS2UDQ {sae}, zmm, zmm{k}{z}      [AVX512F]
 40745  //    * VCVTTPS2UDQ zmm, zmm{k}{z}             [AVX512F]
 40746  //    * VCVTTPS2UDQ m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
 40747  //    * VCVTTPS2UDQ m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
 40748  //    * VCVTTPS2UDQ xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 40749  //    * VCVTTPS2UDQ ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 40750  //
func (self *Program) VCVTTPS2UDQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    // Dispatch on operand count: the 3-operand variants carry a leading
    // {sae} (suppress-all-exceptions) marker in v0.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VCVTTPS2UDQ", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTTPS2UDQ", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTTPS2UDQ takes 2 or 3 operands")
    }
    // Each form below registers an encoder when its operand pattern matches:
    // memory forms build the EVEX prefix via m.evex(), register forms emit
    // the four prefix bytes inline.
    // VCVTTPS2UDQ m512/m32bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x78)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)   // disp8 scaled by 64 — presumably the 512-bit operand width
        })
    }
    // VCVTTPS2UDQ {sae}, zmm, zmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4))) // P0: inverted R/X/B/R' bits; low bits select the 0F map
            m.emit(0x7c)                                                                   // P1: W=0, vvvv=1111 (unused), pp=00
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x18)                                // P2: z/opmask; 0x18 sets b → {sae} on a register form
            m.emit(0x78)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))                                  // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VCVTTPS2UDQ zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                                // P2: 0x48 → L'L=10, 512-bit vector length
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTPS2UDQ m128/m32bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x78)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)   // disp8 scaled by 16 (128-bit operand)
        })
    }
    // VCVTTPS2UDQ m256/m32bcst, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x78)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)   // disp8 scaled by 32 (256-bit operand)
        })
    }
    // VCVTTPS2UDQ xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)                                // P2: 0x08 → L'L=00, 128-bit vector length
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTPS2UDQ ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)                                // P2: 0x28 → L'L=01, 256-bit vector length
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VCVTTPS2UDQ")
    }
    return p
}
 40845  
// VCVTTPS2UQQ performs "Convert with Truncation Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values".
//
// Mnemonic        : VCVTTPS2UQQ
// Supported forms : (7 forms)
//
//    * VCVTTPS2UQQ m256/m32bcst, zmm{k}{z}    [AVX512DQ]
//    * VCVTTPS2UQQ {sae}, ymm, zmm{k}{z}      [AVX512DQ]
//    * VCVTTPS2UQQ ymm, zmm{k}{z}             [AVX512DQ]
//    * VCVTTPS2UQQ m64/m32bcst, xmm{k}{z}     [AVX512DQ,AVX512VL]
//    * VCVTTPS2UQQ m128/m32bcst, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VCVTTPS2UQQ xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VCVTTPS2UQQ xmm, ymm{k}{z}             [AVX512DQ,AVX512VL]
//
func (self *Program) VCVTTPS2UQQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    // Dispatch on operand count: the 3-operand variant carries a leading
    // {sae} (suppress-all-exceptions) marker in v0.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VCVTTPS2UQQ", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTTPS2UQQ", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTTPS2UQQ takes 2 or 3 operands")
    }
    // Source elements are 32-bit floats widening to 64-bit integers, so each
    // memory form reads only half a destination-width of data (e.g. m256→zmm).
    // VCVTTPS2UQQ m256/m32bcst, zmm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x78)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)   // disp8 scaled by 32 (256-bit source operand)
        })
    }
    // VCVTTPS2UQQ {sae}, ymm, zmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isEVEXYMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4))) // P0: inverted R/X/B/R' bits; low bits select the 0F map
            m.emit(0x7d)                                                                   // P1: W=0, vvvv=1111 (unused), pp=01 (66 prefix)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x18)                                // P2: z/opmask; 0x18 sets b → {sae} on a register form
            m.emit(0x78)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))                                  // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VCVTTPS2UQQ ymm, zmm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                                // P2: 0x48 → L'L=10, 512-bit vector length
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTPS2UQQ m64/m32bcst, xmm{k}{z}
    if len(vv) == 0 && isM64M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x78)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)    // disp8 scaled by 8 (64-bit source operand)
        })
    }
    // VCVTTPS2UQQ m128/m32bcst, ymm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x78)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)   // disp8 scaled by 16 (128-bit source operand)
        })
    }
    // VCVTTPS2UQQ xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)                                // P2: 0x08 → L'L=00, 128-bit vector length
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTPS2UQQ xmm, ymm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)                                // P2: 0x28 → L'L=01, 256-bit vector length
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VCVTTPS2UQQ")
    }
    return p
}
 40953  
// VCVTTSD2SI performs "Convert with Truncation Scalar Double-Precision FP Value to Signed Integer".
//
// Mnemonic        : VCVTTSD2SI
// Supported forms : (10 forms)
//
//    * VCVTTSD2SI xmm, r32           [AVX]
//    * VCVTTSD2SI m64, r32           [AVX]
//    * VCVTTSD2SI xmm, r64           [AVX]
//    * VCVTTSD2SI m64, r64           [AVX]
//    * VCVTTSD2SI m64, r32           [AVX512F]
//    * VCVTTSD2SI m64, r64           [AVX512F]
//    * VCVTTSD2SI {sae}, xmm, r32    [AVX512F]
//    * VCVTTSD2SI {sae}, xmm, r64    [AVX512F]
//    * VCVTTSD2SI xmm, r32           [AVX512F]
//    * VCVTTSD2SI xmm, r64           [AVX512F]
//
func (self *Program) VCVTTSD2SI(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    // Dispatch on operand count: the 3-operand variants carry a leading
    // {sae} (suppress-all-exceptions) marker in v0.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VCVTTSD2SI", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTTSD2SI", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTTSD2SI takes 2 or 3 operands")
    }
    // VEX-encoded (AVX) forms come first, then the EVEX (AVX-512) forms;
    // note the destination-width split: W=0 prefixes for r32, W=1 for r64.
    // VCVTTSD2SI xmm, r32
    if len(vv) == 0 && isXMM(v0) && isReg32(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[1]), v[0], 0)       // two-byte VEX; 3 presumably selects the F2 mandatory prefix — confirm against m.vex2
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst GPR, rm=src xmm
        })
    }
    // VCVTTSD2SI m64, r32
    if len(vv) == 0 && isM64(v0) && isReg32(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x2c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)    // VEX form: no disp8 compression (scale 1)
        })
    }
    // VCVTTSD2SI xmm, r64
    if len(vv) == 0 && isXMM(v0) && isReg64(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // three-byte VEX prefix (needed for W=1)
            m.emit(0xe1 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))  // inverted R/X/B bits + 0F map select
            m.emit(0xfb)                                            // W=1, vvvv=1111, L=0, pp=11 (F2)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTSD2SI m64, r64
    if len(vv) == 0 && isM64(v0) && isReg64(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b1, 0x83, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x2c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTTSD2SI m64, r32
    if len(vv) == 0 && isM64(v0) && isReg32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x07, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x2c)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)    // EVEX form: disp8 scaled by 8 (64-bit operand)
        })
    }
    // VCVTTSD2SI m64, r64
    if len(vv) == 0 && isM64(v0) && isReg64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x2c)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VCVTTSD2SI {sae}, xmm, r32
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isReg32(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4))) // P0: inverted R/X/B/R' bits; low bits select the 0F map
            m.emit(0x7f)                                                                   // P1: W=0, vvvv=1111 (unused), pp=11 (F2)
            m.emit(0x18)                                                                   // P2: b set → {sae}; no masking on GPR destination
            m.emit(0x2c)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))                                  // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VCVTTSD2SI {sae}, xmm, r64
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isReg64(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xff)                                                                   // P1: W=1 (64-bit destination), pp=11 (F2)
            m.emit(0x18)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTTSD2SI xmm, r32
    if len(vv) == 0 && isEVEXXMM(v0) && isReg32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7f)
            m.emit(0x48)                                                                   // P2: L'L=10; no masking on GPR destination
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTSD2SI xmm, r64
    if len(vv) == 0 && isEVEXXMM(v0) && isReg64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xff)
            m.emit(0x48)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VCVTTSD2SI")
    }
    return p
}
 41096  
// VCVTTSD2USI performs "Convert with Truncation Scalar Double-Precision Floating-Point Value to Unsigned Integer".
//
// Mnemonic        : VCVTTSD2USI
// Supported forms : (6 forms)
//
//    * VCVTTSD2USI m64, r32           [AVX512F]
//    * VCVTTSD2USI m64, r64           [AVX512F]
//    * VCVTTSD2USI {sae}, xmm, r32    [AVX512F]
//    * VCVTTSD2USI {sae}, xmm, r64    [AVX512F]
//    * VCVTTSD2USI xmm, r32           [AVX512F]
//    * VCVTTSD2USI xmm, r64           [AVX512F]
//
func (self *Program) VCVTTSD2USI(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    // Dispatch on operand count: the 3-operand variants carry a leading
    // {sae} (suppress-all-exceptions) marker in v0.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VCVTTSD2USI", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTTSD2USI", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTTSD2USI takes 2 or 3 operands")
    }
    // All forms are EVEX-only (AVX-512F); W=0 prefixes encode an r32
    // destination, W=1 an r64 destination. Opcode 0x78 (vs 0x2c for the
    // signed variant).
    // VCVTTSD2USI m64, r32
    if len(vv) == 0 && isM64(v0) && isReg32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x07, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x78)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)    // disp8 scaled by 8 (64-bit operand)
        })
    }
    // VCVTTSD2USI m64, r64
    if len(vv) == 0 && isM64(v0) && isReg64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x78)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VCVTTSD2USI {sae}, xmm, r32
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isReg32(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4))) // P0: inverted R/X/B/R' bits; low bits select the 0F map
            m.emit(0x7f)                                                                   // P1: W=0, vvvv=1111 (unused), pp=11 (F2)
            m.emit(0x18)                                                                   // P2: b set → {sae}; no masking on GPR destination
            m.emit(0x78)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))                                  // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VCVTTSD2USI {sae}, xmm, r64
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isReg64(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xff)                                                                   // P1: W=1 (64-bit destination), pp=11 (F2)
            m.emit(0x18)
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTTSD2USI xmm, r32
    if len(vv) == 0 && isEVEXXMM(v0) && isReg32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7f)
            m.emit(0x48)                                                                   // P2: L'L=10; no masking on GPR destination
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTSD2USI xmm, r64
    if len(vv) == 0 && isEVEXXMM(v0) && isReg64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xff)
            m.emit(0x48)
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VCVTTSD2USI")
    }
    return p
}
 41193  
// VCVTTSS2SI performs "Convert with Truncation Scalar Single-Precision FP Value to Dword Integer".
//
// Mnemonic        : VCVTTSS2SI
// Supported forms : (10 forms)
//
//    * VCVTTSS2SI xmm, r32           [AVX]
//    * VCVTTSS2SI m32, r32           [AVX]
//    * VCVTTSS2SI xmm, r64           [AVX]
//    * VCVTTSS2SI m32, r64           [AVX]
//    * VCVTTSS2SI m32, r32           [AVX512F]
//    * VCVTTSS2SI m32, r64           [AVX512F]
//    * VCVTTSS2SI {sae}, xmm, r32    [AVX512F]
//    * VCVTTSS2SI {sae}, xmm, r64    [AVX512F]
//    * VCVTTSS2SI xmm, r32           [AVX512F]
//    * VCVTTSS2SI xmm, r64           [AVX512F]
//
func (self *Program) VCVTTSS2SI(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    // Dispatch on operand count: the 3-operand variants carry a leading
    // {sae} (suppress-all-exceptions) marker in v0.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VCVTTSS2SI", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTTSS2SI", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTTSS2SI takes 2 or 3 operands")
    }
    // Same layout as VCVTTSD2SI but with the F3 mandatory prefix (scalar
    // single) and 32-bit memory source; VEX forms first, then EVEX.
    // VCVTTSS2SI xmm, r32
    if len(vv) == 0 && isXMM(v0) && isReg32(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[1]), v[0], 0)       // two-byte VEX; 2 presumably selects the F3 mandatory prefix — confirm against m.vex2
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst GPR, rm=src xmm
        })
    }
    // VCVTTSS2SI m32, r32
    if len(vv) == 0 && isM32(v0) && isReg32(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x2c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)    // VEX form: no disp8 compression (scale 1)
        })
    }
    // VCVTTSS2SI xmm, r64
    if len(vv) == 0 && isXMM(v0) && isReg64(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // three-byte VEX prefix (needed for W=1)
            m.emit(0xe1 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))  // inverted R/X/B bits + 0F map select
            m.emit(0xfa)                                            // W=1, vvvv=1111, L=0, pp=10 (F3)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTSS2SI m32, r64
    if len(vv) == 0 && isM32(v0) && isReg64(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b1, 0x82, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x2c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTTSS2SI m32, r32
    if len(vv) == 0 && isM32(v0) && isReg32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x2c)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)    // EVEX form: disp8 scaled by 4 (32-bit operand)
        })
    }
    // VCVTTSS2SI m32, r64
    if len(vv) == 0 && isM32(v0) && isReg64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x86, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x2c)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VCVTTSS2SI {sae}, xmm, r32
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isReg32(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4))) // P0: inverted R/X/B/R' bits; low bits select the 0F map
            m.emit(0x7e)                                                                   // P1: W=0, vvvv=1111 (unused), pp=10 (F3)
            m.emit(0x18)                                                                   // P2: b set → {sae}; no masking on GPR destination
            m.emit(0x2c)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))                                  // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VCVTTSS2SI {sae}, xmm, r64
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isReg64(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfe)                                                                   // P1: W=1 (64-bit destination), pp=10 (F3)
            m.emit(0x18)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTTSS2SI xmm, r32
    if len(vv) == 0 && isEVEXXMM(v0) && isReg32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit(0x48)                                                                   // P2: L'L=10; no masking on GPR destination
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTSS2SI xmm, r64
    if len(vv) == 0 && isEVEXXMM(v0) && isReg64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit(0x48)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VCVTTSS2SI")
    }
    return p
}
 41336  
// VCVTTSS2USI performs "Convert with Truncation Scalar Single-Precision Floating-Point Value to Unsigned Integer".
//
// Mnemonic        : VCVTTSS2USI
// Supported forms : (6 forms)
//
//    * VCVTTSS2USI m32, r32           [AVX512F]
//    * VCVTTSS2USI m32, r64           [AVX512F]
//    * VCVTTSS2USI {sae}, xmm, r32    [AVX512F]
//    * VCVTTSS2USI {sae}, xmm, r64    [AVX512F]
//    * VCVTTSS2USI xmm, r32           [AVX512F]
//    * VCVTTSS2USI xmm, r64           [AVX512F]
//
func (self *Program) VCVTTSS2USI(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    // Dispatch on operand count: the 3-operand variants carry a leading
    // {sae} (suppress-all-exceptions) marker in v0.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VCVTTSS2USI", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTTSS2USI", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTTSS2USI takes 2 or 3 operands")
    }
    // All forms are EVEX-only (AVX-512F); W=0 prefixes encode an r32
    // destination, W=1 an r64 destination. Opcode 0x78 (vs 0x2c for the
    // signed variant).
    // VCVTTSS2USI m32, r32
    if len(vv) == 0 && isM32(v0) && isReg32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x78)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)    // disp8 scaled by 4 (32-bit operand)
        })
    }
    // VCVTTSS2USI m32, r64
    if len(vv) == 0 && isM32(v0) && isReg64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x86, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x78)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VCVTTSS2USI {sae}, xmm, r32
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isReg32(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4))) // P0: inverted R/X/B/R' bits; low bits select the 0F map
            m.emit(0x7e)                                                                   // P1: W=0, vvvv=1111 (unused), pp=10 (F3)
            m.emit(0x18)                                                                   // P2: b set → {sae}; no masking on GPR destination
            m.emit(0x78)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))                                  // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VCVTTSS2USI {sae}, xmm, r64
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isReg64(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfe)                                                                   // P1: W=1 (64-bit destination), pp=10 (F3)
            m.emit(0x18)
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTTSS2USI xmm, r32
    if len(vv) == 0 && isEVEXXMM(v0) && isReg32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit(0x48)                                                                   // P2: L'L=10; no masking on GPR destination
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTSS2USI xmm, r64
    if len(vv) == 0 && isEVEXXMM(v0) && isReg64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit(0x48)
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VCVTTSS2USI")
    }
    return p
}
 41433  
// VCVTUDQ2PD performs "Convert Packed Unsigned Doubleword Integers to Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VCVTUDQ2PD
// Supported forms : (6 forms)
//
//    * VCVTUDQ2PD m256/m32bcst, zmm{k}{z}    [AVX512F]
//    * VCVTUDQ2PD ymm, zmm{k}{z}             [AVX512F]
//    * VCVTUDQ2PD m64/m32bcst, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VCVTUDQ2PD m128/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTUDQ2PD xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VCVTUDQ2PD xmm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VCVTUDQ2PD(v0 interface{}, v1 interface{}) *Instruction {
    // Fixed 2-operand instruction: no {sae}/{er} variants, so there is no
    // variadic operand-count dispatch here.
    p := self.alloc("VCVTUDQ2PD", 2, Operands { v0, v1 })
    // 32-bit unsigned integers widen to 64-bit doubles, so each memory form
    // reads half a destination-width of data (e.g. m256→zmm); memory forms
    // build the EVEX prefix via m.evex(), register forms emit it inline.
    // VCVTUDQ2PD m256/m32bcst, zmm{k}{z}
    if isM256M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7a)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)   // disp8 scaled by 32 (256-bit source operand)
        })
    }
    // VCVTUDQ2PD ymm, zmm{k}{z}
    if isEVEXYMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // P0: inverted R/X/B/R' bits; low bits select the 0F map
            m.emit(0x7e)                                                                   // P1: W=0, vvvv=1111 (unused), pp=10 (F3)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                                // P2: z/opmask; 0x48 → L'L=10, 512-bit vector length
            m.emit(0x7a)                                                                   // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                                  // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VCVTUDQ2PD m64/m32bcst, xmm{k}{z}
    if isM64M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7a)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)    // disp8 scaled by 8 (64-bit source operand)
        })
    }
    // VCVTUDQ2PD m128/m32bcst, ymm{k}{z}
    if isM128M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7a)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)   // disp8 scaled by 16 (128-bit source operand)
        })
    }
    // VCVTUDQ2PD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)                                // P2: 0x08 → L'L=00, 128-bit vector length
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTUDQ2PD xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)                                // P2: 0x28 → L'L=01, 256-bit vector length
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VCVTUDQ2PD")
    }
    return p
}
 41522  
// VCVTUDQ2PS performs "Convert Packed Unsigned Doubleword Integers to Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VCVTUDQ2PS
// Supported forms : (7 forms)
//
//    * VCVTUDQ2PS m512/m32bcst, zmm{k}{z}    [AVX512F]
//    * VCVTUDQ2PS {er}, zmm, zmm{k}{z}       [AVX512F]
//    * VCVTUDQ2PS zmm, zmm{k}{z}             [AVX512F]
//    * VCVTUDQ2PS m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTUDQ2PS m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTUDQ2PS xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VCVTUDQ2PS ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VCVTUDQ2PS(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    // The optional third operand exists only for the embedded-rounding
    // ({er}, zmm, zmm{k}{z}) form; every form check is guarded by len(vv).
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VCVTUDQ2PS", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTUDQ2PS", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTUDQ2PS takes 2 or 3 operands")
    }
    // VCVTUDQ2PS m512/m32bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: EVEX prefix via m.evex, opcode 0x7A, then
            // ModRM/SIB/displacement with a disp8 scale of 64 bytes.
            m.evex(0b01, 0x07, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7a)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VCVTUDQ2PS {er}, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Embedded-rounding form: the rounding mode from the {er}
            // operand is packed into the prefix via vcode(v[0]) << 5.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7f)
            m.emit((zcode(v[2]) << 7) | (vcode(v[0]) << 5) | kcode(v[2]) | 0x18)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTUDQ2PS zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-emitted 4-byte EVEX prefix (escape 0x62),
            // opcode 0x7A, reg-reg ModRM.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7f)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTUDQ2PS m128/m32bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x07, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7a)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTUDQ2PS m256/m32bcst, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x07, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7a)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTUDQ2PS xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7f)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTUDQ2PS ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7f)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VCVTUDQ2PS")
    }
    return p
}
 41630  
// VCVTUQQ2PD performs "Convert Packed Unsigned Quadword Integers to Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VCVTUQQ2PD
// Supported forms : (7 forms)
//
//    * VCVTUQQ2PD m512/m64bcst, zmm{k}{z}    [AVX512DQ]
//    * VCVTUQQ2PD {er}, zmm, zmm{k}{z}       [AVX512DQ]
//    * VCVTUQQ2PD zmm, zmm{k}{z}             [AVX512DQ]
//    * VCVTUQQ2PD m128/m64bcst, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VCVTUQQ2PD m256/m64bcst, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VCVTUQQ2PD xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VCVTUQQ2PD ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
//
func (self *Program) VCVTUQQ2PD(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    // The optional third operand exists only for the embedded-rounding
    // ({er}, zmm, zmm{k}{z}) form; every form check is guarded by len(vv).
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VCVTUQQ2PD", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTUQQ2PD", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTUQQ2PD takes 2 or 3 operands")
    }
    // VCVTUQQ2PD m512/m64bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: EVEX prefix via m.evex, opcode 0x7A, then
            // ModRM/SIB/displacement with a disp8 scale of 64 bytes.
            m.evex(0b01, 0x86, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7a)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VCVTUQQ2PD {er}, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Embedded-rounding form: the rounding mode from the {er}
            // operand is packed into the prefix via vcode(v[0]) << 5.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfe)
            m.emit((zcode(v[2]) << 7) | (vcode(v[0]) << 5) | kcode(v[2]) | 0x18)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTUQQ2PD zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-emitted 4-byte EVEX prefix (escape 0x62),
            // opcode 0x7A, reg-reg ModRM.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTUQQ2PD m128/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x86, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7a)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTUQQ2PD m256/m64bcst, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x86, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7a)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTUQQ2PD xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTUQQ2PD ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VCVTUQQ2PD")
    }
    return p
}
 41738  
// VCVTUQQ2PS performs "Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VCVTUQQ2PS
// Supported forms : (7 forms)
//
//    * VCVTUQQ2PS m512/m64bcst, ymm{k}{z}    [AVX512DQ]
//    * VCVTUQQ2PS {er}, zmm, ymm{k}{z}       [AVX512DQ]
//    * VCVTUQQ2PS zmm, ymm{k}{z}             [AVX512DQ]
//    * VCVTUQQ2PS m128/m64bcst, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VCVTUQQ2PS m256/m64bcst, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VCVTUQQ2PS xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VCVTUQQ2PS ymm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//
// Note the narrowing: the destination register is one width class below the
// source (quadwords convert to half as many singles).
func (self *Program) VCVTUQQ2PS(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    // The optional third operand exists only for the embedded-rounding
    // ({er}, zmm, ymm{k}{z}) form; every form check is guarded by len(vv).
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VCVTUQQ2PS", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTUQQ2PS", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTUQQ2PS takes 2 or 3 operands")
    }
    // VCVTUQQ2PS m512/m64bcst, ymm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: EVEX prefix via m.evex, opcode 0x7A, then
            // ModRM/SIB/displacement with a disp8 scale of 64 bytes.
            m.evex(0b01, 0x87, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7a)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VCVTUQQ2PS {er}, zmm, ymm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isYMMkz(vv[0]) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Embedded-rounding form: the rounding mode from the {er}
            // operand is packed into the prefix via vcode(v[0]) << 5.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xff)
            m.emit((zcode(v[2]) << 7) | (vcode(v[0]) << 5) | kcode(v[2]) | 0x18)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTUQQ2PS zmm, ymm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-emitted 4-byte EVEX prefix (escape 0x62),
            // opcode 0x7A, reg-reg ModRM.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xff)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTUQQ2PS m128/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7a)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTUQQ2PS m256/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7a)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTUQQ2PS xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xff)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTUQQ2PS ymm, xmm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xff)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VCVTUQQ2PS")
    }
    return p
}
 41846  
// VCVTUSI2SD performs "Convert Unsigned Integer to Scalar Double-Precision Floating-Point Value".
//
// Mnemonic        : VCVTUSI2SD
// Supported forms : (5 forms)
//
//    * VCVTUSI2SD r32, xmm, xmm          [AVX512F]
//    * VCVTUSI2SD m32, xmm, xmm          [AVX512F]
//    * VCVTUSI2SD m64, xmm, xmm          [AVX512F]
//    * VCVTUSI2SD {er}, r64, xmm, xmm    [AVX512F]
//    * VCVTUSI2SD r64, xmm, xmm          [AVX512F]
//
func (self *Program) VCVTUSI2SD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // The optional fourth operand exists only for the embedded-rounding
    // ({er}, r64, xmm, xmm) form; every form check is guarded by len(vv).
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VCVTUSI2SD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VCVTUSI2SD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VCVTUSI2SD takes 3 or 4 operands")
    }
    // VCVTUSI2SD r32, xmm, xmm
    if len(vv) == 0 && isReg32(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-emitted 4-byte EVEX prefix (escape 0x62),
            // opcode 0x7B, reg-reg ModRM; the second source register v[1]
            // is folded into the prefix via hlcode/ecode.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7f ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | 0x00)
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VCVTUSI2SD m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: EVEX prefix via m.evex, opcode 0x7B, then
            // ModRM/SIB/displacement with a disp8 scale of 4 bytes.
            m.evex(0b01, 0x07, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), 0, 0, 0)
            m.emit(0x7b)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VCVTUSI2SD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), 0, 0, 0)
            m.emit(0x7b)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VCVTUSI2SD {er}, r64, xmm, xmm
    if len(vv) == 1 && isER(v0) && isReg64(v1) && isEVEXXMM(v2) && isEVEXXMM(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Embedded-rounding form: the rounding mode from the {er}
            // operand is packed into the prefix via vcode(v[0]) << 5.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xff ^ (hlcode(v[2]) << 3))
            m.emit((vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | 0x10)
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VCVTUSI2SD r64, xmm, xmm
    if len(vv) == 0 && isReg64(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xff ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | 0x40)
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VCVTUSI2SD")
    }
    return p
}
 41929  
// VCVTUSI2SS performs "Convert Unsigned Integer to Scalar Single-Precision Floating-Point Value".
//
// Mnemonic        : VCVTUSI2SS
// Supported forms : (6 forms)
//
//    * VCVTUSI2SS m32, xmm, xmm          [AVX512F]
//    * VCVTUSI2SS m64, xmm, xmm          [AVX512F]
//    * VCVTUSI2SS {er}, r32, xmm, xmm    [AVX512F]
//    * VCVTUSI2SS {er}, r64, xmm, xmm    [AVX512F]
//    * VCVTUSI2SS r32, xmm, xmm          [AVX512F]
//    * VCVTUSI2SS r64, xmm, xmm          [AVX512F]
//
func (self *Program) VCVTUSI2SS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // The optional fourth operand exists only for the embedded-rounding
    // ({er}, ...) forms; every form check is guarded by len(vv).
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VCVTUSI2SS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VCVTUSI2SS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VCVTUSI2SS takes 3 or 4 operands")
    }
    // VCVTUSI2SS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: EVEX prefix via m.evex, opcode 0x7B, then
            // ModRM/SIB/displacement with a disp8 scale of 4 bytes.
            m.evex(0b01, 0x06, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), 0, 0, 0)
            m.emit(0x7b)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VCVTUSI2SS m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x86, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), 0, 0, 0)
            m.emit(0x7b)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VCVTUSI2SS {er}, r32, xmm, xmm
    if len(vv) == 1 && isER(v0) && isReg32(v1) && isEVEXXMM(v2) && isEVEXXMM(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Embedded-rounding form: the rounding mode from the {er}
            // operand is packed into the prefix via vcode(v[0]) << 5.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7e ^ (hlcode(v[2]) << 3))
            m.emit((vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | 0x10)
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VCVTUSI2SS {er}, r64, xmm, xmm
    if len(vv) == 1 && isER(v0) && isReg64(v1) && isEVEXXMM(v2) && isEVEXXMM(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same as the r32 {er} form, but with the 64-bit operand-size
            // variant of the second prefix byte (0xfe vs 0x7e).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfe ^ (hlcode(v[2]) << 3))
            m.emit((vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | 0x10)
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VCVTUSI2SS r32, xmm, xmm
    if len(vv) == 0 && isReg32(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-emitted 4-byte EVEX prefix (escape 0x62),
            // opcode 0x7B, reg-reg ModRM.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | 0x40)
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VCVTUSI2SS r64, xmm, xmm
    if len(vv) == 0 && isReg64(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfe ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | 0x40)
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VCVTUSI2SS")
    }
    return p
}
 42026  
// VDBPSADBW performs "Double Block Packed Sum-Absolute-Differences on Unsigned Bytes".
//
// Mnemonic        : VDBPSADBW
// Supported forms : (6 forms)
//
//    * VDBPSADBW imm8, zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VDBPSADBW imm8, m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VDBPSADBW imm8, xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VDBPSADBW imm8, m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VDBPSADBW imm8, ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VDBPSADBW imm8, m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
func (self *Program) VDBPSADBW(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Always four operands (imm8 selector first); each if-block matches one
    // documented form and registers its encoder, panicking if none matched.
    p := self.alloc("VDBPSADBW", 4, Operands { v0, v1, v2, v3 })
    // VDBPSADBW imm8, zmm, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-emitted 4-byte EVEX prefix (escape 0x62),
            // opcode 0x42, reg-reg ModRM, then the trailing imm8 selector.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VDBPSADBW imm8, m512, zmm, zmm{k}{z}
    if isImm8(v0) && isM512(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: EVEX prefix via m.evex, opcode 0x42, then
            // ModRM/SIB/displacement (disp8 scale 64) and the imm8 selector.
            m.evex(0b11, 0x05, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x42)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VDBPSADBW imm8, xmm, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VDBPSADBW imm8, m128, xmm, xmm{k}{z}
    if isImm8(v0) && isM128(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x42)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VDBPSADBW imm8, ymm, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VDBPSADBW imm8, m256, ymm, ymm{k}{z}
    if isImm8(v0) && isM256(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x42)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VDBPSADBW")
    }
    return p
}
 42121  
 42122  // VDIVPD performs "Divide Packed Double-Precision Floating-Point Values".
 42123  //
 42124  // Mnemonic        : VDIVPD
 42125  // Supported forms : (11 forms)
 42126  //
 42127  //    * VDIVPD xmm, xmm, xmm                   [AVX]
 42128  //    * VDIVPD m128, xmm, xmm                  [AVX]
 42129  //    * VDIVPD ymm, ymm, ymm                   [AVX]
 42130  //    * VDIVPD m256, ymm, ymm                  [AVX]
 42131  //    * VDIVPD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
 42132  //    * VDIVPD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 42133  //    * VDIVPD zmm, zmm, zmm{k}{z}             [AVX512F]
 42134  //    * VDIVPD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 42135  //    * VDIVPD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 42136  //    * VDIVPD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 42137  //    * VDIVPD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 42138  //
func (self *Program) VDIVPD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // A fourth operand selects the {er} form below, where operand 0 is the
    // embedded-rounding specifier rather than a source register/memory operand.
    switch len(vv) {
        case 0  : p = self.alloc("VDIVPD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VDIVPD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VDIVPD takes 3 or 4 operands")
    }
    // NOTE: the form checks below are independent ifs, not an if/else chain.
    // A low register can satisfy both the VEX (isXMM/isYMM) and EVEX
    // (isEVEXXMM/isEVEXYMM) predicates, so several candidate encodings may be
    // added to p; selection among candidates happens outside this function.
    // VDIVPD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VDIVPD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5e)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VDIVPD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VDIVPD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5e)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VDIVPD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5e)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VDIVPD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix emitted by hand for register forms: 0x62 escape plus
            // three payload bytes built from the operands' register codes
            // (memory forms go through m.evex() instead). Field layout
            // presumably follows the Intel EVEX spec — confirm against m.evex().
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VDIVPD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VDIVPD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5e)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VDIVPD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VDIVPD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5e)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VDIVPD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VDIVPD")
    }
    return p
}
 42273  
 42274  // VDIVPS performs "Divide Packed Single-Precision Floating-Point Values".
 42275  //
 42276  // Mnemonic        : VDIVPS
 42277  // Supported forms : (11 forms)
 42278  //
 42279  //    * VDIVPS xmm, xmm, xmm                   [AVX]
 42280  //    * VDIVPS m128, xmm, xmm                  [AVX]
 42281  //    * VDIVPS ymm, ymm, ymm                   [AVX]
 42282  //    * VDIVPS m256, ymm, ymm                  [AVX]
 42283  //    * VDIVPS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
 42284  //    * VDIVPS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 42285  //    * VDIVPS zmm, zmm, zmm{k}{z}             [AVX512F]
 42286  //    * VDIVPS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 42287  //    * VDIVPS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 42288  //    * VDIVPS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 42289  //    * VDIVPS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 42290  //
func (self *Program) VDIVPS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // A fourth operand selects the {er} form below, where operand 0 is the
    // embedded-rounding specifier rather than a source register/memory operand.
    switch len(vv) {
        case 0  : p = self.alloc("VDIVPS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VDIVPS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VDIVPS takes 3 or 4 operands")
    }
    // NOTE: the form checks below are independent ifs, not an if/else chain;
    // operands matching several predicates get every matching candidate
    // encoding added to p.
    // VDIVPS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VDIVPS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5e)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VDIVPS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VDIVPS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5e)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VDIVPS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5e)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VDIVPS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-emitted EVEX prefix for the register-only form; memory
            // forms use m.evex(). Bit fields are built from the operands'
            // register codes — presumably per the Intel EVEX layout (confirm
            // against m.evex()).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7c ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VDIVPS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VDIVPS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5e)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VDIVPS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VDIVPS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5e)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VDIVPS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VDIVPS")
    }
    return p
}
 42425  
 42426  // VDIVSD performs "Divide Scalar Double-Precision Floating-Point Values".
 42427  //
 42428  // Mnemonic        : VDIVSD
 42429  // Supported forms : (5 forms)
 42430  //
 42431  //    * VDIVSD xmm, xmm, xmm                [AVX]
 42432  //    * VDIVSD m64, xmm, xmm                [AVX]
 42433  //    * VDIVSD m64, xmm, xmm{k}{z}          [AVX512F]
 42434  //    * VDIVSD {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
 42435  //    * VDIVSD xmm, xmm, xmm{k}{z}          [AVX512F]
 42436  //
func (self *Program) VDIVSD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // A fourth operand selects the {er} form below, where operand 0 is the
    // embedded-rounding specifier rather than a source operand.
    switch len(vv) {
        case 0  : p = self.alloc("VDIVSD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VDIVSD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VDIVSD takes 3 or 4 operands")
    }
    // NOTE: the form checks below are independent ifs; operands matching
    // several predicates (e.g. a low xmm register) get every matching
    // candidate encoding added to p.
    // VDIVSD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VDIVSD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5e)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VDIVSD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x5e)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VDIVSD {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-emitted EVEX prefix for the register-only form; memory
            // forms use m.evex(). Presumably matches the Intel EVEX bit
            // layout — confirm against m.evex().
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xff ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VDIVSD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xff ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VDIVSD")
    }
    return p
}
 42505  
 42506  // VDIVSS performs "Divide Scalar Single-Precision Floating-Point Values".
 42507  //
 42508  // Mnemonic        : VDIVSS
 42509  // Supported forms : (5 forms)
 42510  //
 42511  //    * VDIVSS xmm, xmm, xmm                [AVX]
 42512  //    * VDIVSS m32, xmm, xmm                [AVX]
 42513  //    * VDIVSS m32, xmm, xmm{k}{z}          [AVX512F]
 42514  //    * VDIVSS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
 42515  //    * VDIVSS xmm, xmm, xmm{k}{z}          [AVX512F]
 42516  //
func (self *Program) VDIVSS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // A fourth operand selects the {er} form below, where operand 0 is the
    // embedded-rounding specifier rather than a source operand.
    switch len(vv) {
        case 0  : p = self.alloc("VDIVSS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VDIVSS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VDIVSS takes 3 or 4 operands")
    }
    // NOTE: the form checks below are independent ifs; operands matching
    // several predicates get every matching candidate encoding added to p.
    // VDIVSS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VDIVSS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5e)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VDIVSS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x5e)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VDIVSS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-emitted EVEX prefix for the register-only form; memory
            // forms use m.evex(). Presumably matches the Intel EVEX bit
            // layout — confirm against m.evex().
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7e ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VDIVSS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VDIVSS")
    }
    return p
}
 42585  
 42586  // VDPPD performs "Dot Product of Packed Double Precision Floating-Point Values".
 42587  //
 42588  // Mnemonic        : VDPPD
 42589  // Supported forms : (2 forms)
 42590  //
 42591  //    * VDPPD imm8, xmm, xmm, xmm     [AVX]
 42592  //    * VDPPD imm8, m128, xmm, xmm    [AVX]
 42593  //
func (self *Program) VDPPD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VDPPD", 4, Operands { v0, v1, v2, v3 })
    // VDPPD imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-emitted 3-byte VEX prefix (0xc4 escape) for the register
            // form; the memory form below uses m.vex3() instead.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x41)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VDPPD imm8, m128, xmm, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x41)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operands: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VDPPD")
    }
    return p
}
 42625  
 42626  // VDPPS performs "Dot Product of Packed Single Precision Floating-Point Values".
 42627  //
 42628  // Mnemonic        : VDPPS
 42629  // Supported forms : (4 forms)
 42630  //
 42631  //    * VDPPS imm8, xmm, xmm, xmm     [AVX]
 42632  //    * VDPPS imm8, m128, xmm, xmm    [AVX]
 42633  //    * VDPPS imm8, ymm, ymm, ymm     [AVX]
 42634  //    * VDPPS imm8, m256, ymm, ymm    [AVX]
 42635  //
func (self *Program) VDPPS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VDPPS", 4, Operands { v0, v1, v2, v3 })
    // NOTE: the form checks below are independent ifs; operands matching
    // several predicates get every matching candidate encoding added to p.
    // VDPPS imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-emitted 3-byte VEX prefix (0xc4 escape) for the register
            // forms; the memory forms use m.vex3() instead.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x40)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VDPPS imm8, m128, xmm, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x40)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VDPPS imm8, ymm, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit(0x40)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VDPPS imm8, m256, ymm, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x40)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operands: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VDPPS")
    }
    return p
}
 42691  
 42692  // VEXP2PD performs "Approximation to the Exponential 2^x of Packed Double-Precision Floating-Point Values with Less Than 2^-23 Relative Error".
 42693  //
 42694  // Mnemonic        : VEXP2PD
 42695  // Supported forms : (3 forms)
 42696  //
 42697  //    * VEXP2PD m512/m64bcst, zmm{k}{z}    [AVX512ER]
 42698  //    * VEXP2PD {sae}, zmm, zmm{k}{z}      [AVX512ER]
 42699  //    * VEXP2PD zmm, zmm{k}{z}             [AVX512ER]
 42700  //
func (self *Program) VEXP2PD(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // A third operand selects the {sae} form below, where operand 0 is the
    // suppress-all-exceptions specifier rather than a source operand.
    switch len(vv) {
        case 0  : p = self.alloc("VEXP2PD", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VEXP2PD", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VEXP2PD takes 2 or 3 operands")
    }
    // VEXP2PD m512/m64bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xc8)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VEXP2PD {sae}, zmm, zmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-emitted EVEX prefix for the register forms; the memory
            // form uses m.evex(). Presumably matches the Intel EVEX bit
            // layout — confirm against m.evex().
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x18)
            m.emit(0xc8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VEXP2PD zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0xc8)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VEXP2PD")
    }
    return p
}
 42749  
 42750  // VEXP2PS performs "Approximation to the Exponential 2^x of Packed Single-Precision Floating-Point Values with Less Than 2^-23 Relative Error".
 42751  //
 42752  // Mnemonic        : VEXP2PS
 42753  // Supported forms : (3 forms)
 42754  //
 42755  //    * VEXP2PS m512/m32bcst, zmm{k}{z}    [AVX512ER]
 42756  //    * VEXP2PS {sae}, zmm, zmm{k}{z}      [AVX512ER]
 42757  //    * VEXP2PS zmm, zmm{k}{z}             [AVX512ER]
 42758  //
func (self *Program) VEXP2PS(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // A third operand selects the {sae} form below, where operand 0 is the
    // suppress-all-exceptions specifier rather than a source operand.
    switch len(vv) {
        case 0  : p = self.alloc("VEXP2PS", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VEXP2PS", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VEXP2PS takes 2 or 3 operands")
    }
    // VEXP2PS m512/m32bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xc8)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VEXP2PS {sae}, zmm, zmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-emitted EVEX prefix for the register forms; the memory
            // form uses m.evex(). Presumably matches the Intel EVEX bit
            // layout — confirm against m.evex().
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x18)
            m.emit(0xc8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VEXP2PS zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0xc8)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VEXP2PS")
    }
    return p
}
 42807  
 42808  // VEXPANDPD performs "Load Sparse Packed Double-Precision Floating-Point Values from Dense Memory".
 42809  //
 42810  // Mnemonic        : VEXPANDPD
 42811  // Supported forms : (6 forms)
 42812  //
 42813  //    * VEXPANDPD zmm, zmm{k}{z}     [AVX512F]
 42814  //    * VEXPANDPD m512, zmm{k}{z}    [AVX512F]
 42815  //    * VEXPANDPD xmm, xmm{k}{z}     [AVX512VL]
 42816  //    * VEXPANDPD ymm, ymm{k}{z}     [AVX512F,AVX512VL]
 42817  //    * VEXPANDPD m128, xmm{k}{z}    [AVX512VL]
 42818  //    * VEXPANDPD m256, ymm{k}{z}    [AVX512F,AVX512VL]
 42819  //
// VEXPANDPD dispatches on the operand types and registers one byte-exact
// encoder per matching instruction form (see the form list in the comment
// above). Register forms hand-roll the 4-byte EVEX prefix; memory forms go
// through the evex()/mrsd() helpers.
func (self *Program) VEXPANDPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VEXPANDPD", 2, Operands { v0, v1 })
    // VEXPANDPD zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                            // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // P0: inverted reg-extension bits
            m.emit(0xfd)                                                            // P1: precomputed map/W/vvvv/pp constant (W=1, 66 prefix)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                         // P2: z-bit, mask reg; 0x48 = 512-bit length
            m.emit(0x88)                                                            // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                           // ModRM: register-direct (mod=11)
        })
    }
    // VEXPANDPD m512, zmm{k}{z}
    if isM512(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x88)
            m.mrsd(lcode(v[1]), addr(v[0]), 8) // disp8 compressed by element size (8 bytes)
        })
    }
    // VEXPANDPD xmm, xmm{k}{z}
    // NOTE(review): these VL forms require only ISA_AVX512VL, while the
    // analogous VEXPANDPS forms require AVX512VL|AVX512F — verify against
    // the instruction database this file was generated from.
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08) // 0x08 = 128-bit length
            m.emit(0x88)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VEXPANDPD ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28) // 0x28 = 256-bit length
            m.emit(0x88)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VEXPANDPD m128, xmm{k}{z}
    if isM128(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x88)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VEXPANDPD m256, ymm{k}{z}
    if isM256(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x88)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // No encoder matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VEXPANDPD")
    }
    return p
}
 42896  
 42897  // VEXPANDPS performs "Load Sparse Packed Single-Precision Floating-Point Values from Dense Memory".
 42898  //
 42899  // Mnemonic        : VEXPANDPS
 42900  // Supported forms : (6 forms)
 42901  //
 42902  //    * VEXPANDPS zmm, zmm{k}{z}     [AVX512F]
 42903  //    * VEXPANDPS m512, zmm{k}{z}    [AVX512F]
 42904  //    * VEXPANDPS xmm, xmm{k}{z}     [AVX512F,AVX512VL]
 42905  //    * VEXPANDPS ymm, ymm{k}{z}     [AVX512F,AVX512VL]
 42906  //    * VEXPANDPS m128, xmm{k}{z}    [AVX512F,AVX512VL]
 42907  //    * VEXPANDPS m256, ymm{k}{z}    [AVX512F,AVX512VL]
 42908  //
// VEXPANDPS dispatches on the operand types and registers one byte-exact
// encoder per matching instruction form. Same structure as VEXPANDPD, but
// with W=0 prefixes (0x7d / map 0x05) and a 4-byte disp8 scale to match the
// single-precision element size.
func (self *Program) VEXPANDPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VEXPANDPS", 2, Operands { v0, v1 })
    // VEXPANDPS zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                            // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // P0: inverted reg-extension bits
            m.emit(0x7d)                                                            // P1: precomputed map/W/vvvv/pp constant (W=0)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                         // P2: z-bit, mask reg; 0x48 = 512-bit length
            m.emit(0x88)                                                            // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                           // ModRM: register-direct (mod=11)
        })
    }
    // VEXPANDPS m512, zmm{k}{z}
    if isM512(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x88)
            m.mrsd(lcode(v[1]), addr(v[0]), 4) // disp8 compressed by element size (4 bytes)
        })
    }
    // VEXPANDPS xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08) // 0x08 = 128-bit length
            m.emit(0x88)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VEXPANDPS ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28) // 0x28 = 256-bit length
            m.emit(0x88)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VEXPANDPS m128, xmm{k}{z}
    if isM128(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x88)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VEXPANDPS m256, ymm{k}{z}
    if isM256(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x88)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // No encoder matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VEXPANDPS")
    }
    return p
}
 42985  
 42986  // VEXTRACTF128 performs "Extract Packed Floating-Point Values".
 42987  //
 42988  // Mnemonic        : VEXTRACTF128
 42989  // Supported forms : (2 forms)
 42990  //
 42991  //    * VEXTRACTF128 imm8, ymm, xmm     [AVX]
 42992  //    * VEXTRACTF128 imm8, ymm, m128    [AVX]
 42993  //
// VEXTRACTF128 dispatches on the operand types and registers one byte-exact
// encoder per matching form. This is a VEX-encoded (AVX, not EVEX)
// instruction: the register form hand-rolls a 3-byte VEX prefix, the memory
// form uses the vex3() helper. The imm8 selects which 128-bit lane to
// extract.
func (self *Program) VEXTRACTF128(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VEXTRACTF128", 3, Operands { v0, v1, v2 })
    // VEXTRACTF128 imm8, ymm, xmm
    if isImm8(v0) && isYMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                          // 3-byte VEX escape
            m.emit(0xe3 ^ (hcode(v[1]) << 7) ^ (hcode(v[2]) << 5)) // VEX byte 1: inverted R/B extension bits
            m.emit(0x7d)                                          // VEX byte 2: precomputed W/vvvv/L/pp constant
            m.emit(0x19)                                          // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))         // ModRM: register-direct (mod=11)
            m.imm1(toImmAny(v[0]))                                // trailing imm8 lane selector
        })
    }
    // VEXTRACTF128 imm8, ymm, m128
    if isImm8(v0) && isYMM(v1) && isM128(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[1]), addr(v[2]), 0)
            m.emit(0x19)
            m.mrsd(lcode(v[1]), addr(v[2]), 1) // VEX: no disp8 compression (scale 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoder matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VEXTRACTF128")
    }
    return p
}
 43025  
 43026  // VEXTRACTF32X4 performs "Extract 128 Bits of Packed Single-Precision Floating-Point Values".
 43027  //
 43028  // Mnemonic        : VEXTRACTF32X4
 43029  // Supported forms : (4 forms)
 43030  //
 43031  //    * VEXTRACTF32X4 imm8, zmm, xmm{k}{z}     [AVX512F]
 43032  //    * VEXTRACTF32X4 imm8, zmm, m128{k}{z}    [AVX512F]
 43033  //    * VEXTRACTF32X4 imm8, ymm, xmm{k}{z}     [AVX512F,AVX512VL]
 43034  //    * VEXTRACTF32X4 imm8, ymm, m128{k}{z}    [AVX512F,AVX512VL]
 43035  //
// VEXTRACTF32X4 dispatches on the operand types and registers one byte-exact
// EVEX encoder per matching form. The imm8 selects which 128-bit chunk to
// extract; the destination may be masked ({k}{z}).
func (self *Program) VEXTRACTF32X4(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VEXTRACTF32X4", 3, Operands { v0, v1, v2 })
    // VEXTRACTF32X4 imm8, zmm, xmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                            // EVEX escape byte
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4))) // P0: inverted reg-extension bits
            m.emit(0x7d)                                                            // P1: precomputed map/W/vvvv/pp constant (W=0)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)                         // P2: z-bit, mask reg; 0x48 = 512-bit source length
            m.emit(0x19)                                                            // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))                           // ModRM: register-direct (mod=11)
            m.imm1(toImmAny(v[0]))                                                  // trailing imm8 chunk selector
        })
    }
    // VEXTRACTF32X4 imm8, zmm, m128{k}{z}
    if isImm8(v0) && isZMM(v1) && isM128kz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b10, ehcode(v[1]), addr(v[2]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x19)
            m.mrsd(lcode(v[1]), addr(v[2]), 16) // disp8 compressed by 16-byte chunk size
            m.imm1(toImmAny(v[0]))
        })
    }
    // VEXTRACTF32X4 imm8, ymm, xmm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x28) // 0x28 = 256-bit source length
            m.emit(0x19)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VEXTRACTF32X4 imm8, ymm, m128{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isM128kz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b01, ehcode(v[1]), addr(v[2]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x19)
            m.mrsd(lcode(v[1]), addr(v[2]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoder matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VEXTRACTF32X4")
    }
    return p
}
 43093  
 43094  // VEXTRACTF32X8 performs "Extract 256 Bits of Packed Single-Precision Floating-Point Values".
 43095  //
 43096  // Mnemonic        : VEXTRACTF32X8
 43097  // Supported forms : (2 forms)
 43098  //
 43099  //    * VEXTRACTF32X8 imm8, zmm, ymm{k}{z}     [AVX512DQ]
 43100  //    * VEXTRACTF32X8 imm8, zmm, m256{k}{z}    [AVX512DQ]
 43101  //
// VEXTRACTF32X8 dispatches on the operand types and registers one byte-exact
// EVEX encoder per matching form. The imm8 selects which 256-bit half of the
// zmm source to extract; the destination may be masked ({k}{z}).
func (self *Program) VEXTRACTF32X8(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VEXTRACTF32X8", 3, Operands { v0, v1, v2 })
    // VEXTRACTF32X8 imm8, zmm, ymm{k}{z}
    if isImm8(v0) && isZMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                            // EVEX escape byte
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4))) // P0: inverted reg-extension bits
            m.emit(0x7d)                                                            // P1: precomputed map/W/vvvv/pp constant (W=0)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)                         // P2: z-bit, mask reg; 0x48 = 512-bit source length
            m.emit(0x1b)                                                            // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))                           // ModRM: register-direct (mod=11)
            m.imm1(toImmAny(v[0]))                                                  // trailing imm8 half selector
        })
    }
    // VEXTRACTF32X8 imm8, zmm, m256{k}{z}
    if isImm8(v0) && isZMM(v1) && isM256kz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b10, ehcode(v[1]), addr(v[2]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x1b)
            m.mrsd(lcode(v[1]), addr(v[2]), 32) // disp8 compressed by 32-byte chunk size
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoder matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VEXTRACTF32X8")
    }
    return p
}
 43134  
 43135  // VEXTRACTF64X2 performs "Extract 128 Bits of Packed Double-Precision Floating-Point Values".
 43136  //
 43137  // Mnemonic        : VEXTRACTF64X2
 43138  // Supported forms : (4 forms)
 43139  //
 43140  //    * VEXTRACTF64X2 imm8, zmm, xmm{k}{z}     [AVX512DQ]
 43141  //    * VEXTRACTF64X2 imm8, zmm, m128{k}{z}    [AVX512DQ]
 43142  //    * VEXTRACTF64X2 imm8, ymm, xmm{k}{z}     [AVX512DQ,AVX512VL]
 43143  //    * VEXTRACTF64X2 imm8, ymm, m128{k}{z}    [AVX512DQ,AVX512VL]
 43144  //
// VEXTRACTF64X2 dispatches on the operand types and registers one byte-exact
// EVEX encoder per matching form. Same structure as VEXTRACTF32X4, but with
// the W=1 prefix byte (0xfd / map 0x85) for the 64-bit element variant.
func (self *Program) VEXTRACTF64X2(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VEXTRACTF64X2", 3, Operands { v0, v1, v2 })
    // VEXTRACTF64X2 imm8, zmm, xmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                            // EVEX escape byte
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4))) // P0: inverted reg-extension bits
            m.emit(0xfd)                                                            // P1: precomputed map/W/vvvv/pp constant (W=1)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)                         // P2: z-bit, mask reg; 0x48 = 512-bit source length
            m.emit(0x19)                                                            // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))                           // ModRM: register-direct (mod=11)
            m.imm1(toImmAny(v[0]))                                                  // trailing imm8 chunk selector
        })
    }
    // VEXTRACTF64X2 imm8, zmm, m128{k}{z}
    if isImm8(v0) && isZMM(v1) && isM128kz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b10, ehcode(v[1]), addr(v[2]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x19)
            m.mrsd(lcode(v[1]), addr(v[2]), 16) // disp8 compressed by 16-byte chunk size
            m.imm1(toImmAny(v[0]))
        })
    }
    // VEXTRACTF64X2 imm8, ymm, xmm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x28) // 0x28 = 256-bit source length
            m.emit(0x19)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VEXTRACTF64X2 imm8, ymm, m128{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isM128kz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b01, ehcode(v[1]), addr(v[2]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x19)
            m.mrsd(lcode(v[1]), addr(v[2]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoder matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VEXTRACTF64X2")
    }
    return p
}
 43202  
 43203  // VEXTRACTF64X4 performs "Extract 256 Bits of Packed Double-Precision Floating-Point Values".
 43204  //
 43205  // Mnemonic        : VEXTRACTF64X4
 43206  // Supported forms : (2 forms)
 43207  //
 43208  //    * VEXTRACTF64X4 imm8, zmm, ymm{k}{z}     [AVX512F]
 43209  //    * VEXTRACTF64X4 imm8, zmm, m256{k}{z}    [AVX512F]
 43210  //
// VEXTRACTF64X4 dispatches on the operand types and registers one byte-exact
// EVEX encoder per matching form. The imm8 selects which 256-bit half of the
// zmm source to extract; the destination may be masked ({k}{z}).
func (self *Program) VEXTRACTF64X4(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VEXTRACTF64X4", 3, Operands { v0, v1, v2 })
    // VEXTRACTF64X4 imm8, zmm, ymm{k}{z}
    if isImm8(v0) && isZMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                            // EVEX escape byte
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4))) // P0: inverted reg-extension bits
            m.emit(0xfd)                                                            // P1: precomputed map/W/vvvv/pp constant (W=1)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)                         // P2: z-bit, mask reg; 0x48 = 512-bit source length
            m.emit(0x3b)                                                            // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))                           // ModRM: register-direct (mod=11)
            m.imm1(toImmAny(v[0]))                                                  // trailing imm8 half selector
        })
    }
    // VEXTRACTF64X4 imm8, zmm, m256{k}{z}
    if isImm8(v0) && isZMM(v1) && isM256kz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b10, ehcode(v[1]), addr(v[2]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x3b)
            m.mrsd(lcode(v[1]), addr(v[2]), 32) // disp8 compressed by 32-byte chunk size
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoder matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VEXTRACTF64X4")
    }
    return p
}
 43243  
 43244  // VEXTRACTI128 performs "Extract Packed Integer Values".
 43245  //
 43246  // Mnemonic        : VEXTRACTI128
 43247  // Supported forms : (2 forms)
 43248  //
 43249  //    * VEXTRACTI128 imm8, ymm, xmm     [AVX2]
 43250  //    * VEXTRACTI128 imm8, ymm, m128    [AVX2]
 43251  //
// VEXTRACTI128 dispatches on the operand types and registers one byte-exact
// encoder per matching form. VEX-encoded (AVX2) integer counterpart of
// VEXTRACTF128: identical prefix layout, opcode 0x39 instead of 0x19. The
// imm8 selects which 128-bit lane to extract.
func (self *Program) VEXTRACTI128(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VEXTRACTI128", 3, Operands { v0, v1, v2 })
    // VEXTRACTI128 imm8, ymm, xmm
    if isImm8(v0) && isYMM(v1) && isXMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                          // 3-byte VEX escape
            m.emit(0xe3 ^ (hcode(v[1]) << 7) ^ (hcode(v[2]) << 5)) // VEX byte 1: inverted R/B extension bits
            m.emit(0x7d)                                          // VEX byte 2: precomputed W/vvvv/L/pp constant
            m.emit(0x39)                                          // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))         // ModRM: register-direct (mod=11)
            m.imm1(toImmAny(v[0]))                                // trailing imm8 lane selector
        })
    }
    // VEXTRACTI128 imm8, ymm, m128
    if isImm8(v0) && isYMM(v1) && isM128(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[1]), addr(v[2]), 0)
            m.emit(0x39)
            m.mrsd(lcode(v[1]), addr(v[2]), 1) // VEX: no disp8 compression (scale 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoder matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VEXTRACTI128")
    }
    return p
}
 43283  
 43284  // VEXTRACTI32X4 performs "Extract 128 Bits of Packed Doubleword Integer Values".
 43285  //
 43286  // Mnemonic        : VEXTRACTI32X4
 43287  // Supported forms : (4 forms)
 43288  //
 43289  //    * VEXTRACTI32X4 imm8, zmm, xmm{k}{z}     [AVX512F]
 43290  //    * VEXTRACTI32X4 imm8, zmm, m128{k}{z}    [AVX512F]
 43291  //    * VEXTRACTI32X4 imm8, ymm, xmm{k}{z}     [AVX512F,AVX512VL]
 43292  //    * VEXTRACTI32X4 imm8, ymm, m128{k}{z}    [AVX512F,AVX512VL]
 43293  //
// VEXTRACTI32X4 dispatches on the operand types and registers one byte-exact
// EVEX encoder per matching form. Integer counterpart of VEXTRACTF32X4:
// identical prefix layout, opcode 0x39 instead of 0x19.
func (self *Program) VEXTRACTI32X4(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VEXTRACTI32X4", 3, Operands { v0, v1, v2 })
    // VEXTRACTI32X4 imm8, zmm, xmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                            // EVEX escape byte
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4))) // P0: inverted reg-extension bits
            m.emit(0x7d)                                                            // P1: precomputed map/W/vvvv/pp constant (W=0)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)                         // P2: z-bit, mask reg; 0x48 = 512-bit source length
            m.emit(0x39)                                                            // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))                           // ModRM: register-direct (mod=11)
            m.imm1(toImmAny(v[0]))                                                  // trailing imm8 chunk selector
        })
    }
    // VEXTRACTI32X4 imm8, zmm, m128{k}{z}
    if isImm8(v0) && isZMM(v1) && isM128kz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b10, ehcode(v[1]), addr(v[2]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x39)
            m.mrsd(lcode(v[1]), addr(v[2]), 16) // disp8 compressed by 16-byte chunk size
            m.imm1(toImmAny(v[0]))
        })
    }
    // VEXTRACTI32X4 imm8, ymm, xmm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x28) // 0x28 = 256-bit source length
            m.emit(0x39)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VEXTRACTI32X4 imm8, ymm, m128{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isM128kz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b01, ehcode(v[1]), addr(v[2]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x39)
            m.mrsd(lcode(v[1]), addr(v[2]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoder matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VEXTRACTI32X4")
    }
    return p
}
 43351  
 43352  // VEXTRACTI32X8 performs "Extract 256 Bits of Packed Doubleword Integer Values".
 43353  //
 43354  // Mnemonic        : VEXTRACTI32X8
 43355  // Supported forms : (2 forms)
 43356  //
 43357  //    * VEXTRACTI32X8 imm8, zmm, ymm{k}{z}     [AVX512DQ]
 43358  //    * VEXTRACTI32X8 imm8, zmm, m256{k}{z}    [AVX512DQ]
 43359  //
// VEXTRACTI32X8 dispatches on the operand types and registers one byte-exact
// EVEX encoder per matching form. Integer counterpart of VEXTRACTF32X8:
// identical prefix layout, opcode 0x3b instead of 0x1b.
func (self *Program) VEXTRACTI32X8(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VEXTRACTI32X8", 3, Operands { v0, v1, v2 })
    // VEXTRACTI32X8 imm8, zmm, ymm{k}{z}
    if isImm8(v0) && isZMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                            // EVEX escape byte
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4))) // P0: inverted reg-extension bits
            m.emit(0x7d)                                                            // P1: precomputed map/W/vvvv/pp constant (W=0)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)                         // P2: z-bit, mask reg; 0x48 = 512-bit source length
            m.emit(0x3b)                                                            // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))                           // ModRM: register-direct (mod=11)
            m.imm1(toImmAny(v[0]))                                                  // trailing imm8 half selector
        })
    }
    // VEXTRACTI32X8 imm8, zmm, m256{k}{z}
    if isImm8(v0) && isZMM(v1) && isM256kz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b10, ehcode(v[1]), addr(v[2]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x3b)
            m.mrsd(lcode(v[1]), addr(v[2]), 32) // disp8 compressed by 32-byte chunk size
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoder matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VEXTRACTI32X8")
    }
    return p
}
 43392  
 43393  // VEXTRACTI64X2 performs "Extract 128 Bits of Packed Quadword Integer Values".
 43394  //
 43395  // Mnemonic        : VEXTRACTI64X2
 43396  // Supported forms : (4 forms)
 43397  //
 43398  //    * VEXTRACTI64X2 imm8, zmm, xmm{k}{z}     [AVX512DQ]
 43399  //    * VEXTRACTI64X2 imm8, zmm, m128{k}{z}    [AVX512DQ]
 43400  //    * VEXTRACTI64X2 imm8, ymm, xmm{k}{z}     [AVX512DQ,AVX512VL]
 43401  //    * VEXTRACTI64X2 imm8, ymm, m128{k}{z}    [AVX512DQ,AVX512VL]
 43402  //
// VEXTRACTI64X2 dispatches on the operand types and registers one byte-exact
// EVEX encoder per matching form. Integer counterpart of VEXTRACTF64X2:
// W=1 prefixes (0xfd / map 0x85), opcode 0x39.
func (self *Program) VEXTRACTI64X2(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VEXTRACTI64X2", 3, Operands { v0, v1, v2 })
    // VEXTRACTI64X2 imm8, zmm, xmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                            // EVEX escape byte
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4))) // P0: inverted reg-extension bits
            m.emit(0xfd)                                                            // P1: precomputed map/W/vvvv/pp constant (W=1)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)                         // P2: z-bit, mask reg; 0x48 = 512-bit source length
            m.emit(0x39)                                                            // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))                           // ModRM: register-direct (mod=11)
            m.imm1(toImmAny(v[0]))                                                  // trailing imm8 chunk selector
        })
    }
    // VEXTRACTI64X2 imm8, zmm, m128{k}{z}
    if isImm8(v0) && isZMM(v1) && isM128kz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b10, ehcode(v[1]), addr(v[2]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x39)
            m.mrsd(lcode(v[1]), addr(v[2]), 16) // disp8 compressed by 16-byte chunk size
            m.imm1(toImmAny(v[0]))
        })
    }
    // VEXTRACTI64X2 imm8, ymm, xmm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x28) // 0x28 = 256-bit source length
            m.emit(0x39)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VEXTRACTI64X2 imm8, ymm, m128{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isM128kz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b01, ehcode(v[1]), addr(v[2]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x39)
            m.mrsd(lcode(v[1]), addr(v[2]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoder matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VEXTRACTI64X2")
    }
    return p
}
 43460  
 43461  // VEXTRACTI64X4 performs "Extract 256 Bits of Packed Quadword Integer Values".
 43462  //
 43463  // Mnemonic        : VEXTRACTI64X4
 43464  // Supported forms : (2 forms)
 43465  //
 43466  //    * VEXTRACTI64X4 imm8, zmm, ymm{k}{z}     [AVX512F]
 43467  //    * VEXTRACTI64X4 imm8, zmm, m256{k}{z}    [AVX512F]
 43468  //
// VEXTRACTI64X4 dispatches on the operand types and registers one byte-exact
// EVEX encoder per matching form. Integer counterpart of VEXTRACTF64X4:
// identical prefix layout, opcode 0x3b.
func (self *Program) VEXTRACTI64X4(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VEXTRACTI64X4", 3, Operands { v0, v1, v2 })
    // VEXTRACTI64X4 imm8, zmm, ymm{k}{z}
    if isImm8(v0) && isZMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                            // EVEX escape byte
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4))) // P0: inverted reg-extension bits
            m.emit(0xfd)                                                            // P1: precomputed map/W/vvvv/pp constant (W=1)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)                         // P2: z-bit, mask reg; 0x48 = 512-bit source length
            m.emit(0x3b)                                                            // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))                           // ModRM: register-direct (mod=11)
            m.imm1(toImmAny(v[0]))                                                  // trailing imm8 half selector
        })
    }
    // VEXTRACTI64X4 imm8, zmm, m256{k}{z}
    if isImm8(v0) && isZMM(v1) && isM256kz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b10, ehcode(v[1]), addr(v[2]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x3b)
            m.mrsd(lcode(v[1]), addr(v[2]), 32) // disp8 compressed by 32-byte chunk size
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoder matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VEXTRACTI64X4")
    }
    return p
}
 43501  
// VEXTRACTPS performs "Extract Packed Single Precision Floating-Point Value".
//
// Mnemonic        : VEXTRACTPS
// Supported forms : (4 forms)
//
//    * VEXTRACTPS imm8, xmm, r32    [AVX]
//    * VEXTRACTPS imm8, xmm, m32    [AVX]
//    * VEXTRACTPS imm8, xmm, r32    [AVX512F]
//    * VEXTRACTPS imm8, xmm, m32    [AVX512F]
//
func (self *Program) VEXTRACTPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // v0 = imm8 element selector, v1 = xmm source, v2 = r32 or m32 destination.
    // VEX (AVX) and EVEX (AVX512F) forms are both registered when their
    // operand predicates match; the encoder picks among candidates later.
    p := self.alloc("VEXTRACTPS", 3, Operands { v0, v1, v2 })
    // VEXTRACTPS imm8, xmm, r32
    if isImm8(v0) && isXMM(v1) && isReg32(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX register form: 3-byte VEX prefix (0xC4), opcode 0x17,
            // ModRM, then the imm8 selector.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[1]) << 7) ^ (hcode(v[2]) << 5))
            m.emit(0x79)
            m.emit(0x17)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VEXTRACTPS imm8, xmm, m32
    if isImm8(v0) && isXMM(v1) && isM32(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[1]), addr(v[2]), 0)
            m.emit(0x17)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VEXTRACTPS imm8, xmm, r32
    if isImm8(v0) && isEVEXXMM(v1) && isReg32(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form (covers EVEX-only registers such as
            // xmm16-xmm31): hand-assembled EVEX prefix, opcode 0x17, ModRM, imm8.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit(0x08)
            m.emit(0x17)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VEXTRACTPS imm8, xmm, m32
    if isImm8(v0) && isEVEXXMM(v1) && isM32(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; 4-byte operand size passed to mrsd
            // (presumably the disp8*N scale for a 32-bit store).
            m.evex(0b11, 0x05, 0b00, ehcode(v[1]), addr(v[2]), 0, 0, 0, 0)
            m.emit(0x17)
            m.mrsd(lcode(v[1]), addr(v[2]), 4)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No candidate encoder matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VEXTRACTPS")
    }
    return p
}
 43568  
// VFIXUPIMMPD performs "Fix Up Special Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VFIXUPIMMPD
// Supported forms : (7 forms)
//
//    * VFIXUPIMMPD imm8, m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFIXUPIMMPD imm8, {sae}, zmm, zmm, zmm{k}{z}      [AVX512F]
//    * VFIXUPIMMPD imm8, zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFIXUPIMMPD imm8, m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFIXUPIMMPD imm8, xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFIXUPIMMPD imm8, m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFIXUPIMMPD imm8, ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VFIXUPIMMPD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The {sae} form carries an extra operand, so the instruction accepts
    // either 4 or 5 operands; the trailing operand(s) arrive in vv.
    switch len(vv) {
        case 0  : p = self.alloc("VFIXUPIMMPD", 4, Operands { v0, v1, v2, v3 })
        case 1  : p = self.alloc("VFIXUPIMMPD", 5, Operands { v0, v1, v2, v3, vv[0] })
        default : panic("instruction VFIXUPIMMPD takes 4 or 5 operands")
    }
    // VFIXUPIMMPD imm8, m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM512M64bcst(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory-source form: helper-built EVEX prefix (broadcast bit
            // from bcode), opcode 0x54, ModRM/SIB/disp, then imm8.
            m.evex(0b11, 0x85, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x54)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFIXUPIMMPD imm8, {sae}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isZMM(v2) && isZMM(v3) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // {sae} form: hand-assembled EVEX prefix with the suppress-all-
            // exceptions bit set (the 0x10 in the fourth prefix byte).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[4]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[4]) << 4)))
            m.emit(0xfd ^ (hlcode(v[3]) << 3))
            m.emit((zcode(v[4]) << 7) | (0x08 ^ (ecode(v[3]) << 3)) | kcode(v[4]) | 0x10)
            m.emit(0x54)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFIXUPIMMPD imm8, zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x54)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFIXUPIMMPD imm8, m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM128M64bcst(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x54)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFIXUPIMMPD imm8, xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0x54)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFIXUPIMMPD imm8, m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM256M64bcst(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x54)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFIXUPIMMPD imm8, ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x54)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No candidate encoder matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFIXUPIMMPD")
    }
    return p
}
 43683  
 43684  // VFIXUPIMMPS performs "Fix Up Special Packed Single-Precision Floating-Point Values".
 43685  //
 43686  // Mnemonic        : VFIXUPIMMPS
 43687  // Supported forms : (7 forms)
 43688  //
 43689  //    * VFIXUPIMMPS imm8, m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
 43690  //    * VFIXUPIMMPS imm8, {sae}, zmm, zmm, zmm{k}{z}      [AVX512F]
 43691  //    * VFIXUPIMMPS imm8, zmm, zmm, zmm{k}{z}             [AVX512F]
 43692  //    * VFIXUPIMMPS imm8, m128/m32bcst, xmm, xmm{k}{z}    [AVX512VL]
 43693  //    * VFIXUPIMMPS imm8, xmm, xmm, xmm{k}{z}             [AVX512VL]
 43694  //    * VFIXUPIMMPS imm8, m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 43695  //    * VFIXUPIMMPS imm8, ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 43696  //
 43697  func (self *Program) VFIXUPIMMPS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}, vv ...interface{}) *Instruction {
 43698      var p *Instruction
 43699      switch len(vv) {
 43700          case 0  : p = self.alloc("VFIXUPIMMPS", 4, Operands { v0, v1, v2, v3 })
 43701          case 1  : p = self.alloc("VFIXUPIMMPS", 5, Operands { v0, v1, v2, v3, vv[0] })
 43702          default : panic("instruction VFIXUPIMMPS takes 4 or 5 operands")
 43703      }
 43704      // VFIXUPIMMPS imm8, m512/m32bcst, zmm, zmm{k}{z}
 43705      if len(vv) == 0 && isImm8(v0) && isM512M32bcst(v1) && isZMM(v2) && isZMMkz(v3) {
 43706          self.require(ISA_AVX512F)
 43707          p.domain = DomainAVX
 43708          p.add(0, func(m *_Encoding, v []interface{}) {
 43709              m.evex(0b11, 0x05, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
 43710              m.emit(0x54)
 43711              m.mrsd(lcode(v[3]), addr(v[1]), 64)
 43712              m.imm1(toImmAny(v[0]))
 43713          })
 43714      }
 43715      // VFIXUPIMMPS imm8, {sae}, zmm, zmm, zmm{k}{z}
 43716      if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isZMM(v2) && isZMM(v3) && isZMMkz(vv[0]) {
 43717          self.require(ISA_AVX512F)
 43718          p.domain = DomainAVX
 43719          p.add(0, func(m *_Encoding, v []interface{}) {
 43720              m.emit(0x62)
 43721              m.emit(0xf3 ^ ((hcode(v[4]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[4]) << 4)))
 43722              m.emit(0x7d ^ (hlcode(v[3]) << 3))
 43723              m.emit((zcode(v[4]) << 7) | (0x08 ^ (ecode(v[3]) << 3)) | kcode(v[4]) | 0x10)
 43724              m.emit(0x54)
 43725              m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
 43726              m.imm1(toImmAny(v[0]))
 43727          })
 43728      }
 43729      // VFIXUPIMMPS imm8, zmm, zmm, zmm{k}{z}
 43730      if len(vv) == 0 && isImm8(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(v3) {
 43731          self.require(ISA_AVX512F)
 43732          p.domain = DomainAVX
 43733          p.add(0, func(m *_Encoding, v []interface{}) {
 43734              m.emit(0x62)
 43735              m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
 43736              m.emit(0x7d ^ (hlcode(v[2]) << 3))
 43737              m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
 43738              m.emit(0x54)
 43739              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
 43740              m.imm1(toImmAny(v[0]))
 43741          })
 43742      }
 43743      // VFIXUPIMMPS imm8, m128/m32bcst, xmm, xmm{k}{z}
 43744      if len(vv) == 0 && isImm8(v0) && isM128M32bcst(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
 43745          self.require(ISA_AVX512VL)
 43746          p.domain = DomainAVX
 43747          p.add(0, func(m *_Encoding, v []interface{}) {
 43748              m.evex(0b11, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
 43749              m.emit(0x54)
 43750              m.mrsd(lcode(v[3]), addr(v[1]), 16)
 43751              m.imm1(toImmAny(v[0]))
 43752          })
 43753      }
 43754      // VFIXUPIMMPS imm8, xmm, xmm, xmm{k}{z}
 43755      if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
 43756          self.require(ISA_AVX512VL)
 43757          p.domain = DomainAVX
 43758          p.add(0, func(m *_Encoding, v []interface{}) {
 43759              m.emit(0x62)
 43760              m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
 43761              m.emit(0x7d ^ (hlcode(v[2]) << 3))
 43762              m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
 43763              m.emit(0x54)
 43764              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
 43765              m.imm1(toImmAny(v[0]))
 43766          })
 43767      }
 43768      // VFIXUPIMMPS imm8, m256/m32bcst, ymm, ymm{k}{z}
 43769      if len(vv) == 0 && isImm8(v0) && isM256M32bcst(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
 43770          self.require(ISA_AVX512VL | ISA_AVX512F)
 43771          p.domain = DomainAVX
 43772          p.add(0, func(m *_Encoding, v []interface{}) {
 43773              m.evex(0b11, 0x05, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
 43774              m.emit(0x54)
 43775              m.mrsd(lcode(v[3]), addr(v[1]), 32)
 43776              m.imm1(toImmAny(v[0]))
 43777          })
 43778      }
 43779      // VFIXUPIMMPS imm8, ymm, ymm, ymm{k}{z}
 43780      if len(vv) == 0 && isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
 43781          self.require(ISA_AVX512VL | ISA_AVX512F)
 43782          p.domain = DomainAVX
 43783          p.add(0, func(m *_Encoding, v []interface{}) {
 43784              m.emit(0x62)
 43785              m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
 43786              m.emit(0x7d ^ (hlcode(v[2]) << 3))
 43787              m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
 43788              m.emit(0x54)
 43789              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
 43790              m.imm1(toImmAny(v[0]))
 43791          })
 43792      }
 43793      if p.len == 0 {
 43794          panic("invalid operands for VFIXUPIMMPS")
 43795      }
 43796      return p
 43797  }
 43798  
// VFIXUPIMMSD performs "Fix Up Special Scalar Double-Precision Floating-Point Value".
//
// Mnemonic        : VFIXUPIMMSD
// Supported forms : (3 forms)
//
//    * VFIXUPIMMSD imm8, m64, xmm, xmm{k}{z}           [AVX512F]
//    * VFIXUPIMMSD imm8, {sae}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VFIXUPIMMSD imm8, xmm, xmm, xmm{k}{z}           [AVX512F]
//
func (self *Program) VFIXUPIMMSD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The {sae} form carries an extra operand, so the instruction accepts
    // either 4 or 5 operands; the trailing operand(s) arrive in vv.
    switch len(vv) {
        case 0  : p = self.alloc("VFIXUPIMMSD", 4, Operands { v0, v1, v2, v3 })
        case 1  : p = self.alloc("VFIXUPIMMSD", 5, Operands { v0, v1, v2, v3, vv[0] })
        default : panic("instruction VFIXUPIMMSD takes 4 or 5 operands")
    }
    // VFIXUPIMMSD imm8, m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM64(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory-source form: helper-built EVEX prefix, opcode 0x55,
            // ModRM/SIB/disp (8-byte scalar operand), then imm8.
            m.evex(0b11, 0x85, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x55)
            m.mrsd(lcode(v[3]), addr(v[1]), 8)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFIXUPIMMSD imm8, {sae}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // {sae} form: hand-assembled EVEX prefix with the suppress-all-
            // exceptions bit set (the 0x10 in the fourth prefix byte).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[4]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[4]) << 4)))
            m.emit(0xfd ^ (hlcode(v[3]) << 3))
            m.emit((zcode(v[4]) << 7) | (0x08 ^ (ecode(v[3]) << 3)) | kcode(v[4]) | 0x10)
            m.emit(0x55)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFIXUPIMMSD imm8, xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x55)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No candidate encoder matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFIXUPIMMSD")
    }
    return p
}
 43859  
// VFIXUPIMMSS performs "Fix Up Special Scalar Single-Precision Floating-Point Value".
//
// Mnemonic        : VFIXUPIMMSS
// Supported forms : (3 forms)
//
//    * VFIXUPIMMSS imm8, m32, xmm, xmm{k}{z}           [AVX512F]
//    * VFIXUPIMMSS imm8, {sae}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VFIXUPIMMSS imm8, xmm, xmm, xmm{k}{z}           [AVX512F]
//
func (self *Program) VFIXUPIMMSS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The {sae} form carries an extra operand, so the instruction accepts
    // either 4 or 5 operands; the trailing operand(s) arrive in vv.
    switch len(vv) {
        case 0  : p = self.alloc("VFIXUPIMMSS", 4, Operands { v0, v1, v2, v3 })
        case 1  : p = self.alloc("VFIXUPIMMSS", 5, Operands { v0, v1, v2, v3, vv[0] })
        default : panic("instruction VFIXUPIMMSS takes 4 or 5 operands")
    }
    // VFIXUPIMMSS imm8, m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM32(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory-source form: helper-built EVEX prefix, opcode 0x55,
            // ModRM/SIB/disp (4-byte scalar operand), then imm8.
            m.evex(0b11, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x55)
            m.mrsd(lcode(v[3]), addr(v[1]), 4)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFIXUPIMMSS imm8, {sae}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // {sae} form: hand-assembled EVEX prefix with the suppress-all-
            // exceptions bit set (the 0x10 in the fourth prefix byte).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[4]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[4]) << 4)))
            m.emit(0x7d ^ (hlcode(v[3]) << 3))
            m.emit((zcode(v[4]) << 7) | (0x08 ^ (ecode(v[3]) << 3)) | kcode(v[4]) | 0x10)
            m.emit(0x55)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFIXUPIMMSS imm8, xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x55)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No candidate encoder matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFIXUPIMMSS")
    }
    return p
}
 43920  
// VFMADD132PD performs "Fused Multiply-Add of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VFMADD132PD
// Supported forms : (11 forms)
//
//    * VFMADD132PD xmm, xmm, xmm                   [FMA3]
//    * VFMADD132PD m128, xmm, xmm                  [FMA3]
//    * VFMADD132PD ymm, ymm, ymm                   [FMA3]
//    * VFMADD132PD m256, ymm, ymm                  [FMA3]
//    * VFMADD132PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFMADD132PD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFMADD132PD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFMADD132PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFMADD132PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFMADD132PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFMADD132PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VFMADD132PD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The {er} (embedded rounding) form carries an extra operand, so the
    // instruction accepts either 3 or 4 operands; the trailing operand(s)
    // arrive in vv.
    switch len(vv) {
        case 0  : p = self.alloc("VFMADD132PD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMADD132PD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMADD132PD takes 3 or 4 operands")
    }
    // VFMADD132PD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // FMA3 register form: hand-assembled 3-byte VEX prefix (0xC4),
            // opcode 0x98, then ModRM.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0x98)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD132PD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x98)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADD132PD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit(0x98)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD132PD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x98)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADD132PD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form: broadcast bit taken from bcode(v[0]).
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x98)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFMADD132PD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // {er} form: hand-assembled EVEX prefix; the rounding mode is
            // folded into the fourth prefix byte via vcode(v[0]) << 5.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x98)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMADD132PD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x98)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD132PD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x98)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFMADD132PD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x98)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD132PD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x98)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFMADD132PD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x98)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No candidate encoder matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFMADD132PD")
    }
    return p
}
 44076  
 44077  // VFMADD132PS performs "Fused Multiply-Add of Packed Single-Precision Floating-Point Values".
 44078  //
 44079  // Mnemonic        : VFMADD132PS
 44080  // Supported forms : (11 forms)
 44081  //
 44082  //    * VFMADD132PS xmm, xmm, xmm                   [FMA3]
 44083  //    * VFMADD132PS m128, xmm, xmm                  [FMA3]
 44084  //    * VFMADD132PS ymm, ymm, ymm                   [FMA3]
 44085  //    * VFMADD132PS m256, ymm, ymm                  [FMA3]
 44086  //    * VFMADD132PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
 44087  //    * VFMADD132PS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 44088  //    * VFMADD132PS zmm, zmm, zmm{k}{z}             [AVX512F]
 44089  //    * VFMADD132PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 44090  //    * VFMADD132PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 44091  //    * VFMADD132PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 44092  //    * VFMADD132PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 44093  //
func (self *Program) VFMADD132PS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // The optional trailing operand is only consumed by the {er}
    // (embedded-rounding) form below; any other arity panics immediately.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VFMADD132PS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMADD132PS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMADD132PS takes 3 or 4 operands")
    }
    // Each matching form registers one encoder on p; if several forms match,
    // multiple candidate encodings are recorded.
    // VFMADD132PS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX3 prefix emitted byte-by-byte (0xc4 escape, R/X+map byte,
            // W.vvvv.L.pp byte), then opcode 0x98 and a register-direct ModRM.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x98)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD132PS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x98)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADD132PS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x98)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD132PS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x98)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADD132PS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX form: the trailing bcode(v[0]) argument carries the
            // broadcast bit for the {1toN} memory variant; the final mrsd
            // argument (64) is the disp8 compression scale.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x98)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFMADD132PS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix built by hand (0x62 escape + 3 payload bytes).
            // NOTE(review): vcode(v[0]) appears to fold the {er} rounding
            // control into the prefix, and 0x10 sets the EVEX.b bit that
            // enables static rounding — matches the sibling {er} forms.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x98)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMADD132PS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX; the 0x40 constant in the last payload byte
            // distinguishes the 512-bit form from the 128/256-bit ones below.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x98)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD132PS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x98)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFMADD132PS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x98)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD132PS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x98)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFMADD132PS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x98)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // None of the supported forms matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFMADD132PS")
    }
    return p
}
 44232  
 44233  // VFMADD132SD performs "Fused Multiply-Add of Scalar Double-Precision Floating-Point Values".
 44234  //
 44235  // Mnemonic        : VFMADD132SD
 44236  // Supported forms : (5 forms)
 44237  //
 44238  //    * VFMADD132SD xmm, xmm, xmm                [FMA3]
 44239  //    * VFMADD132SD m64, xmm, xmm                [FMA3]
 44240  //    * VFMADD132SD m64, xmm, xmm{k}{z}          [AVX512F]
 44241  //    * VFMADD132SD {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
 44242  //    * VFMADD132SD xmm, xmm, xmm{k}{z}          [AVX512F]
 44243  //
func (self *Program) VFMADD132SD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // The optional trailing operand is only consumed by the {er}
    // (embedded-rounding) form below; any other arity panics immediately.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VFMADD132SD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMADD132SD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMADD132SD takes 3 or 4 operands")
    }
    // VFMADD132SD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX3 prefix emitted byte-by-byte, then opcode 0x99 and a
            // register-direct ModRM byte.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0x99)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD132SD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x99)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADD132SD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Scalar memory operand: no broadcast variant exists, so the
            // EVEX b argument is hard-wired to 0; disp8 scale is 8 bytes.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x99)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VFMADD132SD {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix built by hand (0x62 escape + 3 payload bytes).
            // NOTE(review): vcode(v[0]) appears to fold the {er} rounding
            // control into the prefix; 0x10 sets the EVEX.b bit.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x99)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMADD132SD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x99)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // None of the supported forms matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFMADD132SD")
    }
    return p
}
 44314  
 44315  // VFMADD132SS performs "Fused Multiply-Add of Scalar Single-Precision Floating-Point Values".
 44316  //
 44317  // Mnemonic        : VFMADD132SS
 44318  // Supported forms : (5 forms)
 44319  //
 44320  //    * VFMADD132SS xmm, xmm, xmm                [FMA3]
 44321  //    * VFMADD132SS m32, xmm, xmm                [FMA3]
 44322  //    * VFMADD132SS m32, xmm, xmm{k}{z}          [AVX512F]
 44323  //    * VFMADD132SS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
 44324  //    * VFMADD132SS xmm, xmm, xmm{k}{z}          [AVX512F]
 44325  //
func (self *Program) VFMADD132SS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // The optional trailing operand is only consumed by the {er}
    // (embedded-rounding) form below; any other arity panics immediately.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VFMADD132SS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMADD132SS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMADD132SS takes 3 or 4 operands")
    }
    // VFMADD132SS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX3 prefix emitted byte-by-byte, then opcode 0x99 and a
            // register-direct ModRM byte.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x99)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD132SS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x99)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADD132SS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Scalar memory operand: no broadcast variant exists, so the
            // EVEX b argument is hard-wired to 0; disp8 scale is 4 bytes.
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x99)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VFMADD132SS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix built by hand (0x62 escape + 3 payload bytes).
            // NOTE(review): vcode(v[0]) appears to fold the {er} rounding
            // control into the prefix; 0x10 sets the EVEX.b bit.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x99)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMADD132SS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x99)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // None of the supported forms matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFMADD132SS")
    }
    return p
}
 44396  
 44397  // VFMADD213PD performs "Fused Multiply-Add of Packed Double-Precision Floating-Point Values".
 44398  //
 44399  // Mnemonic        : VFMADD213PD
 44400  // Supported forms : (11 forms)
 44401  //
 44402  //    * VFMADD213PD xmm, xmm, xmm                   [FMA3]
 44403  //    * VFMADD213PD m128, xmm, xmm                  [FMA3]
 44404  //    * VFMADD213PD ymm, ymm, ymm                   [FMA3]
 44405  //    * VFMADD213PD m256, ymm, ymm                  [FMA3]
 44406  //    * VFMADD213PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
 44407  //    * VFMADD213PD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 44408  //    * VFMADD213PD zmm, zmm, zmm{k}{z}             [AVX512F]
 44409  //    * VFMADD213PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 44410  //    * VFMADD213PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 44411  //    * VFMADD213PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 44412  //    * VFMADD213PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 44413  //
func (self *Program) VFMADD213PD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // The optional trailing operand is only consumed by the {er}
    // (embedded-rounding) form below; any other arity panics immediately.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VFMADD213PD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMADD213PD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMADD213PD takes 3 or 4 operands")
    }
    // Each matching form registers one encoder on p; if several forms match,
    // multiple candidate encodings are recorded.
    // VFMADD213PD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX3 prefix emitted byte-by-byte, then opcode 0xa8 and a
            // register-direct ModRM byte.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0xa8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD213PD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xa8)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADD213PD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit(0xa8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD213PD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xa8)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADD213PD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX form: the trailing bcode(v[0]) argument carries the
            // broadcast bit for the {1toN} memory variant; the final mrsd
            // argument (64) is the disp8 compression scale.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xa8)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFMADD213PD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix built by hand (0x62 escape + 3 payload bytes).
            // NOTE(review): vcode(v[0]) appears to fold the {er} rounding
            // control into the prefix; 0x10 sets the EVEX.b bit.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xa8)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMADD213PD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX; the 0x40 constant in the last payload byte
            // distinguishes the 512-bit form from the 128/256-bit ones below.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xa8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD213PD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xa8)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFMADD213PD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xa8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD213PD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xa8)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFMADD213PD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xa8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // None of the supported forms matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFMADD213PD")
    }
    return p
}
 44552  
 44553  // VFMADD213PS performs "Fused Multiply-Add of Packed Single-Precision Floating-Point Values".
 44554  //
 44555  // Mnemonic        : VFMADD213PS
 44556  // Supported forms : (11 forms)
 44557  //
 44558  //    * VFMADD213PS xmm, xmm, xmm                   [FMA3]
 44559  //    * VFMADD213PS m128, xmm, xmm                  [FMA3]
 44560  //    * VFMADD213PS ymm, ymm, ymm                   [FMA3]
 44561  //    * VFMADD213PS m256, ymm, ymm                  [FMA3]
 44562  //    * VFMADD213PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
 44563  //    * VFMADD213PS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 44564  //    * VFMADD213PS zmm, zmm, zmm{k}{z}             [AVX512F]
 44565  //    * VFMADD213PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 44566  //    * VFMADD213PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 44567  //    * VFMADD213PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 44568  //    * VFMADD213PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 44569  //
func (self *Program) VFMADD213PS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // The optional trailing operand is only consumed by the {er}
    // (embedded-rounding) form below; any other arity panics immediately.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VFMADD213PS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMADD213PS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMADD213PS takes 3 or 4 operands")
    }
    // Each matching form registers one encoder on p; if several forms match,
    // multiple candidate encodings are recorded.
    // VFMADD213PS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX3 prefix emitted byte-by-byte, then opcode 0xa8 and a
            // register-direct ModRM byte.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xa8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD213PS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xa8)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADD213PS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0xa8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD213PS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xa8)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADD213PS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX form: the trailing bcode(v[0]) argument carries the
            // broadcast bit for the {1toN} memory variant; the final mrsd
            // argument (64) is the disp8 compression scale.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xa8)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFMADD213PS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix built by hand (0x62 escape + 3 payload bytes).
            // NOTE(review): vcode(v[0]) appears to fold the {er} rounding
            // control into the prefix; 0x10 sets the EVEX.b bit.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xa8)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMADD213PS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX; the 0x40 constant in the last payload byte
            // distinguishes the 512-bit form from the 128/256-bit ones below.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xa8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD213PS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xa8)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFMADD213PS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xa8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD213PS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xa8)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFMADD213PS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xa8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // None of the supported forms matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFMADD213PS")
    }
    return p
}
 44708  
 44709  // VFMADD213SD performs "Fused Multiply-Add of Scalar Double-Precision Floating-Point Values".
 44710  //
 44711  // Mnemonic        : VFMADD213SD
 44712  // Supported forms : (5 forms)
 44713  //
 44714  //    * VFMADD213SD xmm, xmm, xmm                [FMA3]
 44715  //    * VFMADD213SD m64, xmm, xmm                [FMA3]
 44716  //    * VFMADD213SD m64, xmm, xmm{k}{z}          [AVX512F]
 44717  //    * VFMADD213SD {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
 44718  //    * VFMADD213SD xmm, xmm, xmm{k}{z}          [AVX512F]
 44719  //
func (self *Program) VFMADD213SD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // The optional trailing operand is only consumed by the {er}
    // (embedded-rounding) form below; any other arity panics immediately.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VFMADD213SD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMADD213SD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMADD213SD takes 3 or 4 operands")
    }
    // VFMADD213SD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX3 prefix emitted byte-by-byte, then opcode 0xa9 and a
            // register-direct ModRM byte.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0xa9)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD213SD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xa9)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADD213SD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Scalar memory operand: no broadcast variant exists, so the
            // EVEX b argument is hard-wired to 0; disp8 scale is 8 bytes.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xa9)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VFMADD213SD {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix built by hand (0x62 escape + 3 payload bytes).
            // NOTE(review): vcode(v[0]) appears to fold the {er} rounding
            // control into the prefix; 0x10 sets the EVEX.b bit.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xa9)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMADD213SD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xa9)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // None of the supported forms matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFMADD213SD")
    }
    return p
}
 44790  
// VFMADD213SS performs "Fused Multiply-Add of Scalar Single-Precision Floating-Point Values".
//
// Mnemonic        : VFMADD213SS
// Supported forms : (5 forms)
//
//    * VFMADD213SS xmm, xmm, xmm                [FMA3]
//    * VFMADD213SS m32, xmm, xmm                [FMA3]
//    * VFMADD213SS m32, xmm, xmm{k}{z}          [AVX512F]
//    * VFMADD213SS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VFMADD213SS xmm, xmm, xmm{k}{z}          [AVX512F]
//
func (self *Program) VFMADD213SS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional 4th operand (vv) is only used by the embedded-rounding form.
    switch len(vv) {
        case 0  : p = self.alloc("VFMADD213SS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMADD213SS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMADD213SS takes 3 or 4 operands")
    }
    // Each matching operand form below appends one candidate encoding to p;
    // if no form matched, the p.len check at the end panics.
    // VFMADD213SS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // FMA3 register form: 3-byte VEX prefix (0xc4) built inline; high
        // register bits come from hcode()/hlcode(), opcode byte is 0xa9,
        // and the final 0xc0|... byte is the register-register ModRM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xa9)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD213SS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // Memory-operand form: m.vex3 derives the VEX prefix from the address,
        // then mrsd emits ModRM/SIB/displacement for the memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xa9)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADD213SS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX memory form with opmask/zeroing; the last mrsd argument (4) is
        // the disp8 compression factor for a 32-bit scalar memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xa9)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VFMADD213SS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // Embedded-rounding form: 4-byte EVEX prefix (0x62) built inline;
        // vcode(v[0]) folds the rounding-control operand into the prefix.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xa9)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMADD213SS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX register form with opmask/zeroing, prefix bytes built inline.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xa9)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VFMADD213SS")
    }
    return p
}
 44872  
// VFMADD231PD performs "Fused Multiply-Add of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VFMADD231PD
// Supported forms : (11 forms)
//
//    * VFMADD231PD xmm, xmm, xmm                   [FMA3]
//    * VFMADD231PD m128, xmm, xmm                  [FMA3]
//    * VFMADD231PD ymm, ymm, ymm                   [FMA3]
//    * VFMADD231PD m256, ymm, ymm                  [FMA3]
//    * VFMADD231PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFMADD231PD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFMADD231PD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFMADD231PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFMADD231PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFMADD231PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFMADD231PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VFMADD231PD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional 4th operand (vv) is only used by the embedded-rounding form.
    switch len(vv) {
        case 0  : p = self.alloc("VFMADD231PD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMADD231PD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMADD231PD takes 3 or 4 operands")
    }
    // Each matching operand form below appends one candidate encoding to p
    // (opcode byte 0xb8 throughout); if no form matched, the p.len check at
    // the end panics.
    // VFMADD231PD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // FMA3 128-bit register form: 3-byte VEX prefix (0xc4) built inline;
        // high register bits come from hcode()/hlcode().
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0xb8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD231PD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // Memory-operand form: m.vex3 derives the VEX prefix from the address,
        // then mrsd emits ModRM/SIB/displacement.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xb8)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADD231PD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // FMA3 256-bit register form; differs from the xmm form only in the
        // third VEX byte (0xfd vs 0xf9), which carries the vector length bit.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit(0xb8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD231PD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xb8)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADD231PD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX memory form with opmask/zeroing and optional 64-bit broadcast
        // (bcode); the last mrsd argument (64) is the disp8 compression factor.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xb8)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFMADD231PD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // Embedded-rounding form: 4-byte EVEX prefix (0x62) built inline;
        // vcode(v[0]) folds the rounding-control operand into the prefix.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xb8)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMADD231PD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX register forms below differ only in the length bits of the 4th
        // prefix byte: 0x40 (512-bit), 0x00 (128-bit), 0x20 (256-bit).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xb8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD231PD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xb8)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFMADD231PD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xb8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD231PD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xb8)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFMADD231PD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xb8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VFMADD231PD")
    }
    return p
}
 45028  
// VFMADD231PS performs "Fused Multiply-Add of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VFMADD231PS
// Supported forms : (11 forms)
//
//    * VFMADD231PS xmm, xmm, xmm                   [FMA3]
//    * VFMADD231PS m128, xmm, xmm                  [FMA3]
//    * VFMADD231PS ymm, ymm, ymm                   [FMA3]
//    * VFMADD231PS m256, ymm, ymm                  [FMA3]
//    * VFMADD231PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFMADD231PS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFMADD231PS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFMADD231PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFMADD231PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFMADD231PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFMADD231PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VFMADD231PS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional 4th operand (vv) is only used by the embedded-rounding form.
    switch len(vv) {
        case 0  : p = self.alloc("VFMADD231PS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMADD231PS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMADD231PS takes 3 or 4 operands")
    }
    // Each matching operand form below appends one candidate encoding to p
    // (opcode byte 0xb8 throughout); if no form matched, the p.len check at
    // the end panics. The prefix bytes differ from VFMADD231PD only in the
    // width/pp bits (0x79/0x7d/0x01/0x05 here vs 0xf9/0xfd/0x81/0x85).
    // VFMADD231PS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // FMA3 128-bit register form: 3-byte VEX prefix (0xc4) built inline.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xb8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD231PS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // Memory-operand form: m.vex3 derives the VEX prefix from the address,
        // then mrsd emits ModRM/SIB/displacement.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xb8)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADD231PS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // FMA3 256-bit register form; 0x7d carries the vector-length bit.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0xb8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD231PS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xb8)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADD231PS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX memory form with opmask/zeroing and optional 32-bit broadcast
        // (bcode); the last mrsd argument (64) is the disp8 compression factor.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xb8)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFMADD231PS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // Embedded-rounding form: 4-byte EVEX prefix (0x62) built inline;
        // vcode(v[0]) folds the rounding-control operand into the prefix.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xb8)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMADD231PS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX register forms below differ only in the length bits of the 4th
        // prefix byte: 0x40 (512-bit), 0x00 (128-bit), 0x20 (256-bit).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xb8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD231PS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xb8)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFMADD231PS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xb8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD231PS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xb8)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFMADD231PS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xb8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VFMADD231PS")
    }
    return p
}
 45184  
// VFMADD231SD performs "Fused Multiply-Add of Scalar Double-Precision Floating-Point Values".
//
// Mnemonic        : VFMADD231SD
// Supported forms : (5 forms)
//
//    * VFMADD231SD xmm, xmm, xmm                [FMA3]
//    * VFMADD231SD m64, xmm, xmm                [FMA3]
//    * VFMADD231SD m64, xmm, xmm{k}{z}          [AVX512F]
//    * VFMADD231SD {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VFMADD231SD xmm, xmm, xmm{k}{z}          [AVX512F]
//
func (self *Program) VFMADD231SD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional 4th operand (vv) is only used by the embedded-rounding form.
    switch len(vv) {
        case 0  : p = self.alloc("VFMADD231SD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMADD231SD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMADD231SD takes 3 or 4 operands")
    }
    // Each matching operand form below appends one candidate encoding to p
    // (opcode byte 0xb9 throughout); if no form matched, the p.len check at
    // the end panics.
    // VFMADD231SD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // FMA3 register form: 3-byte VEX prefix (0xc4) built inline; high
        // register bits come from hcode()/hlcode().
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0xb9)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD231SD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // Memory-operand form: m.vex3 derives the VEX prefix from the address,
        // then mrsd emits ModRM/SIB/displacement.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xb9)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADD231SD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX memory form with opmask/zeroing; the last mrsd argument (8) is
        // the disp8 compression factor for a 64-bit scalar memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xb9)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VFMADD231SD {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // Embedded-rounding form: 4-byte EVEX prefix (0x62) built inline;
        // vcode(v[0]) folds the rounding-control operand into the prefix.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xb9)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMADD231SD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX register form with opmask/zeroing, prefix bytes built inline.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xb9)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VFMADD231SD")
    }
    return p
}
 45266  
// VFMADD231SS performs "Fused Multiply-Add of Scalar Single-Precision Floating-Point Values".
//
// Mnemonic        : VFMADD231SS
// Supported forms : (5 forms)
//
//    * VFMADD231SS xmm, xmm, xmm                [FMA3]
//    * VFMADD231SS m32, xmm, xmm                [FMA3]
//    * VFMADD231SS m32, xmm, xmm{k}{z}          [AVX512F]
//    * VFMADD231SS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VFMADD231SS xmm, xmm, xmm{k}{z}          [AVX512F]
//
func (self *Program) VFMADD231SS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional 4th operand (vv) is only used by the embedded-rounding form.
    switch len(vv) {
        case 0  : p = self.alloc("VFMADD231SS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMADD231SS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMADD231SS takes 3 or 4 operands")
    }
    // Each matching operand form below appends one candidate encoding to p
    // (opcode byte 0xb9 throughout; the prefix bytes 0x79/0x7d/0x01/0x05
    // distinguish this single-precision form from VFMADD231SD). If no form
    // matched, the p.len check at the end panics.
    // VFMADD231SS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // FMA3 register form: 3-byte VEX prefix (0xc4) built inline; high
        // register bits come from hcode()/hlcode().
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xb9)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD231SS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // Memory-operand form: m.vex3 derives the VEX prefix from the address,
        // then mrsd emits ModRM/SIB/displacement.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xb9)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADD231SS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX memory form with opmask/zeroing; the last mrsd argument (4) is
        // the disp8 compression factor for a 32-bit scalar memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xb9)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VFMADD231SS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // Embedded-rounding form: 4-byte EVEX prefix (0x62) built inline;
        // vcode(v[0]) folds the rounding-control operand into the prefix.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xb9)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMADD231SS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX register form with opmask/zeroing, prefix bytes built inline.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xb9)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VFMADD231SS")
    }
    return p
}
 45348  
 45349  // VFMADDPD performs "Fused Multiply-Add of Packed Double-Precision Floating-Point Values".
 45350  //
 45351  // Mnemonic        : VFMADDPD
 45352  // Supported forms : (6 forms)
 45353  //
 45354  //    * VFMADDPD xmm, xmm, xmm, xmm     [FMA4]
 45355  //    * VFMADDPD m128, xmm, xmm, xmm    [FMA4]
 45356  //    * VFMADDPD xmm, m128, xmm, xmm    [FMA4]
 45357  //    * VFMADDPD ymm, ymm, ymm, ymm     [FMA4]
 45358  //    * VFMADDPD m256, ymm, ymm, ymm    [FMA4]
 45359  //    * VFMADDPD ymm, m256, ymm, ymm    [FMA4]
 45360  //
 45361  func (self *Program) VFMADDPD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
 45362      p := self.alloc("VFMADDPD", 4, Operands { v0, v1, v2, v3 })
        // One encoder is registered below for every operand form that matches;
        // if no form matched, p.len stays 0 and we panic at the bottom.
 45363      // VFMADDPD xmm, xmm, xmm, xmm
 45364      if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
 45365          self.require(ISA_FMA4)
 45366          p.domain = DomainFMA
                // FMA4 register form, VEX.W=1 variant: v[0] goes in ModRM.rm and
                // v[1] in the trailing /is4 immediate (register in bits 7:4).
                // High register bits are stored inverted in the VEX prefix,
                // hence the XORs below.
 45367          p.add(0, func(m *_Encoding, v []interface{}) {
 45368              m.emit(0xc4)
 45369              m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
 45370              m.emit(0xf9 ^ (hlcode(v[2]) << 3))
 45371              m.emit(0x69)
 45372              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
 45373              m.emit(hlcode(v[1]) << 4)
 45374          })
                // Equivalent VEX.W=0 variant: the two sources swap roles
                // (v[1] in ModRM.rm, v[0] in the /is4 immediate).
 45375          p.add(0, func(m *_Encoding, v []interface{}) {
 45376              m.emit(0xc4)
 45377              m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
 45378              m.emit(0x79 ^ (hlcode(v[2]) << 3))
 45379              m.emit(0x69)
 45380              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
 45381              m.emit(hlcode(v[0]) << 4)
 45382          })
 45383      }
 45384      // VFMADDPD m128, xmm, xmm, xmm
 45385      if isM128(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
 45386          self.require(ISA_FMA4)
 45387          p.domain = DomainFMA
                // Memory-operand form: vex3 builds the 3-byte VEX prefix and
                // mrsd emits ModRM/SIB/displacement (disp scale 1).
 45388          p.add(0, func(m *_Encoding, v []interface{}) {
 45389              m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2]))
 45390              m.emit(0x69)
 45391              m.mrsd(lcode(v[3]), addr(v[0]), 1)
 45392              m.emit(hlcode(v[1]) << 4)
 45393          })
 45394      }
 45395      // VFMADDPD xmm, m128, xmm, xmm
 45396      if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
 45397          self.require(ISA_FMA4)
 45398          p.domain = DomainFMA
 45399          p.add(0, func(m *_Encoding, v []interface{}) {
 45400              m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
 45401              m.emit(0x69)
 45402              m.mrsd(lcode(v[3]), addr(v[1]), 1)
 45403              m.emit(hlcode(v[0]) << 4)
 45404          })
 45405      }
 45406      // VFMADDPD ymm, ymm, ymm, ymm
 45407      if isYMM(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
 45408          self.require(ISA_FMA4)
 45409          p.domain = DomainFMA
                // Same as the XMM register forms but 256-bit (VEX.L bit set:
                // 0xfd/0x7d instead of 0xf9/0x79 in the third prefix byte).
 45410          p.add(0, func(m *_Encoding, v []interface{}) {
 45411              m.emit(0xc4)
 45412              m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
 45413              m.emit(0xfd ^ (hlcode(v[2]) << 3))
 45414              m.emit(0x69)
 45415              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
 45416              m.emit(hlcode(v[1]) << 4)
 45417          })
 45418          p.add(0, func(m *_Encoding, v []interface{}) {
 45419              m.emit(0xc4)
 45420              m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
 45421              m.emit(0x7d ^ (hlcode(v[2]) << 3))
 45422              m.emit(0x69)
 45423              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
 45424              m.emit(hlcode(v[0]) << 4)
 45425          })
 45426      }
 45427      // VFMADDPD m256, ymm, ymm, ymm
 45428      if isM256(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
 45429          self.require(ISA_FMA4)
 45430          p.domain = DomainFMA
 45431          p.add(0, func(m *_Encoding, v []interface{}) {
 45432              m.vex3(0xc4, 0b11, 0x85, hcode(v[3]), addr(v[0]), hlcode(v[2]))
 45433              m.emit(0x69)
 45434              m.mrsd(lcode(v[3]), addr(v[0]), 1)
 45435              m.emit(hlcode(v[1]) << 4)
 45436          })
 45437      }
 45438      // VFMADDPD ymm, m256, ymm, ymm
 45439      if isYMM(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
 45440          self.require(ISA_FMA4)
 45441          p.domain = DomainFMA
 45442          p.add(0, func(m *_Encoding, v []interface{}) {
 45443              m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
 45444              m.emit(0x69)
 45445              m.mrsd(lcode(v[3]), addr(v[1]), 1)
 45446              m.emit(hlcode(v[0]) << 4)
 45447          })
 45448      }
 45449      if p.len == 0 {
 45450          panic("invalid operands for VFMADDPD")
 45451      }
 45452      return p
 45453  }
 45454  
 45455  // VFMADDPS performs "Fused Multiply-Add of Packed Single-Precision Floating-Point Values".
 45456  //
 45457  // Mnemonic        : VFMADDPS
 45458  // Supported forms : (6 forms)
 45459  //
 45460  //    * VFMADDPS xmm, xmm, xmm, xmm     [FMA4]
 45461  //    * VFMADDPS m128, xmm, xmm, xmm    [FMA4]
 45462  //    * VFMADDPS xmm, m128, xmm, xmm    [FMA4]
 45463  //    * VFMADDPS ymm, ymm, ymm, ymm     [FMA4]
 45464  //    * VFMADDPS m256, ymm, ymm, ymm    [FMA4]
 45465  //    * VFMADDPS ymm, m256, ymm, ymm    [FMA4]
 45466  //
 45467  func (self *Program) VFMADDPS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
 45468      p := self.alloc("VFMADDPS", 4, Operands { v0, v1, v2, v3 })
        // Identical dispatch structure to the other FMA4 packed forms, with
        // opcode byte 0x68 (packed single). An encoder is registered for every
        // matching operand form; no match panics at the bottom.
 45469      // VFMADDPS xmm, xmm, xmm, xmm
 45470      if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
 45471          self.require(ISA_FMA4)
 45472          p.domain = DomainFMA
                // Two equivalent FMA4 register encodings: VEX.W=1 (v[0] in
                // ModRM.rm, v[1] in the /is4 immediate) and VEX.W=0 (swapped).
 45473          p.add(0, func(m *_Encoding, v []interface{}) {
 45474              m.emit(0xc4)
 45475              m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
 45476              m.emit(0xf9 ^ (hlcode(v[2]) << 3))
 45477              m.emit(0x68)
 45478              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
 45479              m.emit(hlcode(v[1]) << 4)
 45480          })
 45481          p.add(0, func(m *_Encoding, v []interface{}) {
 45482              m.emit(0xc4)
 45483              m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
 45484              m.emit(0x79 ^ (hlcode(v[2]) << 3))
 45485              m.emit(0x68)
 45486              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
 45487              m.emit(hlcode(v[0]) << 4)
 45488          })
 45489      }
 45490      // VFMADDPS m128, xmm, xmm, xmm
 45491      if isM128(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
 45492          self.require(ISA_FMA4)
 45493          p.domain = DomainFMA
                // Memory forms: vex3 builds the VEX prefix, mrsd the
                // ModRM/SIB/displacement bytes.
 45494          p.add(0, func(m *_Encoding, v []interface{}) {
 45495              m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2]))
 45496              m.emit(0x68)
 45497              m.mrsd(lcode(v[3]), addr(v[0]), 1)
 45498              m.emit(hlcode(v[1]) << 4)
 45499          })
 45500      }
 45501      // VFMADDPS xmm, m128, xmm, xmm
 45502      if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
 45503          self.require(ISA_FMA4)
 45504          p.domain = DomainFMA
 45505          p.add(0, func(m *_Encoding, v []interface{}) {
 45506              m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
 45507              m.emit(0x68)
 45508              m.mrsd(lcode(v[3]), addr(v[1]), 1)
 45509              m.emit(hlcode(v[0]) << 4)
 45510          })
 45511      }
 45512      // VFMADDPS ymm, ymm, ymm, ymm
 45513      if isYMM(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
 45514          self.require(ISA_FMA4)
 45515          p.domain = DomainFMA
                // 256-bit register forms (VEX.L set: 0xfd/0x7d prefix bytes).
 45516          p.add(0, func(m *_Encoding, v []interface{}) {
 45517              m.emit(0xc4)
 45518              m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
 45519              m.emit(0xfd ^ (hlcode(v[2]) << 3))
 45520              m.emit(0x68)
 45521              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
 45522              m.emit(hlcode(v[1]) << 4)
 45523          })
 45524          p.add(0, func(m *_Encoding, v []interface{}) {
 45525              m.emit(0xc4)
 45526              m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
 45527              m.emit(0x7d ^ (hlcode(v[2]) << 3))
 45528              m.emit(0x68)
 45529              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
 45530              m.emit(hlcode(v[0]) << 4)
 45531          })
 45532      }
 45533      // VFMADDPS m256, ymm, ymm, ymm
 45534      if isM256(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
 45535          self.require(ISA_FMA4)
 45536          p.domain = DomainFMA
 45537          p.add(0, func(m *_Encoding, v []interface{}) {
 45538              m.vex3(0xc4, 0b11, 0x85, hcode(v[3]), addr(v[0]), hlcode(v[2]))
 45539              m.emit(0x68)
 45540              m.mrsd(lcode(v[3]), addr(v[0]), 1)
 45541              m.emit(hlcode(v[1]) << 4)
 45542          })
 45543      }
 45544      // VFMADDPS ymm, m256, ymm, ymm
 45545      if isYMM(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
 45546          self.require(ISA_FMA4)
 45547          p.domain = DomainFMA
 45548          p.add(0, func(m *_Encoding, v []interface{}) {
 45549              m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
 45550              m.emit(0x68)
 45551              m.mrsd(lcode(v[3]), addr(v[1]), 1)
 45552              m.emit(hlcode(v[0]) << 4)
 45553          })
 45554      }
 45555      if p.len == 0 {
 45556          panic("invalid operands for VFMADDPS")
 45557      }
 45558      return p
 45559  }
 45560  
 45561  // VFMADDSD performs "Fused Multiply-Add of Scalar Double-Precision Floating-Point Values".
 45562  //
 45563  // Mnemonic        : VFMADDSD
 45564  // Supported forms : (3 forms)
 45565  //
 45566  //    * VFMADDSD xmm, xmm, xmm, xmm    [FMA4]
 45567  //    * VFMADDSD m64, xmm, xmm, xmm    [FMA4]
 45568  //    * VFMADDSD xmm, m64, xmm, xmm    [FMA4]
 45569  //
 45570  func (self *Program) VFMADDSD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
 45571      p := self.alloc("VFMADDSD", 4, Operands { v0, v1, v2, v3 })
        // Scalar-double FMA4 form (opcode 0x6b, m64 memory operands). An
        // encoder is registered per matching operand form; no match panics.
 45572      // VFMADDSD xmm, xmm, xmm, xmm
 45573      if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
 45574          self.require(ISA_FMA4)
 45575          p.domain = DomainFMA
                // Two equivalent FMA4 register encodings: VEX.W=1 (v[0] in
                // ModRM.rm, v[1] in the /is4 immediate) and VEX.W=0 (swapped).
 45576          p.add(0, func(m *_Encoding, v []interface{}) {
 45577              m.emit(0xc4)
 45578              m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
 45579              m.emit(0xf9 ^ (hlcode(v[2]) << 3))
 45580              m.emit(0x6b)
 45581              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
 45582              m.emit(hlcode(v[1]) << 4)
 45583          })
 45584          p.add(0, func(m *_Encoding, v []interface{}) {
 45585              m.emit(0xc4)
 45586              m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
 45587              m.emit(0x79 ^ (hlcode(v[2]) << 3))
 45588              m.emit(0x6b)
 45589              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
 45590              m.emit(hlcode(v[0]) << 4)
 45591          })
 45592      }
 45593      // VFMADDSD m64, xmm, xmm, xmm
 45594      if isM64(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
 45595          self.require(ISA_FMA4)
 45596          p.domain = DomainFMA
                // Memory forms: vex3 builds the VEX prefix, mrsd the
                // ModRM/SIB/displacement bytes.
 45597          p.add(0, func(m *_Encoding, v []interface{}) {
 45598              m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2]))
 45599              m.emit(0x6b)
 45600              m.mrsd(lcode(v[3]), addr(v[0]), 1)
 45601              m.emit(hlcode(v[1]) << 4)
 45602          })
 45603      }
 45604      // VFMADDSD xmm, m64, xmm, xmm
 45605      if isXMM(v0) && isM64(v1) && isXMM(v2) && isXMM(v3) {
 45606          self.require(ISA_FMA4)
 45607          p.domain = DomainFMA
 45608          p.add(0, func(m *_Encoding, v []interface{}) {
 45609              m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
 45610              m.emit(0x6b)
 45611              m.mrsd(lcode(v[3]), addr(v[1]), 1)
 45612              m.emit(hlcode(v[0]) << 4)
 45613          })
 45614      }
 45615      if p.len == 0 {
 45616          panic("invalid operands for VFMADDSD")
 45617      }
 45618      return p
 45619  }
 45620  
 45621  // VFMADDSS performs "Fused Multiply-Add of Scalar Single-Precision Floating-Point Values".
 45622  //
 45623  // Mnemonic        : VFMADDSS
 45624  // Supported forms : (3 forms)
 45625  //
 45626  //    * VFMADDSS xmm, xmm, xmm, xmm    [FMA4]
 45627  //    * VFMADDSS m32, xmm, xmm, xmm    [FMA4]
 45628  //    * VFMADDSS xmm, m32, xmm, xmm    [FMA4]
 45629  //
 45630  func (self *Program) VFMADDSS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
 45631      p := self.alloc("VFMADDSS", 4, Operands { v0, v1, v2, v3 })
        // Scalar-single FMA4 form (opcode 0x6a, m32 memory operands). An
        // encoder is registered per matching operand form; no match panics.
 45632      // VFMADDSS xmm, xmm, xmm, xmm
 45633      if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
 45634          self.require(ISA_FMA4)
 45635          p.domain = DomainFMA
                // Two equivalent FMA4 register encodings: VEX.W=1 (v[0] in
                // ModRM.rm, v[1] in the /is4 immediate) and VEX.W=0 (swapped).
 45636          p.add(0, func(m *_Encoding, v []interface{}) {
 45637              m.emit(0xc4)
 45638              m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
 45639              m.emit(0xf9 ^ (hlcode(v[2]) << 3))
 45640              m.emit(0x6a)
 45641              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
 45642              m.emit(hlcode(v[1]) << 4)
 45643          })
 45644          p.add(0, func(m *_Encoding, v []interface{}) {
 45645              m.emit(0xc4)
 45646              m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
 45647              m.emit(0x79 ^ (hlcode(v[2]) << 3))
 45648              m.emit(0x6a)
 45649              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
 45650              m.emit(hlcode(v[0]) << 4)
 45651          })
 45652      }
 45653      // VFMADDSS m32, xmm, xmm, xmm
 45654      if isM32(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
 45655          self.require(ISA_FMA4)
 45656          p.domain = DomainFMA
                // Memory forms: vex3 builds the VEX prefix, mrsd the
                // ModRM/SIB/displacement bytes.
 45657          p.add(0, func(m *_Encoding, v []interface{}) {
 45658              m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2]))
 45659              m.emit(0x6a)
 45660              m.mrsd(lcode(v[3]), addr(v[0]), 1)
 45661              m.emit(hlcode(v[1]) << 4)
 45662          })
 45663      }
 45664      // VFMADDSS xmm, m32, xmm, xmm
 45665      if isXMM(v0) && isM32(v1) && isXMM(v2) && isXMM(v3) {
 45666          self.require(ISA_FMA4)
 45667          p.domain = DomainFMA
 45668          p.add(0, func(m *_Encoding, v []interface{}) {
 45669              m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
 45670              m.emit(0x6a)
 45671              m.mrsd(lcode(v[3]), addr(v[1]), 1)
 45672              m.emit(hlcode(v[0]) << 4)
 45673          })
 45674      }
 45675      if p.len == 0 {
 45676          panic("invalid operands for VFMADDSS")
 45677      }
 45678      return p
 45679  }
 45680  
 45681  // VFMADDSUB132PD performs "Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values".
 45682  //
 45683  // Mnemonic        : VFMADDSUB132PD
 45684  // Supported forms : (11 forms)
 45685  //
 45686  //    * VFMADDSUB132PD xmm, xmm, xmm                   [FMA3]
 45687  //    * VFMADDSUB132PD m128, xmm, xmm                  [FMA3]
 45688  //    * VFMADDSUB132PD ymm, ymm, ymm                   [FMA3]
 45689  //    * VFMADDSUB132PD m256, ymm, ymm                  [FMA3]
 45690  //    * VFMADDSUB132PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
 45691  //    * VFMADDSUB132PD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 45692  //    * VFMADDSUB132PD zmm, zmm, zmm{k}{z}             [AVX512F]
 45693  //    * VFMADDSUB132PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 45694  //    * VFMADDSUB132PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 45695  //    * VFMADDSUB132PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 45696  //    * VFMADDSUB132PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 45697  //
 45698  func (self *Program) VFMADDSUB132PD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
 45699      var p *Instruction
        // 3 operands for the normal forms; a 4th operand exists only for the
        // {er} embedded-rounding form (v0 is then the rounding mode).
 45700      switch len(vv) {
 45701          case 0  : p = self.alloc("VFMADDSUB132PD", 3, Operands { v0, v1, v2 })
 45702          case 1  : p = self.alloc("VFMADDSUB132PD", 4, Operands { v0, v1, v2, vv[0] })
 45703          default : panic("instruction VFMADDSUB132PD takes 3 or 4 operands")
 45704      }
 45705      // VFMADDSUB132PD xmm, xmm, xmm
 45706      if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
 45707          self.require(ISA_FMA3)
 45708          p.domain = DomainFMA
                // FMA3 register form: 3-byte VEX prefix; high register bits and
                // vvvv (selecting v[1]) are stored inverted, hence the XORs.
 45709          p.add(0, func(m *_Encoding, v []interface{}) {
 45710              m.emit(0xc4)
 45711              m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
 45712              m.emit(0xf9 ^ (hlcode(v[1]) << 3))
 45713              m.emit(0x96)
 45714              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 45715          })
 45716      }
 45717      // VFMADDSUB132PD m128, xmm, xmm
 45718      if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
 45719          self.require(ISA_FMA3)
 45720          p.domain = DomainFMA
 45721          p.add(0, func(m *_Encoding, v []interface{}) {
 45722              m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 45723              m.emit(0x96)
 45724              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 45725          })
 45726      }
 45727      // VFMADDSUB132PD ymm, ymm, ymm
 45728      if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
 45729          self.require(ISA_FMA3)
 45730          p.domain = DomainFMA
                // 256-bit variant of the register form (VEX.L set: 0xfd).
 45731          p.add(0, func(m *_Encoding, v []interface{}) {
 45732              m.emit(0xc4)
 45733              m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
 45734              m.emit(0xfd ^ (hlcode(v[1]) << 3))
 45735              m.emit(0x96)
 45736              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 45737          })
 45738      }
 45739      // VFMADDSUB132PD m256, ymm, ymm
 45740      if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
 45741          self.require(ISA_FMA3)
 45742          p.domain = DomainFMA
 45743          p.add(0, func(m *_Encoding, v []interface{}) {
 45744              m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 45745              m.emit(0x96)
 45746              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 45747          })
 45748      }
 45749      // VFMADDSUB132PD m512/m64bcst, zmm, zmm{k}{z}
 45750      if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
 45751          self.require(ISA_AVX512F)
 45752          p.domain = DomainFMA
                // EVEX memory form; kcode/zcode carry the {k} mask and {z}
                // zeroing flags, bcode the broadcast bit. The final mrsd
                // argument (64 = full vector bytes) is the disp8 compression unit.
 45753          p.add(0, func(m *_Encoding, v []interface{}) {
 45754              m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
 45755              m.emit(0x96)
 45756              m.mrsd(lcode(v[2]), addr(v[0]), 64)
 45757          })
 45758      }
 45759      // VFMADDSUB132PD {er}, zmm, zmm, zmm{k}{z}
 45760      if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
 45761          self.require(ISA_AVX512F)
 45762          p.domain = DomainFMA
                // Embedded-rounding form: vcode(v[0]) puts the rounding mode in
                // the L'L bits and 0x10 sets EVEX.b to activate it.
 45763          p.add(0, func(m *_Encoding, v []interface{}) {
 45764              m.emit(0x62)
 45765              m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
 45766              m.emit(0xfd ^ (hlcode(v[2]) << 3))
 45767              m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
 45768              m.emit(0x96)
 45769              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
 45770          })
 45771      }
 45772      // VFMADDSUB132PD zmm, zmm, zmm{k}{z}
 45773      if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
 45774          self.require(ISA_AVX512F)
 45775          p.domain = DomainFMA
                // EVEX register form, 512-bit (0x40 selects the 512-bit length).
 45776          p.add(0, func(m *_Encoding, v []interface{}) {
 45777              m.emit(0x62)
 45778              m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 45779              m.emit(0xfd ^ (hlcode(v[1]) << 3))
 45780              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
 45781              m.emit(0x96)
 45782              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 45783          })
 45784      }
 45785      // VFMADDSUB132PD m128/m64bcst, xmm, xmm{k}{z}
 45786      if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
 45787          self.require(ISA_AVX512VL | ISA_AVX512F)
 45788          p.domain = DomainFMA
 45789          p.add(0, func(m *_Encoding, v []interface{}) {
 45790              m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
 45791              m.emit(0x96)
 45792              m.mrsd(lcode(v[2]), addr(v[0]), 16)
 45793          })
 45794      }
 45795      // VFMADDSUB132PD xmm, xmm, xmm{k}{z}
 45796      if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
 45797          self.require(ISA_AVX512VL | ISA_AVX512F)
 45798          p.domain = DomainFMA
                // EVEX register form, 128-bit (length bits 0x00).
 45799          p.add(0, func(m *_Encoding, v []interface{}) {
 45800              m.emit(0x62)
 45801              m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 45802              m.emit(0xfd ^ (hlcode(v[1]) << 3))
 45803              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
 45804              m.emit(0x96)
 45805              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 45806          })
 45807      }
 45808      // VFMADDSUB132PD m256/m64bcst, ymm, ymm{k}{z}
 45809      if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
 45810          self.require(ISA_AVX512VL | ISA_AVX512F)
 45811          p.domain = DomainFMA
 45812          p.add(0, func(m *_Encoding, v []interface{}) {
 45813              m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
 45814              m.emit(0x96)
 45815              m.mrsd(lcode(v[2]), addr(v[0]), 32)
 45816          })
 45817      }
 45818      // VFMADDSUB132PD ymm, ymm, ymm{k}{z}
 45819      if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
 45820          self.require(ISA_AVX512VL | ISA_AVX512F)
 45821          p.domain = DomainFMA
                // EVEX register form, 256-bit (length bits 0x20).
 45822          p.add(0, func(m *_Encoding, v []interface{}) {
 45823              m.emit(0x62)
 45824              m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 45825              m.emit(0xfd ^ (hlcode(v[1]) << 3))
 45826              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
 45827              m.emit(0x96)
 45828              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 45829          })
 45830      }
 45831      if p.len == 0 {
 45832          panic("invalid operands for VFMADDSUB132PD")
 45833      }
 45834      return p
 45835  }
 45836  
 45837  // VFMADDSUB132PS performs "Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values".
 45838  //
 45839  // Mnemonic        : VFMADDSUB132PS
 45840  // Supported forms : (11 forms)
 45841  //
 45842  //    * VFMADDSUB132PS xmm, xmm, xmm                   [FMA3]
 45843  //    * VFMADDSUB132PS m128, xmm, xmm                  [FMA3]
 45844  //    * VFMADDSUB132PS ymm, ymm, ymm                   [FMA3]
 45845  //    * VFMADDSUB132PS m256, ymm, ymm                  [FMA3]
 45846  //    * VFMADDSUB132PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
 45847  //    * VFMADDSUB132PS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 45848  //    * VFMADDSUB132PS zmm, zmm, zmm{k}{z}             [AVX512F]
 45849  //    * VFMADDSUB132PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 45850  //    * VFMADDSUB132PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 45851  //    * VFMADDSUB132PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 45852  //    * VFMADDSUB132PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 45853  //
 45854  func (self *Program) VFMADDSUB132PS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
 45855      var p *Instruction
        // 3 operands for the normal forms; a 4th operand exists only for the
        // {er} embedded-rounding form (v0 is then the rounding mode).
 45856      switch len(vv) {
 45857          case 0  : p = self.alloc("VFMADDSUB132PS", 3, Operands { v0, v1, v2 })
 45858          case 1  : p = self.alloc("VFMADDSUB132PS", 4, Operands { v0, v1, v2, vv[0] })
 45859          default : panic("instruction VFMADDSUB132PS takes 3 or 4 operands")
 45860      }
 45861      // VFMADDSUB132PS xmm, xmm, xmm
 45862      if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
 45863          self.require(ISA_FMA3)
 45864          p.domain = DomainFMA
                // FMA3 register form. Single-precision variant: prefix byte
                // 0x79/0x7d (W=0) where the PD twin uses 0xf9/0xfd (W=1).
 45865          p.add(0, func(m *_Encoding, v []interface{}) {
 45866              m.emit(0xc4)
 45867              m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
 45868              m.emit(0x79 ^ (hlcode(v[1]) << 3))
 45869              m.emit(0x96)
 45870              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 45871          })
 45872      }
 45873      // VFMADDSUB132PS m128, xmm, xmm
 45874      if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
 45875          self.require(ISA_FMA3)
 45876          p.domain = DomainFMA
 45877          p.add(0, func(m *_Encoding, v []interface{}) {
 45878              m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 45879              m.emit(0x96)
 45880              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 45881          })
 45882      }
 45883      // VFMADDSUB132PS ymm, ymm, ymm
 45884      if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
 45885          self.require(ISA_FMA3)
 45886          p.domain = DomainFMA
                // 256-bit variant of the register form (VEX.L set: 0x7d).
 45887          p.add(0, func(m *_Encoding, v []interface{}) {
 45888              m.emit(0xc4)
 45889              m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
 45890              m.emit(0x7d ^ (hlcode(v[1]) << 3))
 45891              m.emit(0x96)
 45892              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 45893          })
 45894      }
 45895      // VFMADDSUB132PS m256, ymm, ymm
 45896      if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
 45897          self.require(ISA_FMA3)
 45898          p.domain = DomainFMA
 45899          p.add(0, func(m *_Encoding, v []interface{}) {
 45900              m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 45901              m.emit(0x96)
 45902              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 45903          })
 45904      }
 45905      // VFMADDSUB132PS m512/m32bcst, zmm, zmm{k}{z}
 45906      if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
 45907          self.require(ISA_AVX512F)
 45908          p.domain = DomainFMA
                // EVEX memory form; kcode/zcode carry the {k} mask and {z}
                // zeroing flags, bcode the broadcast bit. The final mrsd
                // argument (64 = full vector bytes) is the disp8 compression unit.
 45909          p.add(0, func(m *_Encoding, v []interface{}) {
 45910              m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
 45911              m.emit(0x96)
 45912              m.mrsd(lcode(v[2]), addr(v[0]), 64)
 45913          })
 45914      }
 45915      // VFMADDSUB132PS {er}, zmm, zmm, zmm{k}{z}
 45916      if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
 45917          self.require(ISA_AVX512F)
 45918          p.domain = DomainFMA
                // Embedded-rounding form: vcode(v[0]) puts the rounding mode in
                // the L'L bits and 0x10 sets EVEX.b to activate it.
 45919          p.add(0, func(m *_Encoding, v []interface{}) {
 45920              m.emit(0x62)
 45921              m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
 45922              m.emit(0x7d ^ (hlcode(v[2]) << 3))
 45923              m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
 45924              m.emit(0x96)
 45925              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
 45926          })
 45927      }
 45928      // VFMADDSUB132PS zmm, zmm, zmm{k}{z}
 45929      if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
 45930          self.require(ISA_AVX512F)
 45931          p.domain = DomainFMA
                // EVEX register form, 512-bit (0x40 selects the 512-bit length).
 45932          p.add(0, func(m *_Encoding, v []interface{}) {
 45933              m.emit(0x62)
 45934              m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 45935              m.emit(0x7d ^ (hlcode(v[1]) << 3))
 45936              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
 45937              m.emit(0x96)
 45938              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 45939          })
 45940      }
 45941      // VFMADDSUB132PS m128/m32bcst, xmm, xmm{k}{z}
 45942      if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
 45943          self.require(ISA_AVX512VL | ISA_AVX512F)
 45944          p.domain = DomainFMA
 45945          p.add(0, func(m *_Encoding, v []interface{}) {
 45946              m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
 45947              m.emit(0x96)
 45948              m.mrsd(lcode(v[2]), addr(v[0]), 16)
 45949          })
 45950      }
 45951      // VFMADDSUB132PS xmm, xmm, xmm{k}{z}
 45952      if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
 45953          self.require(ISA_AVX512VL | ISA_AVX512F)
 45954          p.domain = DomainFMA
                // EVEX register form, 128-bit (length bits 0x00).
 45955          p.add(0, func(m *_Encoding, v []interface{}) {
 45956              m.emit(0x62)
 45957              m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 45958              m.emit(0x7d ^ (hlcode(v[1]) << 3))
 45959              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
 45960              m.emit(0x96)
 45961              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 45962          })
 45963      }
 45964      // VFMADDSUB132PS m256/m32bcst, ymm, ymm{k}{z}
 45965      if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
 45966          self.require(ISA_AVX512VL | ISA_AVX512F)
 45967          p.domain = DomainFMA
 45968          p.add(0, func(m *_Encoding, v []interface{}) {
 45969              m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
 45970              m.emit(0x96)
 45971              m.mrsd(lcode(v[2]), addr(v[0]), 32)
 45972          })
 45973      }
 45974      // VFMADDSUB132PS ymm, ymm, ymm{k}{z}
 45975      if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
 45976          self.require(ISA_AVX512VL | ISA_AVX512F)
 45977          p.domain = DomainFMA
                // EVEX register form, 256-bit (length bits 0x20).
 45978          p.add(0, func(m *_Encoding, v []interface{}) {
 45979              m.emit(0x62)
 45980              m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 45981              m.emit(0x7d ^ (hlcode(v[1]) << 3))
 45982              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
 45983              m.emit(0x96)
 45984              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 45985          })
 45986      }
 45987      if p.len == 0 {
 45988          panic("invalid operands for VFMADDSUB132PS")
 45989      }
 45990      return p
 45991  }
 45992  
 45993  // VFMADDSUB213PD performs "Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values".
 45994  //
 45995  // Mnemonic        : VFMADDSUB213PD
 45996  // Supported forms : (11 forms)
 45997  //
 45998  //    * VFMADDSUB213PD xmm, xmm, xmm                   [FMA3]
 45999  //    * VFMADDSUB213PD m128, xmm, xmm                  [FMA3]
 46000  //    * VFMADDSUB213PD ymm, ymm, ymm                   [FMA3]
 46001  //    * VFMADDSUB213PD m256, ymm, ymm                  [FMA3]
 46002  //    * VFMADDSUB213PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
 46003  //    * VFMADDSUB213PD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 46004  //    * VFMADDSUB213PD zmm, zmm, zmm{k}{z}             [AVX512F]
 46005  //    * VFMADDSUB213PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 46006  //    * VFMADDSUB213PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 46007  //    * VFMADDSUB213PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 46008  //    * VFMADDSUB213PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 46009  //
 46010  func (self *Program) VFMADDSUB213PD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
 46011      var p *Instruction
 46012      switch len(vv) {
 46013          case 0  : p = self.alloc("VFMADDSUB213PD", 3, Operands { v0, v1, v2 })
 46014          case 1  : p = self.alloc("VFMADDSUB213PD", 4, Operands { v0, v1, v2, vv[0] })
 46015          default : panic("instruction VFMADDSUB213PD takes 3 or 4 operands")
 46016      }
 46017      // VFMADDSUB213PD xmm, xmm, xmm
 46018      if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
 46019          self.require(ISA_FMA3)
 46020          p.domain = DomainFMA
 46021          p.add(0, func(m *_Encoding, v []interface{}) {
 46022              m.emit(0xc4)
 46023              m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
 46024              m.emit(0xf9 ^ (hlcode(v[1]) << 3))
 46025              m.emit(0xa6)
 46026              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 46027          })
 46028      }
 46029      // VFMADDSUB213PD m128, xmm, xmm
 46030      if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
 46031          self.require(ISA_FMA3)
 46032          p.domain = DomainFMA
 46033          p.add(0, func(m *_Encoding, v []interface{}) {
 46034              m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 46035              m.emit(0xa6)
 46036              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 46037          })
 46038      }
 46039      // VFMADDSUB213PD ymm, ymm, ymm
 46040      if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
 46041          self.require(ISA_FMA3)
 46042          p.domain = DomainFMA
 46043          p.add(0, func(m *_Encoding, v []interface{}) {
 46044              m.emit(0xc4)
 46045              m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
 46046              m.emit(0xfd ^ (hlcode(v[1]) << 3))
 46047              m.emit(0xa6)
 46048              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 46049          })
 46050      }
 46051      // VFMADDSUB213PD m256, ymm, ymm
 46052      if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
 46053          self.require(ISA_FMA3)
 46054          p.domain = DomainFMA
 46055          p.add(0, func(m *_Encoding, v []interface{}) {
 46056              m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 46057              m.emit(0xa6)
 46058              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 46059          })
 46060      }
 46061      // VFMADDSUB213PD m512/m64bcst, zmm, zmm{k}{z}
 46062      if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
 46063          self.require(ISA_AVX512F)
 46064          p.domain = DomainFMA
 46065          p.add(0, func(m *_Encoding, v []interface{}) {
 46066              m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
 46067              m.emit(0xa6)
 46068              m.mrsd(lcode(v[2]), addr(v[0]), 64)
 46069          })
 46070      }
 46071      // VFMADDSUB213PD {er}, zmm, zmm, zmm{k}{z}
 46072      if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
 46073          self.require(ISA_AVX512F)
 46074          p.domain = DomainFMA
 46075          p.add(0, func(m *_Encoding, v []interface{}) {
 46076              m.emit(0x62)
 46077              m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
 46078              m.emit(0xfd ^ (hlcode(v[2]) << 3))
 46079              m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
 46080              m.emit(0xa6)
 46081              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
 46082          })
 46083      }
 46084      // VFMADDSUB213PD zmm, zmm, zmm{k}{z}
 46085      if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
 46086          self.require(ISA_AVX512F)
 46087          p.domain = DomainFMA
 46088          p.add(0, func(m *_Encoding, v []interface{}) {
 46089              m.emit(0x62)
 46090              m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 46091              m.emit(0xfd ^ (hlcode(v[1]) << 3))
 46092              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
 46093              m.emit(0xa6)
 46094              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 46095          })
 46096      }
 46097      // VFMADDSUB213PD m128/m64bcst, xmm, xmm{k}{z}
 46098      if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
 46099          self.require(ISA_AVX512VL | ISA_AVX512F)
 46100          p.domain = DomainFMA
 46101          p.add(0, func(m *_Encoding, v []interface{}) {
 46102              m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
 46103              m.emit(0xa6)
 46104              m.mrsd(lcode(v[2]), addr(v[0]), 16)
 46105          })
 46106      }
 46107      // VFMADDSUB213PD xmm, xmm, xmm{k}{z}
 46108      if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
 46109          self.require(ISA_AVX512VL | ISA_AVX512F)
 46110          p.domain = DomainFMA
 46111          p.add(0, func(m *_Encoding, v []interface{}) {
 46112              m.emit(0x62)
 46113              m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 46114              m.emit(0xfd ^ (hlcode(v[1]) << 3))
 46115              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
 46116              m.emit(0xa6)
 46117              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 46118          })
 46119      }
 46120      // VFMADDSUB213PD m256/m64bcst, ymm, ymm{k}{z}
 46121      if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
 46122          self.require(ISA_AVX512VL | ISA_AVX512F)
 46123          p.domain = DomainFMA
 46124          p.add(0, func(m *_Encoding, v []interface{}) {
 46125              m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
 46126              m.emit(0xa6)
 46127              m.mrsd(lcode(v[2]), addr(v[0]), 32)
 46128          })
 46129      }
 46130      // VFMADDSUB213PD ymm, ymm, ymm{k}{z}
 46131      if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
 46132          self.require(ISA_AVX512VL | ISA_AVX512F)
 46133          p.domain = DomainFMA
 46134          p.add(0, func(m *_Encoding, v []interface{}) {
 46135              m.emit(0x62)
 46136              m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 46137              m.emit(0xfd ^ (hlcode(v[1]) << 3))
 46138              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
 46139              m.emit(0xa6)
 46140              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 46141          })
 46142      }
 46143      if p.len == 0 {
 46144          panic("invalid operands for VFMADDSUB213PD")
 46145      }
 46146      return p
 46147  }
 46148  
 46149  // VFMADDSUB213PS performs "Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values".
 46150  //
 46151  // Mnemonic        : VFMADDSUB213PS
 46152  // Supported forms : (11 forms)
 46153  //
 46154  //    * VFMADDSUB213PS xmm, xmm, xmm                   [FMA3]
 46155  //    * VFMADDSUB213PS m128, xmm, xmm                  [FMA3]
 46156  //    * VFMADDSUB213PS ymm, ymm, ymm                   [FMA3]
 46157  //    * VFMADDSUB213PS m256, ymm, ymm                  [FMA3]
 46158  //    * VFMADDSUB213PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
 46159  //    * VFMADDSUB213PS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 46160  //    * VFMADDSUB213PS zmm, zmm, zmm{k}{z}             [AVX512F]
 46161  //    * VFMADDSUB213PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 46162  //    * VFMADDSUB213PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 46163  //    * VFMADDSUB213PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 46164  //    * VFMADDSUB213PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 46165  //
func (self *Program) VFMADDSUB213PS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // 3 operands for the ordinary forms; a 4th is accepted only for the
    // embedded-rounding ("{er}") form, which carries the rounding mode as an
    // extra leading operand.
    switch len(vv) {
        case 0  : p = self.alloc("VFMADDSUB213PS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMADDSUB213PS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMADDSUB213PS takes 3 or 4 operands")
    }
    // VFMADDSUB213PS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix (0xC4 escape), opcode 0xa6,
            // then a register-direct ModRM byte (0xc0 | reg<<3 | rm).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xa6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADDSUB213PS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // vex3 derives the VEX prefix from the memory operand; mrsd emits
            // ModRM/SIB/displacement with a displacement scale of 1.
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xa6)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADDSUB213PS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0xa6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADDSUB213PS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xa6)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADDSUB213PS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded form; the trailing 64 passed to mrsd is presumably
            // the disp8*N compression scale for the 64-byte memory operand —
            // see _Encoding.mrsd.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xa6)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFMADDSUB213PS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix (0x62 escape): vcode(v[0]) injects the
            // rounding-control bits from the {er} operand, and the 0x10 bit
            // enables static rounding (EVEX.b) for this form.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xa6)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMADDSUB213PS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix; the trailing 0x40 selects the
            // 512-bit vector length (the xmm/ymm variants use 0x00/0x20).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xa6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADDSUB213PS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xa6)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFMADDSUB213PS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xa6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADDSUB213PS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xa6)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFMADDSUB213PS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xa6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No candidate encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFMADDSUB213PS")
    }
    return p
}
 46304  
 46305  // VFMADDSUB231PD performs "Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values".
 46306  //
 46307  // Mnemonic        : VFMADDSUB231PD
 46308  // Supported forms : (11 forms)
 46309  //
 46310  //    * VFMADDSUB231PD xmm, xmm, xmm                   [FMA3]
 46311  //    * VFMADDSUB231PD m128, xmm, xmm                  [FMA3]
 46312  //    * VFMADDSUB231PD ymm, ymm, ymm                   [FMA3]
 46313  //    * VFMADDSUB231PD m256, ymm, ymm                  [FMA3]
 46314  //    * VFMADDSUB231PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
 46315  //    * VFMADDSUB231PD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 46316  //    * VFMADDSUB231PD zmm, zmm, zmm{k}{z}             [AVX512F]
 46317  //    * VFMADDSUB231PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 46318  //    * VFMADDSUB231PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 46319  //    * VFMADDSUB231PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 46320  //    * VFMADDSUB231PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 46321  //
func (self *Program) VFMADDSUB231PD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // 3 operands for the ordinary forms; a 4th is accepted only for the
    // embedded-rounding ("{er}") form, which carries the rounding mode as an
    // extra leading operand.
    switch len(vv) {
        case 0  : p = self.alloc("VFMADDSUB231PD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMADDSUB231PD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMADDSUB231PD takes 3 or 4 operands")
    }
    // VFMADDSUB231PD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix (0xC4 escape), opcode 0xb6,
            // then a register-direct ModRM byte (0xc0 | reg<<3 | rm).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0xb6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADDSUB231PD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // vex3 derives the VEX prefix from the memory operand; mrsd emits
            // ModRM/SIB/displacement with a displacement scale of 1.
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xb6)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADDSUB231PD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit(0xb6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADDSUB231PD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xb6)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADDSUB231PD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded form; the trailing 64 passed to mrsd is presumably
            // the disp8*N compression scale for the 64-byte memory operand —
            // see _Encoding.mrsd.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xb6)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFMADDSUB231PD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix (0x62 escape): vcode(v[0]) injects the
            // rounding-control bits from the {er} operand, and the 0x10 bit
            // enables static rounding (EVEX.b) for this form.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xb6)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMADDSUB231PD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix; the trailing 0x40 selects the
            // 512-bit vector length (the xmm/ymm variants use 0x00/0x20).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xb6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADDSUB231PD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xb6)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFMADDSUB231PD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xb6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADDSUB231PD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xb6)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFMADDSUB231PD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xb6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No candidate encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFMADDSUB231PD")
    }
    return p
}
 46460  
 46461  // VFMADDSUB231PS performs "Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values".
 46462  //
 46463  // Mnemonic        : VFMADDSUB231PS
 46464  // Supported forms : (11 forms)
 46465  //
 46466  //    * VFMADDSUB231PS xmm, xmm, xmm                   [FMA3]
 46467  //    * VFMADDSUB231PS m128, xmm, xmm                  [FMA3]
 46468  //    * VFMADDSUB231PS ymm, ymm, ymm                   [FMA3]
 46469  //    * VFMADDSUB231PS m256, ymm, ymm                  [FMA3]
 46470  //    * VFMADDSUB231PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
 46471  //    * VFMADDSUB231PS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 46472  //    * VFMADDSUB231PS zmm, zmm, zmm{k}{z}             [AVX512F]
 46473  //    * VFMADDSUB231PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 46474  //    * VFMADDSUB231PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 46475  //    * VFMADDSUB231PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 46476  //    * VFMADDSUB231PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 46477  //
func (self *Program) VFMADDSUB231PS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // 3 operands for the ordinary forms; a 4th is accepted only for the
    // embedded-rounding ("{er}") form, which carries the rounding mode as an
    // extra leading operand.
    switch len(vv) {
        case 0  : p = self.alloc("VFMADDSUB231PS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMADDSUB231PS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMADDSUB231PS takes 3 or 4 operands")
    }
    // VFMADDSUB231PS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix (0xC4 escape), opcode 0xb6,
            // then a register-direct ModRM byte (0xc0 | reg<<3 | rm).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xb6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADDSUB231PS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // vex3 derives the VEX prefix from the memory operand; mrsd emits
            // ModRM/SIB/displacement with a displacement scale of 1.
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xb6)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADDSUB231PS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0xb6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADDSUB231PS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xb6)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADDSUB231PS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded form; the trailing 64 passed to mrsd is presumably
            // the disp8*N compression scale for the 64-byte memory operand —
            // see _Encoding.mrsd.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xb6)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFMADDSUB231PS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix (0x62 escape): vcode(v[0]) injects the
            // rounding-control bits from the {er} operand, and the 0x10 bit
            // enables static rounding (EVEX.b) for this form.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xb6)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMADDSUB231PS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix; the trailing 0x40 selects the
            // 512-bit vector length (the xmm/ymm variants use 0x00/0x20).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xb6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADDSUB231PS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xb6)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFMADDSUB231PS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xb6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADDSUB231PS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xb6)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFMADDSUB231PS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xb6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No candidate encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFMADDSUB231PS")
    }
    return p
}
 46616  
 46617  // VFMADDSUBPD performs "Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values".
 46618  //
 46619  // Mnemonic        : VFMADDSUBPD
 46620  // Supported forms : (6 forms)
 46621  //
 46622  //    * VFMADDSUBPD xmm, xmm, xmm, xmm     [FMA4]
 46623  //    * VFMADDSUBPD m128, xmm, xmm, xmm    [FMA4]
 46624  //    * VFMADDSUBPD xmm, m128, xmm, xmm    [FMA4]
 46625  //    * VFMADDSUBPD ymm, ymm, ymm, ymm     [FMA4]
 46626  //    * VFMADDSUBPD m256, ymm, ymm, ymm    [FMA4]
 46627  //    * VFMADDSUBPD ymm, m256, ymm, ymm    [FMA4]
 46628  //
func (self *Program) VFMADDSUBPD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VFMADDSUBPD", 4, Operands { v0, v1, v2, v3 })
    // VFMADDSUBPD xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Two equivalent encodings are registered for the all-register form:
        // FMA4 carries the fourth register in a trailing immediate byte (the
        // "is4" byte), so v0 and v1 can swap roles between the ModRM r/m field
        // and that trailing byte.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[2]) << 3))
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            // Trailing is4 byte: upper nibble selects the extra register operand.
            m.emit(hlcode(v[1]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMADDSUBPD m128, xmm, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // vex3 derives the VEX prefix from the memory operand; mrsd emits
            // ModRM/SIB/displacement with a displacement scale of 1, and the
            // trailing is4 byte selects the remaining register operand.
            m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x5d)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFMADDSUBPD xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x5d)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMADDSUBPD ymm, ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Same dual-encoding trick as the xmm form, with the 256-bit prefix bytes.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMADDSUBPD m256, ymm, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x85, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x5d)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFMADDSUBPD ymm, m256, ymm, ymm
    if isYMM(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x5d)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No candidate encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFMADDSUBPD")
    }
    return p
}
 46722  
// VFMADDSUBPS performs "Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VFMADDSUBPS
// Supported forms : (6 forms)
//
//    * VFMADDSUBPS xmm, xmm, xmm, xmm     [FMA4]
//    * VFMADDSUBPS m128, xmm, xmm, xmm    [FMA4]
//    * VFMADDSUBPS xmm, m128, xmm, xmm    [FMA4]
//    * VFMADDSUBPS ymm, ymm, ymm, ymm     [FMA4]
//    * VFMADDSUBPS m256, ymm, ymm, ymm    [FMA4]
//    * VFMADDSUBPS ymm, m256, ymm, ymm    [FMA4]
//
// This is a 4-operand FMA4 instruction: the 4th operand selects the
// destination, and one of the first two sources is encoded in the VEX
// "is4" immediate byte (bits 7:4). Opcode byte is 0x5c in the 0F3A map.
func (self *Program) VFMADDSUBPS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VFMADDSUBPS", 4, Operands { v0, v1, v2, v3 })
    // VFMADDSUBPS xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // All-register form has two equivalent encodings (either source may sit
        // in ModRM.rm, distinguished by VEX.W); both are added so the encoder
        // can pick one.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX escape
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))  // VEX.R/VEX.B from high register bits; 0F3A map
            m.emit(0xf9 ^ (hlcode(v[2]) << 3))                      // VEX.W=1, vvvv = ~v2, 66 prefix, L=0 (128-bit)
            m.emit(0x5c)                                            // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))           // ModRM: reg = v3 (dst), rm = v0
            m.emit(hlcode(v[1]) << 4)                               // is4 immediate selects v1
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))                      // VEX.W=0 variant: rm = v1, is4 selects v0
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMADDSUBPS m128, xmm, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX.W=1 (0x81): the memory operand occupies ModRM.rm.
            m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x5c)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)                               // is4 immediate selects v1
        })
    }
    // VFMADDSUBPS xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX.W=0 (0x01): memory operand is the second source.
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x5c)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)                               // is4 immediate selects v0
        })
    }
    // VFMADDSUBPS ymm, ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Same dual encodings as the xmm form, with VEX.L=1 (256-bit).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))                      // VEX.W=1, L=1
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                      // VEX.W=0, L=1
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMADDSUBPS m256, ymm, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x85, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x5c)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFMADDSUBPS ymm, m256, ymm, ymm
    if isYMM(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x5c)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched the supplied operand types: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VFMADDSUBPS")
    }
    return p
}
 46828  
// VFMSUB132PD performs "Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VFMSUB132PD
// Supported forms : (11 forms)
//
//    * VFMSUB132PD xmm, xmm, xmm                   [FMA3]
//    * VFMSUB132PD m128, xmm, xmm                  [FMA3]
//    * VFMSUB132PD ymm, ymm, ymm                   [FMA3]
//    * VFMSUB132PD m256, ymm, ymm                  [FMA3]
//    * VFMSUB132PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFMSUB132PD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFMSUB132PD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFMSUB132PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFMSUB132PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFMSUB132PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFMSUB132PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// FMA3 forms use a 3-byte VEX prefix; AVX-512 forms use the 4-byte EVEX
// prefix (0x62) with optional masking {k}, zeroing {z}, broadcast, and
// embedded rounding {er}. Opcode byte is 0x9a in the 0F38 map.
func (self *Program) VFMSUB132PD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // 4th operand is only present for the {er} (embedded-rounding) form.
    switch len(vv) {
        case 0  : p = self.alloc("VFMSUB132PD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMSUB132PD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMSUB132PD takes 3 or 4 operands")
    }
    // VFMSUB132PD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // VEX.R/VEX.B from high register bits; 0F38 map
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))                      // VEX.W=1 (PD), vvvv = ~v1, 66 prefix, L=0
            m.emit(0x9a)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: reg = v2 (dst), rm = v0
        })
    }
    // VFMSUB132PD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x9a)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                      // memory ModRM/SIB/disp, no disp8 scaling under VEX
        })
    }
    // VFMSUB132PD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                      // same as xmm form but VEX.L=1 (256-bit)
            m.emit(0x9a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUB132PD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x9a)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUB132PD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX with L'L=0b10 (512-bit); bcode(v[0]) sets EVEX.b for 64-bit broadcast.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x9a)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)                     // disp8 compressed by the 64-byte tuple size
        })
    }
    // VFMSUB132PD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                            // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))  // P0: inverted R/B/R', mm = 0F38
            m.emit(0xfd ^ (hlcode(v[2]) << 3))                      // P1: W=1, vvvv = ~v2, pp = 66
            // P2: z bit, rounding mode in L'L (vcode of the {er} operand), V', aaa mask;
            // the trailing 0x10 sets EVEX.b to enable static rounding.
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x9a)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMSUB132PD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // 0x40: L'L = 512-bit
            m.emit(0x9a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUB132PD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x9a)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)                     // 16-byte tuple for disp8 compression
        })
    }
    // VFMSUB132PD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // 0x00: L'L = 128-bit
            m.emit(0x9a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUB132PD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x9a)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)                     // 32-byte tuple for disp8 compression
        })
    }
    // VFMSUB132PD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // 0x20: L'L = 256-bit
            m.emit(0x9a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VFMSUB132PD")
    }
    return p
}
 46984  
// VFMSUB132PS performs "Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VFMSUB132PS
// Supported forms : (11 forms)
//
//    * VFMSUB132PS xmm, xmm, xmm                   [FMA3]
//    * VFMSUB132PS m128, xmm, xmm                  [FMA3]
//    * VFMSUB132PS ymm, ymm, ymm                   [FMA3]
//    * VFMSUB132PS m256, ymm, ymm                  [FMA3]
//    * VFMSUB132PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFMSUB132PS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFMSUB132PS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFMSUB132PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFMSUB132PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFMSUB132PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFMSUB132PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Single-precision counterpart of VFMSUB132PD: same opcode (0x9a, 0F38
// map) but encoded with W=0, and 32-bit element broadcast for the
// memory-broadcast EVEX forms.
func (self *Program) VFMSUB132PS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // 4th operand is only present for the {er} (embedded-rounding) form.
    switch len(vv) {
        case 0  : p = self.alloc("VFMSUB132PS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMSUB132PS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMSUB132PS takes 3 or 4 operands")
    }
    // VFMSUB132PS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // VEX.R/VEX.B from high register bits; 0F38 map
            m.emit(0x79 ^ (hlcode(v[1]) << 3))                      // VEX.W=0 (PS), vvvv = ~v1, 66 prefix, L=0
            m.emit(0x9a)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: reg = v2 (dst), rm = v0
        })
    }
    // VFMSUB132PS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x9a)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUB132PS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                      // same as xmm form but VEX.L=1 (256-bit)
            m.emit(0x9a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUB132PS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x9a)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUB132PS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX with L'L=0b10 (512-bit); bcode(v[0]) sets EVEX.b for 32-bit broadcast.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x9a)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)                     // disp8 compressed by the 64-byte tuple size
        })
    }
    // VFMSUB132PS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                            // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))  // P0: inverted R/B/R', mm = 0F38
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                      // P1: W=0, vvvv = ~v2, pp = 66
            // P2: z bit, rounding mode (vcode of the {er} operand), V', aaa mask;
            // the trailing 0x10 sets EVEX.b to enable static rounding.
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x9a)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMSUB132PS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // 0x40: L'L = 512-bit
            m.emit(0x9a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUB132PS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x9a)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)                     // 16-byte tuple for disp8 compression
        })
    }
    // VFMSUB132PS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // 0x00: L'L = 128-bit
            m.emit(0x9a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUB132PS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x9a)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)                     // 32-byte tuple for disp8 compression
        })
    }
    // VFMSUB132PS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // 0x20: L'L = 256-bit
            m.emit(0x9a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VFMSUB132PS")
    }
    return p
}
 47140  
// VFMSUB132SD performs "Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values".
//
// Mnemonic        : VFMSUB132SD
// Supported forms : (5 forms)
//
//    * VFMSUB132SD xmm, xmm, xmm                [FMA3]
//    * VFMSUB132SD m64, xmm, xmm                [FMA3]
//    * VFMSUB132SD m64, xmm, xmm{k}{z}          [AVX512F]
//    * VFMSUB132SD {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VFMSUB132SD xmm, xmm, xmm{k}{z}          [AVX512F]
//
// Scalar double-precision variant: opcode 0x9b (0F38 map), W=1. The
// memory operand is a single 64-bit element, so the EVEX disp8 tuple
// size is 8 bytes and no broadcast is possible.
func (self *Program) VFMSUB132SD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // 4th operand is only present for the {er} (embedded-rounding) form.
    switch len(vv) {
        case 0  : p = self.alloc("VFMSUB132SD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMSUB132SD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMSUB132SD takes 3 or 4 operands")
    }
    // VFMSUB132SD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // VEX.R/VEX.B from high register bits; 0F38 map
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))                      // VEX.W=1 (SD), vvvv = ~v1, 66 prefix
            m.emit(0x9b)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: reg = v2 (dst), rm = v0
        })
    }
    // VFMSUB132SD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x9b)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUB132SD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Final 0 argument: no broadcast bit for scalar memory forms.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x9b)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)                      // disp8 compressed by the 8-byte element size
        })
    }
    // VFMSUB132SD {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                            // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))  // P0: inverted R/B/R', mm = 0F38
            m.emit(0xfd ^ (hlcode(v[2]) << 3))                      // P1: W=1, vvvv = ~v2, pp = 66
            // P2: z bit, rounding mode (vcode of the {er} operand), V', aaa mask;
            // the trailing 0x10 sets EVEX.b to enable static rounding.
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x9b)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMSUB132SD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x9b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VFMSUB132SD")
    }
    return p
}
 47222  
// VFMSUB132SS performs "Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values".
//
// Mnemonic        : VFMSUB132SS
// Supported forms : (5 forms)
//
//    * VFMSUB132SS xmm, xmm, xmm                [FMA3]
//    * VFMSUB132SS m32, xmm, xmm                [FMA3]
//    * VFMSUB132SS m32, xmm, xmm{k}{z}          [AVX512F]
//    * VFMSUB132SS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VFMSUB132SS xmm, xmm, xmm{k}{z}          [AVX512F]
//
// Scalar single-precision variant: opcode 0x9b (0F38 map), W=0. The
// memory operand is a single 32-bit element, so the EVEX disp8 tuple
// size is 4 bytes and no broadcast is possible.
func (self *Program) VFMSUB132SS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // 4th operand is only present for the {er} (embedded-rounding) form.
    switch len(vv) {
        case 0  : p = self.alloc("VFMSUB132SS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMSUB132SS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMSUB132SS takes 3 or 4 operands")
    }
    // VFMSUB132SS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // VEX.R/VEX.B from high register bits; 0F38 map
            m.emit(0x79 ^ (hlcode(v[1]) << 3))                      // VEX.W=0 (SS), vvvv = ~v1, 66 prefix
            m.emit(0x9b)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: reg = v2 (dst), rm = v0
        })
    }
    // VFMSUB132SS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x9b)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUB132SS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Final 0 argument: no broadcast bit for scalar memory forms.
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x9b)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)                      // disp8 compressed by the 4-byte element size
        })
    }
    // VFMSUB132SS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                            // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))  // P0: inverted R/B/R', mm = 0F38
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                      // P1: W=0, vvvv = ~v2, pp = 66
            // P2: z bit, rounding mode (vcode of the {er} operand), V', aaa mask;
            // the trailing 0x10 sets EVEX.b to enable static rounding.
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x9b)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMSUB132SS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x9b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VFMSUB132SS")
    }
    return p
}
 47304  
 47305  // VFMSUB213PD performs "Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values".
 47306  //
 47307  // Mnemonic        : VFMSUB213PD
 47308  // Supported forms : (11 forms)
 47309  //
 47310  //    * VFMSUB213PD xmm, xmm, xmm                   [FMA3]
 47311  //    * VFMSUB213PD m128, xmm, xmm                  [FMA3]
 47312  //    * VFMSUB213PD ymm, ymm, ymm                   [FMA3]
 47313  //    * VFMSUB213PD m256, ymm, ymm                  [FMA3]
 47314  //    * VFMSUB213PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
 47315  //    * VFMSUB213PD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 47316  //    * VFMSUB213PD zmm, zmm, zmm{k}{z}             [AVX512F]
 47317  //    * VFMSUB213PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 47318  //    * VFMSUB213PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 47319  //    * VFMSUB213PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 47320  //    * VFMSUB213PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 47321  //
func (self *Program) VFMSUB213PD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional 4th operand is only used by the {er} (embedded-rounding) form.
    switch len(vv) {
        case 0  : p = self.alloc("VFMSUB213PD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMSUB213PD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMSUB213PD takes 3 or 4 operands")
    }
    // Each form whose operand types match registers one candidate encoder via p.add.
    // VFMSUB213PD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // inverted R/B register-extension bits, opcode map 0F38
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))                      // W=1, vvvv = ~src1, L=128, pp=66
            m.emit(0xaa)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: mod=11, reg=dst, rm=src2
        })
    }
    // VFMSUB213PD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))  // VEX3: map 0F38, W1+66, dst ext bit, memory operand, ~src1
            m.emit(0xaa)                                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                               // ModRM/SIB/disp, disp scale 1 (no disp8 compression under VEX)
        })
    }
    // VFMSUB213PD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // inverted R/B register-extension bits, opcode map 0F38
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                      // W=1, vvvv = ~src1, L=256, pp=66
            m.emit(0xaa)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: mod=11, reg=dst, rm=src2
        })
    }
    // VFMSUB213PD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))  // VEX3: map 0F38, W1+66+L256, dst ext bit, memory operand, ~src1
            m.emit(0xaa)                                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                               // ModRM/SIB/disp, disp scale 1 (no disp8 compression under VEX)
        })
    }
    // VFMSUB213PD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))  // EVEX: map 0F38, W1+66, 512-bit, mask/zeroing, broadcast
            m.emit(0xaa)                         // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)  // ModRM + memory; disp8 compressed at 64-byte granularity
        })
    }
    // VFMSUB213PD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))  // inverted R/B/R' extension bits, map 0F38
            m.emit(0xfd ^ (hlcode(v[2]) << 3))                                              // W=1, vvvv = ~src1, pp=66
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)  // z, rounding control, V', mask aaa, b=1 (embedded rounding)
            m.emit(0xaa)                                                                    // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                   // ModRM: mod=11, reg=dst, rm=src2
        })
    }
    // VFMSUB213PD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // inverted R/B/R' extension bits, map 0F38
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                              // W=1, vvvv = ~src1, pp=66
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // z, V', mask aaa, L'L = 512-bit
            m.emit(0xaa)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: mod=11, reg=dst, rm=src2
        })
    }
    // VFMSUB213PD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))  // EVEX: map 0F38, W1+66, 128-bit, mask/zeroing, broadcast
            m.emit(0xaa)                         // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 16)  // ModRM + memory; disp8 compressed at 16-byte granularity
        })
    }
    // VFMSUB213PD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // inverted R/B/R' extension bits, map 0F38
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                              // W=1, vvvv = ~src1, pp=66
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // z, V', mask aaa, L'L = 128-bit
            m.emit(0xaa)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: mod=11, reg=dst, rm=src2
        })
    }
    // VFMSUB213PD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))  // EVEX: map 0F38, W1+66, 256-bit, mask/zeroing, broadcast
            m.emit(0xaa)                         // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 32)  // ModRM + memory; disp8 compressed at 32-byte granularity
        })
    }
    // VFMSUB213PD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // inverted R/B/R' extension bits, map 0F38
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                              // W=1, vvvv = ~src1, pp=66
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // z, V', mask aaa, L'L = 256-bit
            m.emit(0xaa)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: mod=11, reg=dst, rm=src2
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFMSUB213PD")
    }
    return p
}
 47460  
 47461  // VFMSUB213PS performs "Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values".
 47462  //
 47463  // Mnemonic        : VFMSUB213PS
 47464  // Supported forms : (11 forms)
 47465  //
 47466  //    * VFMSUB213PS xmm, xmm, xmm                   [FMA3]
 47467  //    * VFMSUB213PS m128, xmm, xmm                  [FMA3]
 47468  //    * VFMSUB213PS ymm, ymm, ymm                   [FMA3]
 47469  //    * VFMSUB213PS m256, ymm, ymm                  [FMA3]
 47470  //    * VFMSUB213PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
 47471  //    * VFMSUB213PS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 47472  //    * VFMSUB213PS zmm, zmm, zmm{k}{z}             [AVX512F]
 47473  //    * VFMSUB213PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 47474  //    * VFMSUB213PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 47475  //    * VFMSUB213PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 47476  //    * VFMSUB213PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 47477  //
func (self *Program) VFMSUB213PS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional 4th operand is only used by the {er} (embedded-rounding) form.
    switch len(vv) {
        case 0  : p = self.alloc("VFMSUB213PS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMSUB213PS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMSUB213PS takes 3 or 4 operands")
    }
    // Each form whose operand types match registers one candidate encoder via p.add.
    // VFMSUB213PS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // inverted R/B register-extension bits, opcode map 0F38
            m.emit(0x79 ^ (hlcode(v[1]) << 3))                      // W=0, vvvv = ~src1, L=128, pp=66
            m.emit(0xaa)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: mod=11, reg=dst, rm=src2
        })
    }
    // VFMSUB213PS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))  // VEX3: map 0F38, W0+66, dst ext bit, memory operand, ~src1
            m.emit(0xaa)                                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                               // ModRM/SIB/disp, disp scale 1 (no disp8 compression under VEX)
        })
    }
    // VFMSUB213PS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // inverted R/B register-extension bits, opcode map 0F38
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                      // W=0, vvvv = ~src1, L=256, pp=66
            m.emit(0xaa)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: mod=11, reg=dst, rm=src2
        })
    }
    // VFMSUB213PS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))  // VEX3: map 0F38, W0+66+L256, dst ext bit, memory operand, ~src1
            m.emit(0xaa)                                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                               // ModRM/SIB/disp, disp scale 1 (no disp8 compression under VEX)
        })
    }
    // VFMSUB213PS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))  // EVEX: map 0F38, W0+66, 512-bit, mask/zeroing, broadcast
            m.emit(0xaa)                         // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)  // ModRM + memory; disp8 compressed at 64-byte granularity
        })
    }
    // VFMSUB213PS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))  // inverted R/B/R' extension bits, map 0F38
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                                              // W=0, vvvv = ~src1, pp=66
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)  // z, rounding control, V', mask aaa, b=1 (embedded rounding)
            m.emit(0xaa)                                                                    // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                   // ModRM: mod=11, reg=dst, rm=src2
        })
    }
    // VFMSUB213PS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // inverted R/B/R' extension bits, map 0F38
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // W=0, vvvv = ~src1, pp=66
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // z, V', mask aaa, L'L = 512-bit
            m.emit(0xaa)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: mod=11, reg=dst, rm=src2
        })
    }
    // VFMSUB213PS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))  // EVEX: map 0F38, W0+66, 128-bit, mask/zeroing, broadcast
            m.emit(0xaa)                         // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 16)  // ModRM + memory; disp8 compressed at 16-byte granularity
        })
    }
    // VFMSUB213PS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // inverted R/B/R' extension bits, map 0F38
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // W=0, vvvv = ~src1, pp=66
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // z, V', mask aaa, L'L = 128-bit
            m.emit(0xaa)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: mod=11, reg=dst, rm=src2
        })
    }
    // VFMSUB213PS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))  // EVEX: map 0F38, W0+66, 256-bit, mask/zeroing, broadcast
            m.emit(0xaa)                         // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 32)  // ModRM + memory; disp8 compressed at 32-byte granularity
        })
    }
    // VFMSUB213PS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // inverted R/B/R' extension bits, map 0F38
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // W=0, vvvv = ~src1, pp=66
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // z, V', mask aaa, L'L = 256-bit
            m.emit(0xaa)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: mod=11, reg=dst, rm=src2
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFMSUB213PS")
    }
    return p
}
 47616  
 47617  // VFMSUB213SD performs "Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values".
 47618  //
 47619  // Mnemonic        : VFMSUB213SD
 47620  // Supported forms : (5 forms)
 47621  //
 47622  //    * VFMSUB213SD xmm, xmm, xmm                [FMA3]
 47623  //    * VFMSUB213SD m64, xmm, xmm                [FMA3]
 47624  //    * VFMSUB213SD m64, xmm, xmm{k}{z}          [AVX512F]
 47625  //    * VFMSUB213SD {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
 47626  //    * VFMSUB213SD xmm, xmm, xmm{k}{z}          [AVX512F]
 47627  //
func (self *Program) VFMSUB213SD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional 4th operand is only used by the {er} (embedded-rounding) form.
    switch len(vv) {
        case 0  : p = self.alloc("VFMSUB213SD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMSUB213SD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMSUB213SD takes 3 or 4 operands")
    }
    // Each form whose operand types match registers one candidate encoder via p.add.
    // VFMSUB213SD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // inverted R/B register-extension bits, opcode map 0F38
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))                      // W=1, vvvv = ~src1, pp=66
            m.emit(0xab)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: mod=11, reg=dst, rm=src2
        })
    }
    // VFMSUB213SD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))  // VEX3: map 0F38, W1+66, dst ext bit, memory operand, ~src1
            m.emit(0xab)                                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                               // ModRM/SIB/disp, disp scale 1 (no disp8 compression under VEX)
        })
    }
    // VFMSUB213SD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)  // EVEX: map 0F38, W1+66, mask/zeroing, no broadcast (scalar)
            m.emit(0xab)                        // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 8)  // ModRM + memory; disp8 compressed at 8-byte granularity
        })
    }
    // VFMSUB213SD {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))  // inverted R/B/R' extension bits, map 0F38
            m.emit(0xfd ^ (hlcode(v[2]) << 3))                                              // W=1, vvvv = ~src1, pp=66
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)  // z, rounding control, V', mask aaa, b=1 (embedded rounding)
            m.emit(0xab)                                                                    // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                   // ModRM: mod=11, reg=dst, rm=src2
        })
    }
    // VFMSUB213SD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // inverted R/B/R' extension bits, map 0F38
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                              // W=1, vvvv = ~src1, pp=66
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // z, V', mask aaa
            m.emit(0xab)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: mod=11, reg=dst, rm=src2
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFMSUB213SD")
    }
    return p
}
 47698  
 47699  // VFMSUB213SS performs "Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values".
 47700  //
 47701  // Mnemonic        : VFMSUB213SS
 47702  // Supported forms : (5 forms)
 47703  //
 47704  //    * VFMSUB213SS xmm, xmm, xmm                [FMA3]
 47705  //    * VFMSUB213SS m32, xmm, xmm                [FMA3]
 47706  //    * VFMSUB213SS m32, xmm, xmm{k}{z}          [AVX512F]
 47707  //    * VFMSUB213SS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
 47708  //    * VFMSUB213SS xmm, xmm, xmm{k}{z}          [AVX512F]
 47709  //
func (self *Program) VFMSUB213SS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional 4th operand is only used by the {er} (embedded-rounding) form.
    switch len(vv) {
        case 0  : p = self.alloc("VFMSUB213SS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMSUB213SS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMSUB213SS takes 3 or 4 operands")
    }
    // Each form whose operand types match registers one candidate encoder via p.add.
    // VFMSUB213SS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // inverted R/B register-extension bits, opcode map 0F38
            m.emit(0x79 ^ (hlcode(v[1]) << 3))                      // W=0, vvvv = ~src1, pp=66
            m.emit(0xab)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: mod=11, reg=dst, rm=src2
        })
    }
    // VFMSUB213SS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))  // VEX3: map 0F38, W0+66, dst ext bit, memory operand, ~src1
            m.emit(0xab)                                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                               // ModRM/SIB/disp, disp scale 1 (no disp8 compression under VEX)
        })
    }
    // VFMSUB213SS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)  // EVEX: map 0F38, W0+66, mask/zeroing, no broadcast (scalar)
            m.emit(0xab)                        // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 4)  // ModRM + memory; disp8 compressed at 4-byte granularity
        })
    }
    // VFMSUB213SS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))  // inverted R/B/R' extension bits, map 0F38
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                                              // W=0, vvvv = ~src1, pp=66
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)  // z, rounding control, V', mask aaa, b=1 (embedded rounding)
            m.emit(0xab)                                                                    // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                   // ModRM: mod=11, reg=dst, rm=src2
        })
    }
    // VFMSUB213SS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // inverted R/B/R' extension bits, map 0F38
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // W=0, vvvv = ~src1, pp=66
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // z, V', mask aaa
            m.emit(0xab)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: mod=11, reg=dst, rm=src2
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFMSUB213SS")
    }
    return p
}
 47780  
 47781  // VFMSUB231PD performs "Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values".
 47782  //
 47783  // Mnemonic        : VFMSUB231PD
 47784  // Supported forms : (11 forms)
 47785  //
 47786  //    * VFMSUB231PD xmm, xmm, xmm                   [FMA3]
 47787  //    * VFMSUB231PD m128, xmm, xmm                  [FMA3]
 47788  //    * VFMSUB231PD ymm, ymm, ymm                   [FMA3]
 47789  //    * VFMSUB231PD m256, ymm, ymm                  [FMA3]
 47790  //    * VFMSUB231PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
 47791  //    * VFMSUB231PD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 47792  //    * VFMSUB231PD zmm, zmm, zmm{k}{z}             [AVX512F]
 47793  //    * VFMSUB231PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 47794  //    * VFMSUB231PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 47795  //    * VFMSUB231PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 47796  //    * VFMSUB231PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 47797  //
func (self *Program) VFMSUB231PD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional 4th operand is only used by the {er} (embedded-rounding) form.
    switch len(vv) {
        case 0  : p = self.alloc("VFMSUB231PD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMSUB231PD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMSUB231PD takes 3 or 4 operands")
    }
    // Each form whose operand types match registers one candidate encoder via p.add.
    // VFMSUB231PD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // inverted R/B register-extension bits, opcode map 0F38
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))                      // W=1, vvvv = ~src1, L=128, pp=66
            m.emit(0xba)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: mod=11, reg=dst, rm=src2
        })
    }
    // VFMSUB231PD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))  // VEX3: map 0F38, W1+66, dst ext bit, memory operand, ~src1
            m.emit(0xba)                                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                               // ModRM/SIB/disp, disp scale 1 (no disp8 compression under VEX)
        })
    }
    // VFMSUB231PD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // inverted R/B register-extension bits, opcode map 0F38
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                      // W=1, vvvv = ~src1, L=256, pp=66
            m.emit(0xba)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: mod=11, reg=dst, rm=src2
        })
    }
    // VFMSUB231PD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))  // VEX3: map 0F38, W1+66+L256, dst ext bit, memory operand, ~src1
            m.emit(0xba)                                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                               // ModRM/SIB/disp, disp scale 1 (no disp8 compression under VEX)
        })
    }
    // VFMSUB231PD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))  // EVEX: map 0F38, W1+66, 512-bit, mask/zeroing, broadcast
            m.emit(0xba)                         // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)  // ModRM + memory; disp8 compressed at 64-byte granularity
        })
    }
    // VFMSUB231PD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))  // inverted R/B/R' extension bits, map 0F38
            m.emit(0xfd ^ (hlcode(v[2]) << 3))                                              // W=1, vvvv = ~src1, pp=66
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)  // z, rounding control, V', mask aaa, b=1 (embedded rounding)
            m.emit(0xba)                                                                    // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                   // ModRM: mod=11, reg=dst, rm=src2
        })
    }
    // VFMSUB231PD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // inverted R/B/R' extension bits, map 0F38
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                              // W=1, vvvv = ~src1, pp=66
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // z, V', mask aaa, L'L = 512-bit
            m.emit(0xba)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: mod=11, reg=dst, rm=src2
        })
    }
    // VFMSUB231PD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))  // EVEX: map 0F38, W1+66, 128-bit, mask/zeroing, broadcast
            m.emit(0xba)                         // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 16)  // ModRM + memory; disp8 compressed at 16-byte granularity
        })
    }
    // VFMSUB231PD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // inverted R/B/R' extension bits, map 0F38
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                              // W=1, vvvv = ~src1, pp=66
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // z, V', mask aaa, L'L = 128-bit
            m.emit(0xba)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: mod=11, reg=dst, rm=src2
        })
    }
    // VFMSUB231PD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))  // EVEX: map 0F38, W1+66, 256-bit, mask/zeroing, broadcast
            m.emit(0xba)                         // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 32)  // ModRM + memory; disp8 compressed at 32-byte granularity
        })
    }
    // VFMSUB231PD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // inverted R/B/R' extension bits, map 0F38
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                              // W=1, vvvv = ~src1, pp=66
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // z, V', mask aaa, L'L = 256-bit
            m.emit(0xba)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: mod=11, reg=dst, rm=src2
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFMSUB231PD")
    }
    return p
}
 47936  
 47937  // VFMSUB231PS performs "Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values".
 47938  //
 47939  // Mnemonic        : VFMSUB231PS
 47940  // Supported forms : (11 forms)
 47941  //
 47942  //    * VFMSUB231PS xmm, xmm, xmm                   [FMA3]
 47943  //    * VFMSUB231PS m128, xmm, xmm                  [FMA3]
 47944  //    * VFMSUB231PS ymm, ymm, ymm                   [FMA3]
 47945  //    * VFMSUB231PS m256, ymm, ymm                  [FMA3]
 47946  //    * VFMSUB231PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
 47947  //    * VFMSUB231PS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 47948  //    * VFMSUB231PS zmm, zmm, zmm{k}{z}             [AVX512F]
 47949  //    * VFMSUB231PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 47950  //    * VFMSUB231PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 47951  //    * VFMSUB231PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 47952  //    * VFMSUB231PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 47953  //
func (self *Program) VFMSUB231PS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // Operands are source-first; v2 (or vv[0] in the 4-operand case) is the
    // destination. The optional 4th operand exists only for the {er}
    // embedded-rounding form below.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VFMSUB231PS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMSUB231PS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMSUB231PS takes 3 or 4 operands")
    }
    // Each matching form below registers one candidate encoder via p.add.
    // VFMSUB231PS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // Hand-built 3-byte VEX prefix (0xC4 escape), opcode 0xBA;
        // ModRM.reg = v2 (dest), ModRM.rm = v0, VEX.vvvv = v1.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xba)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUB231PS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // Memory-source VEX form; mrsd scale 1 = plain (uncompressed) displacement.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xba)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUB231PS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // Same as the xmm form but with the vector-length bit set (0x7d vs 0x79).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0xba)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUB231PS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xba)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUB231PS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX memory form; the trailing mrsd argument (64) is the
        // disp8 compressed-displacement unit for a full 512-bit access.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xba)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFMSUB231PS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // Embedded-rounding form: vcode(v[0])<<5 places the rounding-control
        // bits in the EVEX P2 byte; 0x10 is presumably EVEX.b enabling static
        // RC — confirm against the Intel SDM. Hand-built EVEX prefix (0x62).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xba)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMSUB231PS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX reg-reg 512-bit form; trailing 0x40 holds the vector-length
        // bits (sibling 128/256-bit forms below use 0x00/0x20).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xba)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUB231PS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX 128-bit memory form; disp8 compression unit is 16 bytes.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xba)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFMSUB231PS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xba)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUB231PS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX 256-bit memory form; disp8 compression unit is 32 bytes.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xba)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFMSUB231PS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xba)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VFMSUB231PS")
    }
    return p
}
 48092  
 48093  // VFMSUB231SD performs "Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values".
 48094  //
 48095  // Mnemonic        : VFMSUB231SD
 48096  // Supported forms : (5 forms)
 48097  //
 48098  //    * VFMSUB231SD xmm, xmm, xmm                [FMA3]
 48099  //    * VFMSUB231SD m64, xmm, xmm                [FMA3]
 48100  //    * VFMSUB231SD m64, xmm, xmm{k}{z}          [AVX512F]
 48101  //    * VFMSUB231SD {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
 48102  //    * VFMSUB231SD xmm, xmm, xmm{k}{z}          [AVX512F]
 48103  //
func (self *Program) VFMSUB231SD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // Scalar double-precision variant: opcode 0xBB with the W bit set
    // (0xf9/0xfd prefix bytes here vs 0x79/0x7d in the SS variant),
    // selecting 64-bit element size. Operands are source-first; the optional
    // 4th operand exists only for the {er} embedded-rounding form.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VFMSUB231SD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMSUB231SD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMSUB231SD takes 3 or 4 operands")
    }
    // VFMSUB231SD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // Hand-built 3-byte VEX prefix (0xC4 escape);
        // ModRM.reg = v2 (dest), ModRM.rm = v0, VEX.vvvv = v1.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0xbb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUB231SD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // Memory-source VEX form; mrsd scale 1 = plain (uncompressed) displacement.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xbb)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUB231SD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX scalar memory form: last evex argument is a literal 0 (no
        // broadcast for scalars); disp8 compression unit is 8 bytes.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xbb)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VFMSUB231SD {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // Embedded-rounding form: vcode(v[0])<<5 places the rounding-control
        // bits in the EVEX P2 byte; 0x10 is presumably EVEX.b enabling static
        // RC — confirm against the Intel SDM. Hand-built EVEX prefix (0x62).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xbb)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMSUB231SD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xbb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VFMSUB231SD")
    }
    return p
}
 48174  
 48175  // VFMSUB231SS performs "Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values".
 48176  //
 48177  // Mnemonic        : VFMSUB231SS
 48178  // Supported forms : (5 forms)
 48179  //
 48180  //    * VFMSUB231SS xmm, xmm, xmm                [FMA3]
 48181  //    * VFMSUB231SS m32, xmm, xmm                [FMA3]
 48182  //    * VFMSUB231SS m32, xmm, xmm{k}{z}          [AVX512F]
 48183  //    * VFMSUB231SS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
 48184  //    * VFMSUB231SS xmm, xmm, xmm{k}{z}          [AVX512F]
 48185  //
func (self *Program) VFMSUB231SS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // Scalar single-precision variant: opcode 0xBB with the W bit clear
    // (0x79/0x7d prefix bytes here vs 0xf9/0xfd in the SD variant).
    // Operands are source-first; the optional 4th operand exists only for
    // the {er} embedded-rounding form.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VFMSUB231SS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMSUB231SS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMSUB231SS takes 3 or 4 operands")
    }
    // VFMSUB231SS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // Hand-built 3-byte VEX prefix (0xC4 escape);
        // ModRM.reg = v2 (dest), ModRM.rm = v0, VEX.vvvv = v1.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xbb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUB231SS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // Memory-source VEX form; mrsd scale 1 = plain (uncompressed) displacement.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xbb)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUB231SS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX scalar memory form: last evex argument is a literal 0 (no
        // broadcast for scalars); disp8 compression unit is 4 bytes.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xbb)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VFMSUB231SS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // Embedded-rounding form: vcode(v[0])<<5 places the rounding-control
        // bits in the EVEX P2 byte; 0x10 is presumably EVEX.b enabling static
        // RC — confirm against the Intel SDM. Hand-built EVEX prefix (0x62).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xbb)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMSUB231SS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xbb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VFMSUB231SS")
    }
    return p
}
 48256  
 48257  // VFMSUBADD132PD performs "Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values".
 48258  //
 48259  // Mnemonic        : VFMSUBADD132PD
 48260  // Supported forms : (11 forms)
 48261  //
 48262  //    * VFMSUBADD132PD xmm, xmm, xmm                   [FMA3]
 48263  //    * VFMSUBADD132PD m128, xmm, xmm                  [FMA3]
 48264  //    * VFMSUBADD132PD ymm, ymm, ymm                   [FMA3]
 48265  //    * VFMSUBADD132PD m256, ymm, ymm                  [FMA3]
 48266  //    * VFMSUBADD132PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
 48267  //    * VFMSUBADD132PD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 48268  //    * VFMSUBADD132PD zmm, zmm, zmm{k}{z}             [AVX512F]
 48269  //    * VFMSUBADD132PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 48270  //    * VFMSUBADD132PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 48271  //    * VFMSUBADD132PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 48272  //    * VFMSUBADD132PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 48273  //
func (self *Program) VFMSUBADD132PD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // Packed double-precision variant: opcode 0x97 with the W bit set
    // (0xf9/0xfd prefix bytes here vs 0x79/0x7d in the PS variant).
    // Operands are source-first; the optional 4th operand exists only for
    // the {er} embedded-rounding form below.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VFMSUBADD132PD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMSUBADD132PD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMSUBADD132PD takes 3 or 4 operands")
    }
    // VFMSUBADD132PD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // Hand-built 3-byte VEX prefix (0xC4 escape);
        // ModRM.reg = v2 (dest), ModRM.rm = v0, VEX.vvvv = v1.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0x97)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD132PD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // Memory-source VEX form; mrsd scale 1 = plain (uncompressed) displacement.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x97)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUBADD132PD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // Same as the xmm form but with the vector-length bit set (0xfd vs 0xf9).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit(0x97)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD132PD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x97)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUBADD132PD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX memory form; the trailing mrsd argument (64) is the
        // disp8 compressed-displacement unit for a full 512-bit access.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x97)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFMSUBADD132PD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // Embedded-rounding form: vcode(v[0])<<5 places the rounding-control
        // bits in the EVEX P2 byte; 0x10 is presumably EVEX.b enabling static
        // RC — confirm against the Intel SDM. Hand-built EVEX prefix (0x62).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x97)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMSUBADD132PD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX reg-reg 512-bit form; trailing 0x40 holds the vector-length
        // bits (sibling 128/256-bit forms below use 0x00/0x20).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x97)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD132PD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX 128-bit memory form; disp8 compression unit is 16 bytes.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x97)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFMSUBADD132PD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x97)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD132PD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX 256-bit memory form; disp8 compression unit is 32 bytes.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x97)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFMSUBADD132PD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x97)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VFMSUBADD132PD")
    }
    return p
}
 48412  
 48413  // VFMSUBADD132PS performs "Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values".
 48414  //
 48415  // Mnemonic        : VFMSUBADD132PS
 48416  // Supported forms : (11 forms)
 48417  //
 48418  //    * VFMSUBADD132PS xmm, xmm, xmm                   [FMA3]
 48419  //    * VFMSUBADD132PS m128, xmm, xmm                  [FMA3]
 48420  //    * VFMSUBADD132PS ymm, ymm, ymm                   [FMA3]
 48421  //    * VFMSUBADD132PS m256, ymm, ymm                  [FMA3]
 48422  //    * VFMSUBADD132PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
 48423  //    * VFMSUBADD132PS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 48424  //    * VFMSUBADD132PS zmm, zmm, zmm{k}{z}             [AVX512F]
 48425  //    * VFMSUBADD132PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 48426  //    * VFMSUBADD132PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 48427  //    * VFMSUBADD132PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 48428  //    * VFMSUBADD132PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 48429  //
func (self *Program) VFMSUBADD132PS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // Packed single-precision variant: opcode 0x97 with the W bit clear
    // (0x79/0x7d prefix bytes here vs 0xf9/0xfd in the PD variant).
    // Operands are source-first; the optional 4th operand exists only for
    // the {er} embedded-rounding form below.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VFMSUBADD132PS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMSUBADD132PS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMSUBADD132PS takes 3 or 4 operands")
    }
    // VFMSUBADD132PS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // Hand-built 3-byte VEX prefix (0xC4 escape);
        // ModRM.reg = v2 (dest), ModRM.rm = v0, VEX.vvvv = v1.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x97)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD132PS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // Memory-source VEX form; mrsd scale 1 = plain (uncompressed) displacement.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x97)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUBADD132PS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // Same as the xmm form but with the vector-length bit set (0x7d vs 0x79).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x97)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD132PS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x97)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUBADD132PS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX memory form; the trailing mrsd argument (64) is the
        // disp8 compressed-displacement unit for a full 512-bit access.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x97)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFMSUBADD132PS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // Embedded-rounding form: vcode(v[0])<<5 places the rounding-control
        // bits in the EVEX P2 byte; 0x10 is presumably EVEX.b enabling static
        // RC — confirm against the Intel SDM. Hand-built EVEX prefix (0x62).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x97)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMSUBADD132PS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX reg-reg 512-bit form; trailing 0x40 holds the vector-length
        // bits (sibling 128/256-bit forms below use 0x00/0x20).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x97)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD132PS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX 128-bit memory form; disp8 compression unit is 16 bytes.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x97)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFMSUBADD132PS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x97)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD132PS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX 256-bit memory form; disp8 compression unit is 32 bytes.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x97)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFMSUBADD132PS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x97)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VFMSUBADD132PS")
    }
    return p
}
 48568  
// VFMSUBADD213PD performs "Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VFMSUBADD213PD
// Supported forms : (11 forms)
//
//    * VFMSUBADD213PD xmm, xmm, xmm                   [FMA3]
//    * VFMSUBADD213PD m128, xmm, xmm                  [FMA3]
//    * VFMSUBADD213PD ymm, ymm, ymm                   [FMA3]
//    * VFMSUBADD213PD m256, ymm, ymm                  [FMA3]
//    * VFMSUBADD213PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFMSUBADD213PD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFMSUBADD213PD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFMSUBADD213PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFMSUBADD213PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFMSUBADD213PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFMSUBADD213PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// The variadic vv carries at most one extra operand, used only by the
// 4-operand {er} (embedded rounding) form. The function registers one
// encoder per matching form on the returned *Instruction and panics if
// no form matches the supplied operands.
func (self *Program) VFMSUBADD213PD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate the instruction with 3 or 4 operands depending on the form.
    switch len(vv) {
        case 0  : p = self.alloc("VFMSUBADD213PD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMSUBADD213PD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMSUBADD213PD takes 3 or 4 operands")
    }
    // VFMSUBADD213PD xmm, xmm, xmm
    // VEX-encoded register form: 3-byte VEX prefix (0xc4), opcode 0xa7,
    // then a register-direct ModRM byte (0xc0 | reg<<3 | rm).
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0xa7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD213PD m128, xmm, xmm
    // VEX memory form: vex3 builds the prefix from the address operand;
    // mrsd emits ModRM/SIB/displacement (scale 1 = no disp8 compression).
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xa7)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUBADD213PD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit(0xa7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD213PD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xa7)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUBADD213PD m512/m64bcst, zmm, zmm{k}{z}
    // EVEX memory form: evex builds the 4-byte prefix (with mask/zeroing/
    // broadcast bits); mrsd's final argument (64) is the disp8 scale.
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xa7)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFMSUBADD213PD {er}, zmm, zmm, zmm{k}{z}
    // 4-operand embedded-rounding form: EVEX prefix (0x62) emitted inline;
    // vcode(v[0]) places the rounding mode in the prefix.
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xa7)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMSUBADD213PD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xa7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD213PD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xa7)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFMSUBADD213PD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xa7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD213PD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xa7)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFMSUBADD213PD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xa7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered above: the operand combination is invalid.
    if p.len == 0 {
        panic("invalid operands for VFMSUBADD213PD")
    }
    return p
}
 48724  
// VFMSUBADD213PS performs "Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VFMSUBADD213PS
// Supported forms : (11 forms)
//
//    * VFMSUBADD213PS xmm, xmm, xmm                   [FMA3]
//    * VFMSUBADD213PS m128, xmm, xmm                  [FMA3]
//    * VFMSUBADD213PS ymm, ymm, ymm                   [FMA3]
//    * VFMSUBADD213PS m256, ymm, ymm                  [FMA3]
//    * VFMSUBADD213PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFMSUBADD213PS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFMSUBADD213PS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFMSUBADD213PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFMSUBADD213PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFMSUBADD213PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFMSUBADD213PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// The variadic vv carries at most one extra operand, used only by the
// 4-operand {er} (embedded rounding) form. One encoder is registered per
// matching form; the function panics if no form matches.
func (self *Program) VFMSUBADD213PS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate the instruction with 3 or 4 operands depending on the form.
    switch len(vv) {
        case 0  : p = self.alloc("VFMSUBADD213PS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMSUBADD213PS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMSUBADD213PS takes 3 or 4 operands")
    }
    // VFMSUBADD213PS xmm, xmm, xmm
    // VEX register form: 3-byte VEX prefix (0xc4), opcode 0xa7, then a
    // register-direct ModRM byte.
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xa7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD213PS m128, xmm, xmm
    // VEX memory form: mrsd emits ModRM/SIB/displacement (scale 1 = no
    // disp8 compression).
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xa7)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUBADD213PS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0xa7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD213PS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xa7)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUBADD213PS m512/m32bcst, zmm, zmm{k}{z}
    // EVEX memory form; mrsd's final argument (64) is the disp8 scale.
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xa7)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFMSUBADD213PS {er}, zmm, zmm, zmm{k}{z}
    // 4-operand embedded-rounding form: EVEX prefix (0x62) emitted inline;
    // vcode(v[0]) places the rounding mode in the prefix.
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xa7)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMSUBADD213PS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xa7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD213PS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xa7)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFMSUBADD213PS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xa7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD213PS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xa7)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFMSUBADD213PS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xa7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered above: the operand combination is invalid.
    if p.len == 0 {
        panic("invalid operands for VFMSUBADD213PS")
    }
    return p
}
 48880  
// VFMSUBADD231PD performs "Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VFMSUBADD231PD
// Supported forms : (11 forms)
//
//    * VFMSUBADD231PD xmm, xmm, xmm                   [FMA3]
//    * VFMSUBADD231PD m128, xmm, xmm                  [FMA3]
//    * VFMSUBADD231PD ymm, ymm, ymm                   [FMA3]
//    * VFMSUBADD231PD m256, ymm, ymm                  [FMA3]
//    * VFMSUBADD231PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFMSUBADD231PD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFMSUBADD231PD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFMSUBADD231PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFMSUBADD231PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFMSUBADD231PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFMSUBADD231PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// The variadic vv carries at most one extra operand, used only by the
// 4-operand {er} (embedded rounding) form. One encoder is registered per
// matching form; the function panics if no form matches.
func (self *Program) VFMSUBADD231PD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate the instruction with 3 or 4 operands depending on the form.
    switch len(vv) {
        case 0  : p = self.alloc("VFMSUBADD231PD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMSUBADD231PD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMSUBADD231PD takes 3 or 4 operands")
    }
    // VFMSUBADD231PD xmm, xmm, xmm
    // VEX register form: 3-byte VEX prefix (0xc4), opcode 0xb7, then a
    // register-direct ModRM byte.
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0xb7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD231PD m128, xmm, xmm
    // VEX memory form: mrsd emits ModRM/SIB/displacement (scale 1 = no
    // disp8 compression).
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[1]))
            m.emit(0xb7)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUBADD231PD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit(0xb7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD231PD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xb7)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUBADD231PD m512/m64bcst, zmm, zmm{k}{z}
    // EVEX memory form; mrsd's final argument (64) is the disp8 scale.
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xb7)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFMSUBADD231PD {er}, zmm, zmm, zmm{k}{z}
    // 4-operand embedded-rounding form: EVEX prefix (0x62) emitted inline;
    // vcode(v[0]) places the rounding mode in the prefix.
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xb7)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMSUBADD231PD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xb7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD231PD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xb7)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFMSUBADD231PD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xb7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD231PD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xb7)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFMSUBADD231PD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xb7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered above: the operand combination is invalid.
    if p.len == 0 {
        panic("invalid operands for VFMSUBADD231PD")
    }
    return p
}
 49036  
// VFMSUBADD231PS performs "Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VFMSUBADD231PS
// Supported forms : (11 forms)
//
//    * VFMSUBADD231PS xmm, xmm, xmm                   [FMA3]
//    * VFMSUBADD231PS m128, xmm, xmm                  [FMA3]
//    * VFMSUBADD231PS ymm, ymm, ymm                   [FMA3]
//    * VFMSUBADD231PS m256, ymm, ymm                  [FMA3]
//    * VFMSUBADD231PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFMSUBADD231PS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFMSUBADD231PS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFMSUBADD231PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFMSUBADD231PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFMSUBADD231PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFMSUBADD231PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// The variadic vv carries at most one extra operand, used only by the
// 4-operand {er} (embedded rounding) form. One encoder is registered per
// matching form; the function panics if no form matches.
func (self *Program) VFMSUBADD231PS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate the instruction with 3 or 4 operands depending on the form.
    switch len(vv) {
        case 0  : p = self.alloc("VFMSUBADD231PS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMSUBADD231PS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMSUBADD231PS takes 3 or 4 operands")
    }
    // VFMSUBADD231PS xmm, xmm, xmm
    // VEX register form: 3-byte VEX prefix (0xc4), opcode 0xb7, then a
    // register-direct ModRM byte.
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xb7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD231PS m128, xmm, xmm
    // VEX memory form: mrsd emits ModRM/SIB/displacement (scale 1 = no
    // disp8 compression).
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xb7)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUBADD231PS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0xb7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD231PS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xb7)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUBADD231PS m512/m32bcst, zmm, zmm{k}{z}
    // EVEX memory form; mrsd's final argument (64) is the disp8 scale.
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xb7)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFMSUBADD231PS {er}, zmm, zmm, zmm{k}{z}
    // 4-operand embedded-rounding form: EVEX prefix (0x62) emitted inline;
    // vcode(v[0]) places the rounding mode in the prefix.
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xb7)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMSUBADD231PS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xb7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD231PS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xb7)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFMSUBADD231PS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xb7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD231PS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xb7)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFMSUBADD231PS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xb7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered above: the operand combination is invalid.
    if p.len == 0 {
        panic("invalid operands for VFMSUBADD231PS")
    }
    return p
}
 49192  
// VFMSUBADDPD performs "Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VFMSUBADDPD
// Supported forms : (6 forms)
//
//    * VFMSUBADDPD xmm, xmm, xmm, xmm     [FMA4]
//    * VFMSUBADDPD m128, xmm, xmm, xmm    [FMA4]
//    * VFMSUBADDPD xmm, m128, xmm, xmm    [FMA4]
//    * VFMSUBADDPD ymm, ymm, ymm, ymm     [FMA4]
//    * VFMSUBADDPD m256, ymm, ymm, ymm    [FMA4]
//    * VFMSUBADDPD ymm, m256, ymm, ymm    [FMA4]
//
// Each matching form below registers one or more candidate byte encodings on
// the allocated instruction; the assembler later picks one when encoding.
// FMA4 is a 4-operand extension: the 4th register is carried in the high
// nibble of a trailing immediate-style byte (see the final m.emit of each
// encoder).
func (self *Program) VFMSUBADDPD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VFMSUBADDPD", 4, Operands { v0, v1, v2, v3 })
    // VFMSUBADDPD xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Encoding A: v0 in ModRM.rm (VEX byte 2 has bit 7 set), v1 in the trailing byte.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                           // 3-byte VEX prefix
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5)) // inverted high reg bits + opcode map
            m.emit(0xf9 ^ (hlcode(v[2]) << 3))                     // W=1, vvvv = v2 (inverted), pp
            m.emit(0x5f)                                           // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))          // ModRM: reg = v3, rm = v0
            m.emit(hlcode(v[1]) << 4)                              // 4th operand (v1) in high nibble
        })
        // Encoding B: operands swapped — v1 in ModRM.rm (W bit clear), v0 in the trailing byte.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))                     // W=0 variant
            m.emit(0x5f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))          // ModRM: reg = v3, rm = v1
            m.emit(hlcode(v[0]) << 4)                              // 4th operand (v0) in high nibble
        })
    }
    // VFMSUBADDPD m128, xmm, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2])) // 3-byte VEX for memory operand
            m.emit(0x5f)                                                    // opcode
            m.mrsd(lcode(v[3]), addr(v[0]), 1)                              // ModRM/SIB/disp for addr(v0)
            m.emit(hlcode(v[1]) << 4)                                       // 4th operand (v1) in high nibble
        })
    }
    // VFMSUBADDPD xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2])) // W=0 flavor: memory is v1
            m.emit(0x5f)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)                                       // 4th operand (v0) in high nibble
        })
    }
    // VFMSUBADDPD ymm, ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Same two alternative encodings as the xmm form, with the VEX.L bit set for 256-bit.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))                     // L=1 (256-bit), W=1
            m.emit(0x5f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                     // L=1 (256-bit), W=0
            m.emit(0x5f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMSUBADDPD m256, ymm, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x85, hcode(v[3]), addr(v[0]), hlcode(v[2])) // 256-bit, memory is v0
            m.emit(0x5f)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFMSUBADDPD ymm, m256, ymm, ymm
    if isYMM(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2])) // 256-bit, memory is v1
            m.emit(0x5f)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFMSUBADDPD")
    }
    return p
}
 49298  
// VFMSUBADDPS performs "Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VFMSUBADDPS
// Supported forms : (6 forms)
//
//    * VFMSUBADDPS xmm, xmm, xmm, xmm     [FMA4]
//    * VFMSUBADDPS m128, xmm, xmm, xmm    [FMA4]
//    * VFMSUBADDPS xmm, m128, xmm, xmm    [FMA4]
//    * VFMSUBADDPS ymm, ymm, ymm, ymm     [FMA4]
//    * VFMSUBADDPS m256, ymm, ymm, ymm    [FMA4]
//    * VFMSUBADDPS ymm, m256, ymm, ymm    [FMA4]
//
// Structurally identical to VFMSUBADDPD, differing only in the opcode byte
// (0x5e, single precision). FMA4 carries the 4th register operand in the high
// nibble of a trailing byte.
func (self *Program) VFMSUBADDPS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VFMSUBADDPS", 4, Operands { v0, v1, v2, v3 })
    // VFMSUBADDPS xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Encoding A: v0 in ModRM.rm, v1 in the trailing byte.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                           // 3-byte VEX prefix
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5)) // inverted high reg bits + opcode map
            m.emit(0xf9 ^ (hlcode(v[2]) << 3))                     // W=1, vvvv = v2 (inverted)
            m.emit(0x5e)                                           // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))          // ModRM: reg = v3, rm = v0
            m.emit(hlcode(v[1]) << 4)                              // 4th operand (v1) in high nibble
        })
        // Encoding B: operands swapped — v1 in ModRM.rm, v0 in the trailing byte.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))                     // W=0 variant
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))          // ModRM: reg = v3, rm = v1
            m.emit(hlcode(v[0]) << 4)                              // 4th operand (v0) in high nibble
        })
    }
    // VFMSUBADDPS m128, xmm, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2])) // 3-byte VEX for memory operand
            m.emit(0x5e)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)                              // ModRM/SIB/disp for addr(v0)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFMSUBADDPS xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2])) // W=0 flavor: memory is v1
            m.emit(0x5e)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMSUBADDPS ymm, ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Same two alternatives as the xmm form, with VEX.L set for 256-bit.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))                     // L=1 (256-bit), W=1
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                     // L=1 (256-bit), W=0
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMSUBADDPS m256, ymm, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x85, hcode(v[3]), addr(v[0]), hlcode(v[2])) // 256-bit, memory is v0
            m.emit(0x5e)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFMSUBADDPS ymm, m256, ymm, ymm
    if isYMM(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2])) // 256-bit, memory is v1
            m.emit(0x5e)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFMSUBADDPS")
    }
    return p
}
 49404  
// VFMSUBPD performs "Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VFMSUBPD
// Supported forms : (6 forms)
//
//    * VFMSUBPD xmm, xmm, xmm, xmm     [FMA4]
//    * VFMSUBPD m128, xmm, xmm, xmm    [FMA4]
//    * VFMSUBPD xmm, m128, xmm, xmm    [FMA4]
//    * VFMSUBPD ymm, ymm, ymm, ymm     [FMA4]
//    * VFMSUBPD m256, ymm, ymm, ymm    [FMA4]
//    * VFMSUBPD ymm, m256, ymm, ymm    [FMA4]
//
// Same encoding skeleton as the other FMA4 packed forms in this file; only the
// opcode byte (0x6d) differs. The 4th register is carried in the high nibble
// of a trailing byte.
func (self *Program) VFMSUBPD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VFMSUBPD", 4, Operands { v0, v1, v2, v3 })
    // VFMSUBPD xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Encoding A: v0 in ModRM.rm, v1 in the trailing byte.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                           // 3-byte VEX prefix
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5)) // inverted high reg bits + opcode map
            m.emit(0xf9 ^ (hlcode(v[2]) << 3))                     // W=1, vvvv = v2 (inverted)
            m.emit(0x6d)                                           // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))          // ModRM: reg = v3, rm = v0
            m.emit(hlcode(v[1]) << 4)                              // 4th operand (v1) in high nibble
        })
        // Encoding B: operands swapped — v1 in ModRM.rm, v0 in the trailing byte.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))                     // W=0 variant
            m.emit(0x6d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))          // ModRM: reg = v3, rm = v1
            m.emit(hlcode(v[0]) << 4)                              // 4th operand (v0) in high nibble
        })
    }
    // VFMSUBPD m128, xmm, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2])) // 3-byte VEX for memory operand
            m.emit(0x6d)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)                              // ModRM/SIB/disp for addr(v0)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFMSUBPD xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2])) // W=0 flavor: memory is v1
            m.emit(0x6d)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMSUBPD ymm, ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Same two alternatives as the xmm form, with VEX.L set for 256-bit.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))                     // L=1 (256-bit), W=1
            m.emit(0x6d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                     // L=1 (256-bit), W=0
            m.emit(0x6d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMSUBPD m256, ymm, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x85, hcode(v[3]), addr(v[0]), hlcode(v[2])) // 256-bit, memory is v0
            m.emit(0x6d)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFMSUBPD ymm, m256, ymm, ymm
    if isYMM(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2])) // 256-bit, memory is v1
            m.emit(0x6d)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFMSUBPD")
    }
    return p
}
 49510  
// VFMSUBPS performs "Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VFMSUBPS
// Supported forms : (6 forms)
//
//    * VFMSUBPS xmm, xmm, xmm, xmm     [FMA4]
//    * VFMSUBPS m128, xmm, xmm, xmm    [FMA4]
//    * VFMSUBPS xmm, m128, xmm, xmm    [FMA4]
//    * VFMSUBPS ymm, ymm, ymm, ymm     [FMA4]
//    * VFMSUBPS m256, ymm, ymm, ymm    [FMA4]
//    * VFMSUBPS ymm, m256, ymm, ymm    [FMA4]
//
// Same encoding skeleton as the other FMA4 packed forms in this file; only the
// opcode byte (0x6c) differs. The 4th register is carried in the high nibble
// of a trailing byte.
func (self *Program) VFMSUBPS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VFMSUBPS", 4, Operands { v0, v1, v2, v3 })
    // VFMSUBPS xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Encoding A: v0 in ModRM.rm, v1 in the trailing byte.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                           // 3-byte VEX prefix
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5)) // inverted high reg bits + opcode map
            m.emit(0xf9 ^ (hlcode(v[2]) << 3))                     // W=1, vvvv = v2 (inverted)
            m.emit(0x6c)                                           // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))          // ModRM: reg = v3, rm = v0
            m.emit(hlcode(v[1]) << 4)                              // 4th operand (v1) in high nibble
        })
        // Encoding B: operands swapped — v1 in ModRM.rm, v0 in the trailing byte.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))                     // W=0 variant
            m.emit(0x6c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))          // ModRM: reg = v3, rm = v1
            m.emit(hlcode(v[0]) << 4)                              // 4th operand (v0) in high nibble
        })
    }
    // VFMSUBPS m128, xmm, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2])) // 3-byte VEX for memory operand
            m.emit(0x6c)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)                              // ModRM/SIB/disp for addr(v0)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFMSUBPS xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2])) // W=0 flavor: memory is v1
            m.emit(0x6c)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMSUBPS ymm, ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Same two alternatives as the xmm form, with VEX.L set for 256-bit.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))                     // L=1 (256-bit), W=1
            m.emit(0x6c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                     // L=1 (256-bit), W=0
            m.emit(0x6c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMSUBPS m256, ymm, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x85, hcode(v[3]), addr(v[0]), hlcode(v[2])) // 256-bit, memory is v0
            m.emit(0x6c)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFMSUBPS ymm, m256, ymm, ymm
    if isYMM(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2])) // 256-bit, memory is v1
            m.emit(0x6c)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFMSUBPS")
    }
    return p
}
 49616  
// VFMSUBSD performs "Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values".
//
// Mnemonic        : VFMSUBSD
// Supported forms : (3 forms)
//
//    * VFMSUBSD xmm, xmm, xmm, xmm    [FMA4]
//    * VFMSUBSD m64, xmm, xmm, xmm    [FMA4]
//    * VFMSUBSD xmm, m64, xmm, xmm    [FMA4]
//
// Scalar counterpart of VFMSUBPD (opcode 0x6f, 64-bit memory operand). The
// 4th register is carried in the high nibble of a trailing byte.
func (self *Program) VFMSUBSD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VFMSUBSD", 4, Operands { v0, v1, v2, v3 })
    // VFMSUBSD xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Encoding A: v0 in ModRM.rm, v1 in the trailing byte.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                           // 3-byte VEX prefix
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5)) // inverted high reg bits + opcode map
            m.emit(0xf9 ^ (hlcode(v[2]) << 3))                     // W=1, vvvv = v2 (inverted)
            m.emit(0x6f)                                           // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))          // ModRM: reg = v3, rm = v0
            m.emit(hlcode(v[1]) << 4)                              // 4th operand (v1) in high nibble
        })
        // Encoding B: operands swapped — v1 in ModRM.rm, v0 in the trailing byte.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))                     // W=0 variant
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))          // ModRM: reg = v3, rm = v1
            m.emit(hlcode(v[0]) << 4)                              // 4th operand (v0) in high nibble
        })
    }
    // VFMSUBSD m64, xmm, xmm, xmm
    if isM64(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2])) // 3-byte VEX for memory operand
            m.emit(0x6f)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)                              // ModRM/SIB/disp for addr(v0)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFMSUBSD xmm, m64, xmm, xmm
    if isXMM(v0) && isM64(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2])) // W=0 flavor: memory is v1
            m.emit(0x6f)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFMSUBSD")
    }
    return p
}
 49676  
// VFMSUBSS performs "Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values".
//
// Mnemonic        : VFMSUBSS
// Supported forms : (3 forms)
//
//    * VFMSUBSS xmm, xmm, xmm, xmm    [FMA4]
//    * VFMSUBSS m32, xmm, xmm, xmm    [FMA4]
//    * VFMSUBSS xmm, m32, xmm, xmm    [FMA4]
//
// Scalar counterpart of VFMSUBPS (opcode 0x6e, 32-bit memory operand). The
// 4th register is carried in the high nibble of a trailing byte.
func (self *Program) VFMSUBSS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VFMSUBSS", 4, Operands { v0, v1, v2, v3 })
    // VFMSUBSS xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Encoding A: v0 in ModRM.rm, v1 in the trailing byte.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                           // 3-byte VEX prefix
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5)) // inverted high reg bits + opcode map
            m.emit(0xf9 ^ (hlcode(v[2]) << 3))                     // W=1, vvvv = v2 (inverted)
            m.emit(0x6e)                                           // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))          // ModRM: reg = v3, rm = v0
            m.emit(hlcode(v[1]) << 4)                              // 4th operand (v1) in high nibble
        })
        // Encoding B: operands swapped — v1 in ModRM.rm, v0 in the trailing byte.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))                     // W=0 variant
            m.emit(0x6e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))          // ModRM: reg = v3, rm = v1
            m.emit(hlcode(v[0]) << 4)                              // 4th operand (v0) in high nibble
        })
    }
    // VFMSUBSS m32, xmm, xmm, xmm
    if isM32(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2])) // 3-byte VEX for memory operand
            m.emit(0x6e)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)                              // ModRM/SIB/disp for addr(v0)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFMSUBSS xmm, m32, xmm, xmm
    if isXMM(v0) && isM32(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2])) // W=0 flavor: memory is v1
            m.emit(0x6e)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFMSUBSS")
    }
    return p
}
 49736  
// VFNMADD132PD performs "Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VFNMADD132PD
// Supported forms : (11 forms)
//
//    * VFNMADD132PD xmm, xmm, xmm                   [FMA3]
//    * VFNMADD132PD m128, xmm, xmm                  [FMA3]
//    * VFNMADD132PD ymm, ymm, ymm                   [FMA3]
//    * VFNMADD132PD m256, ymm, ymm                  [FMA3]
//    * VFNMADD132PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFNMADD132PD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFNMADD132PD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFNMADD132PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFNMADD132PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFNMADD132PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFNMADD132PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// FMA3 forms (3 operands) use a VEX prefix; AVX-512 forms use an EVEX prefix
// (escape byte 0x62) and support opmask {k} / zeroing {z} and, for the zmm
// variant with vv set, an embedded-rounding {er} operand. The trailing
// variadic vv holds the optional 4th operand for the {er} form.
func (self *Program) VFNMADD132PD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate a 3- or 4-operand instruction depending on whether the
    // optional {er} operand list vv is present.
    switch len(vv) {
        case 0  : p = self.alloc("VFNMADD132PD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMADD132PD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMADD132PD takes 3 or 4 operands")
    }
    // VFNMADD132PD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                           // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5)) // inverted high reg bits + opcode map
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))                     // W=1, vvvv = v1 (inverted)
            m.emit(0x9c)                                           // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))          // ModRM: reg = v2, rm = v0
        })
    }
    // VFNMADD132PD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 3-byte VEX for memory operand
            m.emit(0x9c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                              // ModRM/SIB/disp for addr(v0)
        })
    }
    // VFNMADD132PD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                     // L=1 (256-bit), W=1
            m.emit(0x9c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD132PD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 256-bit, memory is v0
            m.emit(0x9c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMADD132PD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX helper: 512-bit vector length (0b10), broadcast bit from v0.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x9c)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)                    // disp scaled by 64-byte element size
        })
    }
    // VFNMADD132PD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                           // EVEX prefix escape byte
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            // embedded rounding mode (v0) in bits 6:5, opmask + zeroing from v3
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x9c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))          // ModRM: reg = v3, rm = v1
        })
    }
    // VFNMADD132PD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                           // EVEX prefix escape byte
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40) // 0x40: 512-bit length bits
            m.emit(0x9c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))          // ModRM: reg = v2, rm = v0
        })
    }
    // VFNMADD132PD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x9c)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)                    // disp scaled by 16-byte element size
        })
    }
    // VFNMADD132PD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                           // EVEX prefix escape byte
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00) // 0x00: 128-bit length bits
            m.emit(0x9c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD132PD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x9c)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)                    // disp scaled by 32-byte element size
        })
    }
    // VFNMADD132PD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                           // EVEX prefix escape byte
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20) // 0x20: 256-bit length bits
            m.emit(0x9c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFNMADD132PD")
    }
    return p
}
 49892  
// VFNMADD132PS performs "Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VFNMADD132PS
// Supported forms : (11 forms)
//
//    * VFNMADD132PS xmm, xmm, xmm                   [FMA3]
//    * VFNMADD132PS m128, xmm, xmm                  [FMA3]
//    * VFNMADD132PS ymm, ymm, ymm                   [FMA3]
//    * VFNMADD132PS m256, ymm, ymm                  [FMA3]
//    * VFNMADD132PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFNMADD132PS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFNMADD132PS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFNMADD132PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFNMADD132PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFNMADD132PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFNMADD132PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VFNMADD132PS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The 4-operand signature is used only by the "{er}, zmm, zmm, zmm{k}{z}"
    // form (embedded rounding control); every other form takes 3 operands.
    switch len(vv) {
        case 0  : p = self.alloc("VFNMADD132PS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMADD132PS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMADD132PS takes 3 or 4 operands")
    }
    // Each operand-pattern match below registers one candidate encoding via
    // p.add. NOTE(review): the patterns are not mutually exclusive (e.g. a
    // plain xmm register satisfies both isXMM and isEVEXXMM), so a single
    // call may accumulate several candidates for the encoder to pick from.
    // VFNMADD132PS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix (escape 0xc4), opcode 0x9c, then
            // ModRM with reg = v[2] (destination) and rm = v[0].
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x9c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD132PS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX memory form: prefix via helper, opcode, then ModRM/SIB/disp.
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x9c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMADD132PS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x9c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD132PS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x9c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMADD132PS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; the final mrsd argument is the disp8 scaling
            // factor (full vector width, 64 bytes here).
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x9c)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFNMADD132PS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            // EVEX byte 4: 0x10 sets EVEX.b, which turns the L'L bits into
            // the static rounding mode carried in vcode(v[0]).
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x9c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMADD132PS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (escape 0x62); the trailing
            // 0x40/0x20/0x00 in byte 4 selects vector length 512/256/128.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x9c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD132PS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x9c)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFNMADD132PS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x9c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD132PS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x9c)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFNMADD132PS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x9c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No pattern matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFNMADD132PS")
    }
    return p
}
 50048  
// VFNMADD132SD performs "Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values".
//
// Mnemonic        : VFNMADD132SD
// Supported forms : (5 forms)
//
//    * VFNMADD132SD xmm, xmm, xmm                [FMA3]
//    * VFNMADD132SD m64, xmm, xmm                [FMA3]
//    * VFNMADD132SD m64, xmm, xmm{k}{z}          [AVX512F]
//    * VFNMADD132SD {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VFNMADD132SD xmm, xmm, xmm{k}{z}          [AVX512F]
//
func (self *Program) VFNMADD132SD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The 4-operand signature is used only by the "{er}, xmm, xmm, xmm{k}{z}"
    // form (embedded rounding control); every other form takes 3 operands.
    switch len(vv) {
        case 0  : p = self.alloc("VFNMADD132SD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMADD132SD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMADD132SD takes 3 or 4 operands")
    }
    // Each operand-pattern match registers one candidate encoding via p.add;
    // the patterns are not mutually exclusive, so several may accumulate.
    // VFNMADD132SD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix (escape 0xc4), opcode 0x9d, then
            // ModRM with reg = v[2] (destination) and rm = v[0].
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0x9d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD132SD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x9d)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMADD132SD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; the final mrsd argument is the disp8 scaling
            // factor (8 bytes: one double-precision element).
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x9d)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VFNMADD132SD {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            // EVEX byte 4: 0x10 sets EVEX.b, which turns the L'L bits into
            // the static rounding mode carried in vcode(v[0]).
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x9d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMADD132SD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x9d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No pattern matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFNMADD132SD")
    }
    return p
}
 50130  
// VFNMADD132SS performs "Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values".
//
// Mnemonic        : VFNMADD132SS
// Supported forms : (5 forms)
//
//    * VFNMADD132SS xmm, xmm, xmm                [FMA3]
//    * VFNMADD132SS m32, xmm, xmm                [FMA3]
//    * VFNMADD132SS m32, xmm, xmm{k}{z}          [AVX512F]
//    * VFNMADD132SS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VFNMADD132SS xmm, xmm, xmm{k}{z}          [AVX512F]
//
func (self *Program) VFNMADD132SS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The 4-operand signature is used only by the "{er}, xmm, xmm, xmm{k}{z}"
    // form (embedded rounding control); every other form takes 3 operands.
    switch len(vv) {
        case 0  : p = self.alloc("VFNMADD132SS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMADD132SS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMADD132SS takes 3 or 4 operands")
    }
    // Each operand-pattern match registers one candidate encoding via p.add;
    // the patterns are not mutually exclusive, so several may accumulate.
    // VFNMADD132SS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix (escape 0xc4), opcode 0x9d, then
            // ModRM with reg = v[2] (destination) and rm = v[0].
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x9d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD132SS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x9d)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMADD132SS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; the final mrsd argument is the disp8 scaling
            // factor (4 bytes: one single-precision element).
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x9d)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VFNMADD132SS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            // EVEX byte 4: 0x10 sets EVEX.b, which turns the L'L bits into
            // the static rounding mode carried in vcode(v[0]).
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x9d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMADD132SS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x9d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No pattern matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFNMADD132SS")
    }
    return p
}
 50212  
// VFNMADD213PD performs "Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VFNMADD213PD
// Supported forms : (11 forms)
//
//    * VFNMADD213PD xmm, xmm, xmm                   [FMA3]
//    * VFNMADD213PD m128, xmm, xmm                  [FMA3]
//    * VFNMADD213PD ymm, ymm, ymm                   [FMA3]
//    * VFNMADD213PD m256, ymm, ymm                  [FMA3]
//    * VFNMADD213PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFNMADD213PD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFNMADD213PD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFNMADD213PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFNMADD213PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFNMADD213PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFNMADD213PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VFNMADD213PD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The 4-operand signature is used only by the "{er}, zmm, zmm, zmm{k}{z}"
    // form (embedded rounding control); every other form takes 3 operands.
    switch len(vv) {
        case 0  : p = self.alloc("VFNMADD213PD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMADD213PD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMADD213PD takes 3 or 4 operands")
    }
    // Each operand-pattern match registers one candidate encoding via p.add;
    // the patterns are not mutually exclusive, so several may accumulate.
    // VFNMADD213PD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix (escape 0xc4), opcode 0xac, then
            // ModRM with reg = v[2] (destination) and rm = v[0].
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0xac)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD213PD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xac)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMADD213PD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit(0xac)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD213PD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xac)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMADD213PD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; the final mrsd argument is the disp8 scaling
            // factor (full vector width, 64 bytes here).
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xac)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFNMADD213PD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            // EVEX byte 4: 0x10 sets EVEX.b, which turns the L'L bits into
            // the static rounding mode carried in vcode(v[0]).
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xac)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMADD213PD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (escape 0x62); the trailing
            // 0x40/0x20/0x00 in byte 4 selects vector length 512/256/128.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xac)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD213PD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xac)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFNMADD213PD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xac)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD213PD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xac)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFNMADD213PD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xac)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No pattern matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFNMADD213PD")
    }
    return p
}
 50368  
// VFNMADD213PS performs "Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VFNMADD213PS
// Supported forms : (11 forms)
//
//    * VFNMADD213PS xmm, xmm, xmm                   [FMA3]
//    * VFNMADD213PS m128, xmm, xmm                  [FMA3]
//    * VFNMADD213PS ymm, ymm, ymm                   [FMA3]
//    * VFNMADD213PS m256, ymm, ymm                  [FMA3]
//    * VFNMADD213PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFNMADD213PS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFNMADD213PS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFNMADD213PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFNMADD213PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFNMADD213PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFNMADD213PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VFNMADD213PS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The 4-operand signature is used only by the "{er}, zmm, zmm, zmm{k}{z}"
    // form (embedded rounding control); every other form takes 3 operands.
    switch len(vv) {
        case 0  : p = self.alloc("VFNMADD213PS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMADD213PS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMADD213PS takes 3 or 4 operands")
    }
    // Each operand-pattern match registers one candidate encoding via p.add;
    // the patterns are not mutually exclusive, so several may accumulate.
    // VFNMADD213PS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix (escape 0xc4), opcode 0xac, then
            // ModRM with reg = v[2] (destination) and rm = v[0].
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xac)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD213PS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xac)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMADD213PS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0xac)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD213PS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xac)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMADD213PS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; the final mrsd argument is the disp8 scaling
            // factor (full vector width, 64 bytes here).
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xac)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFNMADD213PS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            // EVEX byte 4: 0x10 sets EVEX.b, which turns the L'L bits into
            // the static rounding mode carried in vcode(v[0]).
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xac)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMADD213PS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (escape 0x62); the trailing
            // 0x40/0x20/0x00 in byte 4 selects vector length 512/256/128.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xac)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD213PS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xac)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFNMADD213PS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xac)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD213PS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xac)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFNMADD213PS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xac)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No pattern matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFNMADD213PS")
    }
    return p
}
 50524  
// VFNMADD213SD performs "Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values".
//
// Mnemonic        : VFNMADD213SD
// Supported forms : (5 forms)
//
//    * VFNMADD213SD xmm, xmm, xmm                [FMA3]
//    * VFNMADD213SD m64, xmm, xmm                [FMA3]
//    * VFNMADD213SD m64, xmm, xmm{k}{z}          [AVX512F]
//    * VFNMADD213SD {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VFNMADD213SD xmm, xmm, xmm{k}{z}          [AVX512F]
//
// Operands are listed sources first, destination last. The variadic vv
// operand is only used by the 4-operand {er} (embedded-rounding) form.
// Every candidate encoding whose operand types match is added to the
// returned *Instruction; the function panics if no form matches.
func (self *Program) VFNMADD213SD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate a 3- or 4-operand instruction depending on whether the
    // optional {er} operand was supplied.
    switch len(vv) {
        case 0  : p = self.alloc("VFNMADD213SD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMADD213SD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMADD213SD takes 3 or 4 operands")
    }
    // VFNMADD213SD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix, opcode 0xad, then a
            // register-direct ModRM byte (reg=v2, r/m=v0).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0xad)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD213SD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX-encoded memory form; mrsd emits ModRM/SIB/displacement
            // for addr(v[0]) (scale 1: no disp8 compression under VEX).
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xad)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMADD213SD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded memory form with optional masking/zeroing;
            // disp8 is compressed with scale 8 (m64 operand).
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xad)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VFNMADD213SD {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix with static rounding: vcode(v[0])
            // carries the rounding mode and 0x10 sets the EVEX.b bit that
            // enables {er}.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xad)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMADD213SD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX register form with optional mask (kcode)
            // and zeroing (zcode); register-direct ModRM (reg=v2, r/m=v0).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xad)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFNMADD213SD")
    }
    return p
}
 50606  
// VFNMADD213SS performs "Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values".
//
// Mnemonic        : VFNMADD213SS
// Supported forms : (5 forms)
//
//    * VFNMADD213SS xmm, xmm, xmm                [FMA3]
//    * VFNMADD213SS m32, xmm, xmm                [FMA3]
//    * VFNMADD213SS m32, xmm, xmm{k}{z}          [AVX512F]
//    * VFNMADD213SS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VFNMADD213SS xmm, xmm, xmm{k}{z}          [AVX512F]
//
// Operands are listed sources first, destination last. The variadic vv
// operand is only used by the 4-operand {er} (embedded-rounding) form.
// Every candidate encoding whose operand types match is added to the
// returned *Instruction; the function panics if no form matches.
func (self *Program) VFNMADD213SS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate a 3- or 4-operand instruction depending on whether the
    // optional {er} operand was supplied.
    switch len(vv) {
        case 0  : p = self.alloc("VFNMADD213SS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMADD213SS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMADD213SS takes 3 or 4 operands")
    }
    // VFNMADD213SS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix, opcode 0xad, then a
            // register-direct ModRM byte (reg=v2, r/m=v0).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xad)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD213SS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX-encoded memory form; mrsd emits ModRM/SIB/displacement
            // for addr(v[0]) (scale 1: no disp8 compression under VEX).
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xad)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMADD213SS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded memory form with optional masking/zeroing;
            // disp8 is compressed with scale 4 (m32 operand).
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xad)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VFNMADD213SS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix with static rounding: vcode(v[0])
            // carries the rounding mode and 0x10 sets the EVEX.b bit that
            // enables {er}.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xad)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMADD213SS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX register form with optional mask (kcode)
            // and zeroing (zcode); register-direct ModRM (reg=v2, r/m=v0).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xad)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFNMADD213SS")
    }
    return p
}
 50688  
// VFNMADD231PD performs "Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VFNMADD231PD
// Supported forms : (11 forms)
//
//    * VFNMADD231PD xmm, xmm, xmm                   [FMA3]
//    * VFNMADD231PD m128, xmm, xmm                  [FMA3]
//    * VFNMADD231PD ymm, ymm, ymm                   [FMA3]
//    * VFNMADD231PD m256, ymm, ymm                  [FMA3]
//    * VFNMADD231PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFNMADD231PD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFNMADD231PD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFNMADD231PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFNMADD231PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFNMADD231PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFNMADD231PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Operands are listed sources first, destination last. The variadic vv
// operand is only used by the 4-operand {er} (embedded-rounding) form.
// Every candidate encoding whose operand types match is added to the
// returned *Instruction; the function panics if no form matches.
func (self *Program) VFNMADD231PD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate a 3- or 4-operand instruction depending on whether the
    // optional {er} operand was supplied.
    switch len(vv) {
        case 0  : p = self.alloc("VFNMADD231PD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMADD231PD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMADD231PD takes 3 or 4 operands")
    }
    // VFNMADD231PD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix (128-bit), opcode 0xbc,
            // then a register-direct ModRM byte (reg=v2, r/m=v0).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0xbc)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD231PD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX-encoded memory form; mrsd emits ModRM/SIB/displacement
            // for addr(v[0]) (scale 1: no disp8 compression under VEX).
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xbc)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMADD231PD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix (256-bit), opcode 0xbc,
            // then a register-direct ModRM byte (reg=v2, r/m=v0).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit(0xbc)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD231PD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX-encoded 256-bit memory form (scale 1: no disp8
            // compression under VEX).
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xbc)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMADD231PD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 512-bit memory form; bcode(v[0]) sets the broadcast
            // bit for the m64bcst variant, disp8 compressed with scale 64.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xbc)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFNMADD231PD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix with static rounding: vcode(v[0])
            // carries the rounding mode and 0x10 sets the EVEX.b bit that
            // enables {er}.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xbc)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMADD231PD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX register form, 512-bit vector length
            // (the 0x40 in the fourth byte); register-direct ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xbc)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD231PD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 128-bit memory form with optional broadcast; disp8
            // compressed with scale 16.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xbc)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFNMADD231PD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX register form, 128-bit vector length
            // (the 0x00 in the fourth byte); register-direct ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xbc)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD231PD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 256-bit memory form with optional broadcast; disp8
            // compressed with scale 32.
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xbc)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFNMADD231PD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX register form, 256-bit vector length
            // (the 0x20 in the fourth byte); register-direct ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xbc)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFNMADD231PD")
    }
    return p
}
 50844  
// VFNMADD231PS performs "Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VFNMADD231PS
// Supported forms : (11 forms)
//
//    * VFNMADD231PS xmm, xmm, xmm                   [FMA3]
//    * VFNMADD231PS m128, xmm, xmm                  [FMA3]
//    * VFNMADD231PS ymm, ymm, ymm                   [FMA3]
//    * VFNMADD231PS m256, ymm, ymm                  [FMA3]
//    * VFNMADD231PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFNMADD231PS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFNMADD231PS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFNMADD231PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFNMADD231PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFNMADD231PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFNMADD231PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Operands are listed sources first, destination last. The variadic vv
// operand is only used by the 4-operand {er} (embedded-rounding) form.
// Every candidate encoding whose operand types match is added to the
// returned *Instruction; the function panics if no form matches.
func (self *Program) VFNMADD231PS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate a 3- or 4-operand instruction depending on whether the
    // optional {er} operand was supplied.
    switch len(vv) {
        case 0  : p = self.alloc("VFNMADD231PS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMADD231PS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMADD231PS takes 3 or 4 operands")
    }
    // VFNMADD231PS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix (128-bit), opcode 0xbc,
            // then a register-direct ModRM byte (reg=v2, r/m=v0).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xbc)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD231PS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX-encoded memory form; mrsd emits ModRM/SIB/displacement
            // for addr(v[0]) (scale 1: no disp8 compression under VEX).
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xbc)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMADD231PS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix (256-bit), opcode 0xbc,
            // then a register-direct ModRM byte (reg=v2, r/m=v0).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0xbc)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD231PS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX-encoded 256-bit memory form (scale 1: no disp8
            // compression under VEX).
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xbc)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMADD231PS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 512-bit memory form; bcode(v[0]) sets the broadcast
            // bit for the m32bcst variant, disp8 compressed with scale 64.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xbc)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFNMADD231PS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix with static rounding: vcode(v[0])
            // carries the rounding mode and 0x10 sets the EVEX.b bit that
            // enables {er}.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xbc)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMADD231PS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX register form, 512-bit vector length
            // (the 0x40 in the fourth byte); register-direct ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xbc)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD231PS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 128-bit memory form with optional broadcast; disp8
            // compressed with scale 16.
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xbc)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFNMADD231PS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX register form, 128-bit vector length
            // (the 0x00 in the fourth byte); register-direct ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xbc)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD231PS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 256-bit memory form with optional broadcast; disp8
            // compressed with scale 32.
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xbc)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFNMADD231PS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX register form, 256-bit vector length
            // (the 0x20 in the fourth byte); register-direct ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xbc)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFNMADD231PS")
    }
    return p
}
 51000  
// VFNMADD231SD performs "Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values".
//
// Mnemonic        : VFNMADD231SD
// Supported forms : (5 forms)
//
//    * VFNMADD231SD xmm, xmm, xmm                [FMA3]
//    * VFNMADD231SD m64, xmm, xmm                [FMA3]
//    * VFNMADD231SD m64, xmm, xmm{k}{z}          [AVX512F]
//    * VFNMADD231SD {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VFNMADD231SD xmm, xmm, xmm{k}{z}          [AVX512F]
//
// Operands are listed sources first, destination last. The variadic vv
// operand is only used by the 4-operand {er} (embedded-rounding) form.
// Every candidate encoding whose operand types match is added to the
// returned *Instruction; the function panics if no form matches.
func (self *Program) VFNMADD231SD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate a 3- or 4-operand instruction depending on whether the
    // optional {er} operand was supplied.
    switch len(vv) {
        case 0  : p = self.alloc("VFNMADD231SD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMADD231SD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMADD231SD takes 3 or 4 operands")
    }
    // VFNMADD231SD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix, opcode 0xbd, then a
            // register-direct ModRM byte (reg=v2, r/m=v0).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0xbd)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD231SD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX-encoded memory form; mrsd emits ModRM/SIB/displacement
            // for addr(v[0]) (scale 1: no disp8 compression under VEX).
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xbd)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMADD231SD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded memory form with optional masking/zeroing;
            // disp8 is compressed with scale 8 (m64 operand).
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xbd)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VFNMADD231SD {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix with static rounding: vcode(v[0])
            // carries the rounding mode and 0x10 sets the EVEX.b bit that
            // enables {er}.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xbd)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMADD231SD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX register form with optional mask (kcode)
            // and zeroing (zcode); register-direct ModRM (reg=v2, r/m=v0).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xbd)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFNMADD231SD")
    }
    return p
}
 51082  
// VFNMADD231SS performs "Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values".
//
// Mnemonic        : VFNMADD231SS
// Supported forms : (5 forms)
//
//    * VFNMADD231SS xmm, xmm, xmm                [FMA3]
//    * VFNMADD231SS m32, xmm, xmm                [FMA3]
//    * VFNMADD231SS m32, xmm, xmm{k}{z}          [AVX512F]
//    * VFNMADD231SS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VFNMADD231SS xmm, xmm, xmm{k}{z}          [AVX512F]
//
// Operands are listed sources first, destination last. The variadic vv
// operand is only used by the 4-operand {er} (embedded-rounding) form.
// Every candidate encoding whose operand types match is added to the
// returned *Instruction; the function panics if no form matches.
func (self *Program) VFNMADD231SS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate a 3- or 4-operand instruction depending on whether the
    // optional {er} operand was supplied.
    switch len(vv) {
        case 0  : p = self.alloc("VFNMADD231SS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMADD231SS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMADD231SS takes 3 or 4 operands")
    }
    // VFNMADD231SS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix, opcode 0xbd, then a
            // register-direct ModRM byte (reg=v2, r/m=v0).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xbd)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD231SS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX-encoded memory form; mrsd emits ModRM/SIB/displacement
            // for addr(v[0]) (scale 1: no disp8 compression under VEX).
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xbd)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMADD231SS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded memory form with optional masking/zeroing;
            // disp8 is compressed with scale 4 (m32 operand).
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xbd)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VFNMADD231SS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix with static rounding: vcode(v[0])
            // carries the rounding mode and 0x10 sets the EVEX.b bit that
            // enables {er}.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xbd)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMADD231SS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX register form with optional mask (kcode)
            // and zeroing (zcode); register-direct ModRM (reg=v2, r/m=v0).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xbd)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFNMADD231SS")
    }
    return p
}
 51164  
 51165  // VFNMADDPD performs "Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values".
 51166  //
 51167  // Mnemonic        : VFNMADDPD
 51168  // Supported forms : (6 forms)
 51169  //
 51170  //    * VFNMADDPD xmm, xmm, xmm, xmm     [FMA4]
 51171  //    * VFNMADDPD m128, xmm, xmm, xmm    [FMA4]
 51172  //    * VFNMADDPD xmm, m128, xmm, xmm    [FMA4]
 51173  //    * VFNMADDPD ymm, ymm, ymm, ymm     [FMA4]
 51174  //    * VFNMADDPD m256, ymm, ymm, ymm    [FMA4]
 51175  //    * VFNMADDPD ymm, m256, ymm, ymm    [FMA4]
 51176  //
  51177  func (self *Program) VFNMADDPD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
  51178      p := self.alloc("VFNMADDPD", 4, Operands { v0, v1, v2, v3 })
  51179      // VFNMADDPD xmm, xmm, xmm, xmm
  51180      if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
  51181          self.require(ISA_FMA4)
  51182          p.domain = DomainFMA
                 // First candidate encoding: VEX byte2 base 0xf9 (W set); v[0] is placed in
                 // ModRM.rm and v[1] in the high nibble of the trailing register-selector byte.
  51183          p.add(0, func(m *_Encoding, v []interface{}) {
  51184              m.emit(0xc4)
  51185              m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
  51186              m.emit(0xf9 ^ (hlcode(v[2]) << 3))
  51187              m.emit(0x79)
  51188              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
  51189              m.emit(hlcode(v[1]) << 4)
  51190          })
                 // Alternate candidate: byte2 base 0x79 (W clear) with v[0]/v[1] swapped
                 // between ModRM.rm and the selector byte — same instruction, different encoding.
  51191          p.add(0, func(m *_Encoding, v []interface{}) {
  51192              m.emit(0xc4)
  51193              m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
  51194              m.emit(0x79 ^ (hlcode(v[2]) << 3))
  51195              m.emit(0x79)
  51196              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
  51197              m.emit(hlcode(v[0]) << 4)
  51198          })
  51199      }
  51200      // VFNMADDPD m128, xmm, xmm, xmm
  51201      if isM128(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
  51202          self.require(ISA_FMA4)
  51203          p.domain = DomainFMA
                 // vex3 writes the 3-byte VEX prefix; mrsd writes ModRM/SIB/displacement
                 // (final argument appears to be the disp scale; 1 = no compression — TODO confirm).
  51204          p.add(0, func(m *_Encoding, v []interface{}) {
  51205              m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2]))
  51206              m.emit(0x79)
  51207              m.mrsd(lcode(v[3]), addr(v[0]), 1)
  51208              m.emit(hlcode(v[1]) << 4)
  51209          })
  51210      }
  51211      // VFNMADDPD xmm, m128, xmm, xmm
  51212      if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
  51213          self.require(ISA_FMA4)
  51214          p.domain = DomainFMA
  51215          p.add(0, func(m *_Encoding, v []interface{}) {
  51216              m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
  51217              m.emit(0x79)
  51218              m.mrsd(lcode(v[3]), addr(v[1]), 1)
  51219              m.emit(hlcode(v[0]) << 4)
  51220          })
  51221      }
  51222      // VFNMADDPD ymm, ymm, ymm, ymm
  51223      if isYMM(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
  51224          self.require(ISA_FMA4)
  51225          p.domain = DomainFMA
                 // Same dual-encoding scheme as the xmm form; byte2 bases 0xfd/0x7d carry VEX.L
                 // for the 256-bit operand size.
  51226          p.add(0, func(m *_Encoding, v []interface{}) {
  51227              m.emit(0xc4)
  51228              m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
  51229              m.emit(0xfd ^ (hlcode(v[2]) << 3))
  51230              m.emit(0x79)
  51231              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
  51232              m.emit(hlcode(v[1]) << 4)
  51233          })
  51234          p.add(0, func(m *_Encoding, v []interface{}) {
  51235              m.emit(0xc4)
  51236              m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
  51237              m.emit(0x7d ^ (hlcode(v[2]) << 3))
  51238              m.emit(0x79)
  51239              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
  51240              m.emit(hlcode(v[0]) << 4)
  51241          })
  51242      }
  51243      // VFNMADDPD m256, ymm, ymm, ymm
  51244      if isM256(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
  51245          self.require(ISA_FMA4)
  51246          p.domain = DomainFMA
  51247          p.add(0, func(m *_Encoding, v []interface{}) {
  51248              m.vex3(0xc4, 0b11, 0x85, hcode(v[3]), addr(v[0]), hlcode(v[2]))
  51249              m.emit(0x79)
  51250              m.mrsd(lcode(v[3]), addr(v[0]), 1)
  51251              m.emit(hlcode(v[1]) << 4)
  51252          })
  51253      }
  51254      // VFNMADDPD ymm, m256, ymm, ymm
  51255      if isYMM(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
  51256          self.require(ISA_FMA4)
  51257          p.domain = DomainFMA
  51258          p.add(0, func(m *_Encoding, v []interface{}) {
  51259              m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
  51260              m.emit(0x79)
  51261              m.mrsd(lcode(v[3]), addr(v[1]), 1)
  51262              m.emit(hlcode(v[0]) << 4)
  51263          })
  51264      }
             // No operand form matched: the generated encoders treat this as a caller error.
  51265      if p.len == 0 {
  51266          panic("invalid operands for VFNMADDPD")
  51267      }
  51268      return p
  51269  }
 51270  
 51271  // VFNMADDPS performs "Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values".
 51272  //
 51273  // Mnemonic        : VFNMADDPS
 51274  // Supported forms : (6 forms)
 51275  //
 51276  //    * VFNMADDPS xmm, xmm, xmm, xmm     [FMA4]
 51277  //    * VFNMADDPS m128, xmm, xmm, xmm    [FMA4]
 51278  //    * VFNMADDPS xmm, m128, xmm, xmm    [FMA4]
 51279  //    * VFNMADDPS ymm, ymm, ymm, ymm     [FMA4]
 51280  //    * VFNMADDPS m256, ymm, ymm, ymm    [FMA4]
 51281  //    * VFNMADDPS ymm, m256, ymm, ymm    [FMA4]
 51282  //
  51283  func (self *Program) VFNMADDPS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
  51284      p := self.alloc("VFNMADDPS", 4, Operands { v0, v1, v2, v3 })
  51285      // VFNMADDPS xmm, xmm, xmm, xmm
  51286      if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
  51287          self.require(ISA_FMA4)
  51288          p.domain = DomainFMA
                 // Two equivalent FMA4 encodings are registered: byte2 base 0xf9 (W set) with
                 // v[0] in ModRM.rm, and 0x79 (W clear) with v[0]/v[1] swapped between
                 // ModRM.rm and the trailing register-selector byte (reg index in bits 7:4).
  51289          p.add(0, func(m *_Encoding, v []interface{}) {
  51290              m.emit(0xc4)
  51291              m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
  51292              m.emit(0xf9 ^ (hlcode(v[2]) << 3))
  51293              m.emit(0x78)
  51294              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
  51295              m.emit(hlcode(v[1]) << 4)
  51296          })
  51297          p.add(0, func(m *_Encoding, v []interface{}) {
  51298              m.emit(0xc4)
  51299              m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
  51300              m.emit(0x79 ^ (hlcode(v[2]) << 3))
  51301              m.emit(0x78)
  51302              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
  51303              m.emit(hlcode(v[0]) << 4)
  51304          })
  51305      }
  51306      // VFNMADDPS m128, xmm, xmm, xmm
  51307      if isM128(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
  51308          self.require(ISA_FMA4)
  51309          p.domain = DomainFMA
                 // vex3 writes the 3-byte VEX prefix; mrsd writes ModRM/SIB/displacement
                 // (final argument appears to be the disp scale; 1 = no compression — TODO confirm).
  51310          p.add(0, func(m *_Encoding, v []interface{}) {
  51311              m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2]))
  51312              m.emit(0x78)
  51313              m.mrsd(lcode(v[3]), addr(v[0]), 1)
  51314              m.emit(hlcode(v[1]) << 4)
  51315          })
  51316      }
  51317      // VFNMADDPS xmm, m128, xmm, xmm
  51318      if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
  51319          self.require(ISA_FMA4)
  51320          p.domain = DomainFMA
  51321          p.add(0, func(m *_Encoding, v []interface{}) {
  51322              m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
  51323              m.emit(0x78)
  51324              m.mrsd(lcode(v[3]), addr(v[1]), 1)
  51325              m.emit(hlcode(v[0]) << 4)
  51326          })
  51327      }
  51328      // VFNMADDPS ymm, ymm, ymm, ymm
  51329      if isYMM(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
  51330          self.require(ISA_FMA4)
  51331          p.domain = DomainFMA
                 // Same dual-encoding scheme as the xmm form; bases 0xfd/0x7d carry VEX.L
                 // for the 256-bit operand size.
  51332          p.add(0, func(m *_Encoding, v []interface{}) {
  51333              m.emit(0xc4)
  51334              m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
  51335              m.emit(0xfd ^ (hlcode(v[2]) << 3))
  51336              m.emit(0x78)
  51337              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
  51338              m.emit(hlcode(v[1]) << 4)
  51339          })
  51340          p.add(0, func(m *_Encoding, v []interface{}) {
  51341              m.emit(0xc4)
  51342              m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
  51343              m.emit(0x7d ^ (hlcode(v[2]) << 3))
  51344              m.emit(0x78)
  51345              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
  51346              m.emit(hlcode(v[0]) << 4)
  51347          })
  51348      }
  51349      // VFNMADDPS m256, ymm, ymm, ymm
  51350      if isM256(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
  51351          self.require(ISA_FMA4)
  51352          p.domain = DomainFMA
  51353          p.add(0, func(m *_Encoding, v []interface{}) {
  51354              m.vex3(0xc4, 0b11, 0x85, hcode(v[3]), addr(v[0]), hlcode(v[2]))
  51355              m.emit(0x78)
  51356              m.mrsd(lcode(v[3]), addr(v[0]), 1)
  51357              m.emit(hlcode(v[1]) << 4)
  51358          })
  51359      }
  51360      // VFNMADDPS ymm, m256, ymm, ymm
  51361      if isYMM(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
  51362          self.require(ISA_FMA4)
  51363          p.domain = DomainFMA
  51364          p.add(0, func(m *_Encoding, v []interface{}) {
  51365              m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
  51366              m.emit(0x78)
  51367              m.mrsd(lcode(v[3]), addr(v[1]), 1)
  51368              m.emit(hlcode(v[0]) << 4)
  51369          })
  51370      }
             // No operand form matched: the generated encoders treat this as a caller error.
  51371      if p.len == 0 {
  51372          panic("invalid operands for VFNMADDPS")
  51373      }
  51374      return p
  51375  }
 51376  
 51377  // VFNMADDSD performs "Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values".
 51378  //
 51379  // Mnemonic        : VFNMADDSD
 51380  // Supported forms : (3 forms)
 51381  //
 51382  //    * VFNMADDSD xmm, xmm, xmm, xmm    [FMA4]
 51383  //    * VFNMADDSD m64, xmm, xmm, xmm    [FMA4]
 51384  //    * VFNMADDSD xmm, m64, xmm, xmm    [FMA4]
 51385  //
  51386  func (self *Program) VFNMADDSD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
  51387      p := self.alloc("VFNMADDSD", 4, Operands { v0, v1, v2, v3 })
  51388      // VFNMADDSD xmm, xmm, xmm, xmm
  51389      if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
  51390          self.require(ISA_FMA4)
  51391          p.domain = DomainFMA
                 // Two equivalent FMA4 encodings: byte2 base 0xf9 (W set) with v[0] in
                 // ModRM.rm, and 0x79 (W clear) with v[0]/v[1] swapped between ModRM.rm and
                 // the trailing register-selector byte (reg index in bits 7:4).
  51392          p.add(0, func(m *_Encoding, v []interface{}) {
  51393              m.emit(0xc4)
  51394              m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
  51395              m.emit(0xf9 ^ (hlcode(v[2]) << 3))
  51396              m.emit(0x7b)
  51397              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
  51398              m.emit(hlcode(v[1]) << 4)
  51399          })
  51400          p.add(0, func(m *_Encoding, v []interface{}) {
  51401              m.emit(0xc4)
  51402              m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
  51403              m.emit(0x79 ^ (hlcode(v[2]) << 3))
  51404              m.emit(0x7b)
  51405              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
  51406              m.emit(hlcode(v[0]) << 4)
  51407          })
  51408      }
  51409      // VFNMADDSD m64, xmm, xmm, xmm
  51410      if isM64(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
  51411          self.require(ISA_FMA4)
  51412          p.domain = DomainFMA
                 // vex3 writes the 3-byte VEX prefix; mrsd writes ModRM/SIB/displacement
                 // (final argument appears to be the disp scale; 1 = no compression — TODO confirm).
  51413          p.add(0, func(m *_Encoding, v []interface{}) {
  51414              m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2]))
  51415              m.emit(0x7b)
  51416              m.mrsd(lcode(v[3]), addr(v[0]), 1)
  51417              m.emit(hlcode(v[1]) << 4)
  51418          })
  51419      }
  51420      // VFNMADDSD xmm, m64, xmm, xmm
  51421      if isXMM(v0) && isM64(v1) && isXMM(v2) && isXMM(v3) {
  51422          self.require(ISA_FMA4)
  51423          p.domain = DomainFMA
  51424          p.add(0, func(m *_Encoding, v []interface{}) {
  51425              m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
  51426              m.emit(0x7b)
  51427              m.mrsd(lcode(v[3]), addr(v[1]), 1)
  51428              m.emit(hlcode(v[0]) << 4)
  51429          })
  51430      }
             // No operand form matched: the generated encoders treat this as a caller error.
  51431      if p.len == 0 {
  51432          panic("invalid operands for VFNMADDSD")
  51433      }
  51434      return p
  51435  }
 51436  
 51437  // VFNMADDSS performs "Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values".
 51438  //
 51439  // Mnemonic        : VFNMADDSS
 51440  // Supported forms : (3 forms)
 51441  //
 51442  //    * VFNMADDSS xmm, xmm, xmm, xmm    [FMA4]
 51443  //    * VFNMADDSS m32, xmm, xmm, xmm    [FMA4]
 51444  //    * VFNMADDSS xmm, m32, xmm, xmm    [FMA4]
 51445  //
  51446  func (self *Program) VFNMADDSS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
  51447      p := self.alloc("VFNMADDSS", 4, Operands { v0, v1, v2, v3 })
  51448      // VFNMADDSS xmm, xmm, xmm, xmm
  51449      if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
  51450          self.require(ISA_FMA4)
  51451          p.domain = DomainFMA
                 // Two equivalent FMA4 encodings: byte2 base 0xf9 (W set) with v[0] in
                 // ModRM.rm, and 0x79 (W clear) with v[0]/v[1] swapped between ModRM.rm and
                 // the trailing register-selector byte (reg index in bits 7:4).
  51452          p.add(0, func(m *_Encoding, v []interface{}) {
  51453              m.emit(0xc4)
  51454              m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
  51455              m.emit(0xf9 ^ (hlcode(v[2]) << 3))
  51456              m.emit(0x7a)
  51457              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
  51458              m.emit(hlcode(v[1]) << 4)
  51459          })
  51460          p.add(0, func(m *_Encoding, v []interface{}) {
  51461              m.emit(0xc4)
  51462              m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
  51463              m.emit(0x79 ^ (hlcode(v[2]) << 3))
  51464              m.emit(0x7a)
  51465              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
  51466              m.emit(hlcode(v[0]) << 4)
  51467          })
  51468      }
  51469      // VFNMADDSS m32, xmm, xmm, xmm
  51470      if isM32(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
  51471          self.require(ISA_FMA4)
  51472          p.domain = DomainFMA
                 // vex3 writes the 3-byte VEX prefix; mrsd writes ModRM/SIB/displacement
                 // (final argument appears to be the disp scale; 1 = no compression — TODO confirm).
  51473          p.add(0, func(m *_Encoding, v []interface{}) {
  51474              m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2]))
  51475              m.emit(0x7a)
  51476              m.mrsd(lcode(v[3]), addr(v[0]), 1)
  51477              m.emit(hlcode(v[1]) << 4)
  51478          })
  51479      }
  51480      // VFNMADDSS xmm, m32, xmm, xmm
  51481      if isXMM(v0) && isM32(v1) && isXMM(v2) && isXMM(v3) {
  51482          self.require(ISA_FMA4)
  51483          p.domain = DomainFMA
  51484          p.add(0, func(m *_Encoding, v []interface{}) {
  51485              m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
  51486              m.emit(0x7a)
  51487              m.mrsd(lcode(v[3]), addr(v[1]), 1)
  51488              m.emit(hlcode(v[0]) << 4)
  51489          })
  51490      }
             // No operand form matched: the generated encoders treat this as a caller error.
  51491      if p.len == 0 {
  51492          panic("invalid operands for VFNMADDSS")
  51493      }
  51494      return p
  51495  }
 51496  
 51497  // VFNMSUB132PD performs "Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values".
 51498  //
 51499  // Mnemonic        : VFNMSUB132PD
 51500  // Supported forms : (11 forms)
 51501  //
 51502  //    * VFNMSUB132PD xmm, xmm, xmm                   [FMA3]
 51503  //    * VFNMSUB132PD m128, xmm, xmm                  [FMA3]
 51504  //    * VFNMSUB132PD ymm, ymm, ymm                   [FMA3]
 51505  //    * VFNMSUB132PD m256, ymm, ymm                  [FMA3]
 51506  //    * VFNMSUB132PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
 51507  //    * VFNMSUB132PD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 51508  //    * VFNMSUB132PD zmm, zmm, zmm{k}{z}             [AVX512F]
 51509  //    * VFNMSUB132PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 51510  //    * VFNMSUB132PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 51511  //    * VFNMSUB132PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 51512  //    * VFNMSUB132PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 51513  //
  51514  func (self *Program) VFNMSUB132PD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
  51515      var p *Instruction
             // 3 operands covers the FMA3 and plain AVX-512 forms; a 4th operand is only
             // the {er} rounding-control variant.
  51516      switch len(vv) {
  51517          case 0  : p = self.alloc("VFNMSUB132PD", 3, Operands { v0, v1, v2 })
  51518          case 1  : p = self.alloc("VFNMSUB132PD", 4, Operands { v0, v1, v2, vv[0] })
  51519          default : panic("instruction VFNMSUB132PD takes 3 or 4 operands")
  51520      }
  51521      // VFNMSUB132PD xmm, xmm, xmm
  51522      if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
  51523          self.require(ISA_FMA3)
  51524          p.domain = DomainFMA
  51525          p.add(0, func(m *_Encoding, v []interface{}) {
  51526              m.emit(0xc4)
  51527              m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
  51528              m.emit(0xf9 ^ (hlcode(v[1]) << 3))
  51529              m.emit(0x9e)
  51530              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
  51531          })
  51532      }
  51533      // VFNMSUB132PD m128, xmm, xmm
  51534      if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
  51535          self.require(ISA_FMA3)
  51536          p.domain = DomainFMA
                 // vex3 writes the 3-byte VEX prefix; mrsd writes ModRM/SIB/displacement
                 // (final argument appears to be the disp scale; 1 = no compression — TODO confirm).
  51537          p.add(0, func(m *_Encoding, v []interface{}) {
  51538              m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
  51539              m.emit(0x9e)
  51540              m.mrsd(lcode(v[2]), addr(v[0]), 1)
  51541          })
  51542      }
  51543      // VFNMSUB132PD ymm, ymm, ymm
  51544      if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
  51545          self.require(ISA_FMA3)
  51546          p.domain = DomainFMA
  51547          p.add(0, func(m *_Encoding, v []interface{}) {
  51548              m.emit(0xc4)
  51549              m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
  51550              m.emit(0xfd ^ (hlcode(v[1]) << 3))
  51551              m.emit(0x9e)
  51552              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
  51553          })
  51554      }
  51555      // VFNMSUB132PD m256, ymm, ymm
  51556      if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
  51557          self.require(ISA_FMA3)
  51558          p.domain = DomainFMA
  51559          p.add(0, func(m *_Encoding, v []interface{}) {
  51560              m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))
  51561              m.emit(0x9e)
  51562              m.mrsd(lcode(v[2]), addr(v[0]), 1)
  51563          })
  51564      }
  51565      // VFNMSUB132PD m512/m64bcst, zmm, zmm{k}{z}
  51566      if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
  51567          self.require(ISA_AVX512F)
  51568          p.domain = DomainFMA
                 // evex() builds the EVEX prefix; the mrsd scale (64) appears to match the
                 // disp8*N compression for a full 64-byte memory operand — TODO confirm.
  51569          p.add(0, func(m *_Encoding, v []interface{}) {
  51570              m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
  51571              m.emit(0x9e)
  51572              m.mrsd(lcode(v[2]), addr(v[0]), 64)
  51573          })
  51574      }
  51575      // VFNMSUB132PD {er}, zmm, zmm, zmm{k}{z}
  51576      if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
  51577          self.require(ISA_AVX512F)
  51578          p.domain = DomainFMA
                 // Hand-rolled 4-byte EVEX prefix (leading 0x62); vcode(v[0]) << 5 folds in the
                 // {er} operand (presumably the rounding-control bits), kcode/zcode carry {k}{z}.
  51579          p.add(0, func(m *_Encoding, v []interface{}) {
  51580              m.emit(0x62)
  51581              m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
  51582              m.emit(0xfd ^ (hlcode(v[2]) << 3))
  51583              m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
  51584              m.emit(0x9e)
  51585              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
  51586          })
  51587      }
  51588      // VFNMSUB132PD zmm, zmm, zmm{k}{z}
  51589      if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
  51590          self.require(ISA_AVX512F)
  51591          p.domain = DomainFMA
  51592          p.add(0, func(m *_Encoding, v []interface{}) {
  51593              m.emit(0x62)
  51594              m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
  51595              m.emit(0xfd ^ (hlcode(v[1]) << 3))
  51596              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
  51597              m.emit(0x9e)
  51598              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
  51599          })
  51600      }
  51601      // VFNMSUB132PD m128/m64bcst, xmm, xmm{k}{z}
  51602      if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
  51603          self.require(ISA_AVX512VL | ISA_AVX512F)
  51604          p.domain = DomainFMA
  51605          p.add(0, func(m *_Encoding, v []interface{}) {
  51606              m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
  51607              m.emit(0x9e)
  51608              m.mrsd(lcode(v[2]), addr(v[0]), 16)
  51609          })
  51610      }
  51611      // VFNMSUB132PD xmm, xmm, xmm{k}{z}
  51612      if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
  51613          self.require(ISA_AVX512VL | ISA_AVX512F)
  51614          p.domain = DomainFMA
  51615          p.add(0, func(m *_Encoding, v []interface{}) {
  51616              m.emit(0x62)
  51617              m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
  51618              m.emit(0xfd ^ (hlcode(v[1]) << 3))
  51619              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
  51620              m.emit(0x9e)
  51621              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
  51622          })
  51623      }
  51624      // VFNMSUB132PD m256/m64bcst, ymm, ymm{k}{z}
  51625      if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
  51626          self.require(ISA_AVX512VL | ISA_AVX512F)
  51627          p.domain = DomainFMA
  51628          p.add(0, func(m *_Encoding, v []interface{}) {
  51629              m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
  51630              m.emit(0x9e)
  51631              m.mrsd(lcode(v[2]), addr(v[0]), 32)
  51632          })
  51633      }
  51634      // VFNMSUB132PD ymm, ymm, ymm{k}{z}
  51635      if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
  51636          self.require(ISA_AVX512VL | ISA_AVX512F)
  51637          p.domain = DomainFMA
  51638          p.add(0, func(m *_Encoding, v []interface{}) {
  51639              m.emit(0x62)
  51640              m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
  51641              m.emit(0xfd ^ (hlcode(v[1]) << 3))
  51642              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
  51643              m.emit(0x9e)
  51644              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
  51645          })
  51646      }
             // No operand form matched: the generated encoders treat this as a caller error.
  51647      if p.len == 0 {
  51648          panic("invalid operands for VFNMSUB132PD")
  51649      }
  51650      return p
  51651  }
 51652  
 51653  // VFNMSUB132PS performs "Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values".
 51654  //
 51655  // Mnemonic        : VFNMSUB132PS
 51656  // Supported forms : (11 forms)
 51657  //
 51658  //    * VFNMSUB132PS xmm, xmm, xmm                   [FMA3]
 51659  //    * VFNMSUB132PS m128, xmm, xmm                  [FMA3]
 51660  //    * VFNMSUB132PS ymm, ymm, ymm                   [FMA3]
 51661  //    * VFNMSUB132PS m256, ymm, ymm                  [FMA3]
 51662  //    * VFNMSUB132PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
 51663  //    * VFNMSUB132PS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 51664  //    * VFNMSUB132PS zmm, zmm, zmm{k}{z}             [AVX512F]
 51665  //    * VFNMSUB132PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 51666  //    * VFNMSUB132PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 51667  //    * VFNMSUB132PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 51668  //    * VFNMSUB132PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 51669  //
  51670  func (self *Program) VFNMSUB132PS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
  51671      var p *Instruction
             // 3 operands covers the FMA3 and plain AVX-512 forms; a 4th operand is only
             // the {er} rounding-control variant.
  51672      switch len(vv) {
  51673          case 0  : p = self.alloc("VFNMSUB132PS", 3, Operands { v0, v1, v2 })
  51674          case 1  : p = self.alloc("VFNMSUB132PS", 4, Operands { v0, v1, v2, vv[0] })
  51675          default : panic("instruction VFNMSUB132PS takes 3 or 4 operands")
  51676      }
  51677      // VFNMSUB132PS xmm, xmm, xmm
  51678      if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
  51679          self.require(ISA_FMA3)
  51680          p.domain = DomainFMA
  51681          p.add(0, func(m *_Encoding, v []interface{}) {
  51682              m.emit(0xc4)
  51683              m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
  51684              m.emit(0x79 ^ (hlcode(v[1]) << 3))
  51685              m.emit(0x9e)
  51686              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
  51687          })
  51688      }
  51689      // VFNMSUB132PS m128, xmm, xmm
  51690      if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
  51691          self.require(ISA_FMA3)
  51692          p.domain = DomainFMA
                 // vex3 writes the 3-byte VEX prefix; mrsd writes ModRM/SIB/displacement
                 // (final argument appears to be the disp scale; 1 = no compression — TODO confirm).
  51693          p.add(0, func(m *_Encoding, v []interface{}) {
  51694              m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
  51695              m.emit(0x9e)
  51696              m.mrsd(lcode(v[2]), addr(v[0]), 1)
  51697          })
  51698      }
  51699      // VFNMSUB132PS ymm, ymm, ymm
  51700      if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
  51701          self.require(ISA_FMA3)
  51702          p.domain = DomainFMA
  51703          p.add(0, func(m *_Encoding, v []interface{}) {
  51704              m.emit(0xc4)
  51705              m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
  51706              m.emit(0x7d ^ (hlcode(v[1]) << 3))
  51707              m.emit(0x9e)
  51708              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
  51709          })
  51710      }
  51711      // VFNMSUB132PS m256, ymm, ymm
  51712      if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
  51713          self.require(ISA_FMA3)
  51714          p.domain = DomainFMA
  51715          p.add(0, func(m *_Encoding, v []interface{}) {
  51716              m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
  51717              m.emit(0x9e)
  51718              m.mrsd(lcode(v[2]), addr(v[0]), 1)
  51719          })
  51720      }
  51721      // VFNMSUB132PS m512/m32bcst, zmm, zmm{k}{z}
  51722      if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
  51723          self.require(ISA_AVX512F)
  51724          p.domain = DomainFMA
                 // evex() builds the EVEX prefix; the mrsd scale (64) appears to match the
                 // disp8*N compression for a full 64-byte memory operand — TODO confirm.
  51725          p.add(0, func(m *_Encoding, v []interface{}) {
  51726              m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
  51727              m.emit(0x9e)
  51728              m.mrsd(lcode(v[2]), addr(v[0]), 64)
  51729          })
  51730      }
  51731      // VFNMSUB132PS {er}, zmm, zmm, zmm{k}{z}
  51732      if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
  51733          self.require(ISA_AVX512F)
  51734          p.domain = DomainFMA
                 // Hand-rolled 4-byte EVEX prefix (leading 0x62); vcode(v[0]) << 5 folds in the
                 // {er} operand (presumably the rounding-control bits), kcode/zcode carry {k}{z}.
  51735          p.add(0, func(m *_Encoding, v []interface{}) {
  51736              m.emit(0x62)
  51737              m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
  51738              m.emit(0x7d ^ (hlcode(v[2]) << 3))
  51739              m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
  51740              m.emit(0x9e)
  51741              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
  51742          })
  51743      }
  51744      // VFNMSUB132PS zmm, zmm, zmm{k}{z}
  51745      if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
  51746          self.require(ISA_AVX512F)
  51747          p.domain = DomainFMA
  51748          p.add(0, func(m *_Encoding, v []interface{}) {
  51749              m.emit(0x62)
  51750              m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
  51751              m.emit(0x7d ^ (hlcode(v[1]) << 3))
  51752              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
  51753              m.emit(0x9e)
  51754              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
  51755          })
  51756      }
  51757      // VFNMSUB132PS m128/m32bcst, xmm, xmm{k}{z}
  51758      if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
  51759          self.require(ISA_AVX512VL | ISA_AVX512F)
  51760          p.domain = DomainFMA
  51761          p.add(0, func(m *_Encoding, v []interface{}) {
  51762              m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
  51763              m.emit(0x9e)
  51764              m.mrsd(lcode(v[2]), addr(v[0]), 16)
  51765          })
  51766      }
  51767      // VFNMSUB132PS xmm, xmm, xmm{k}{z}
  51768      if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
  51769          self.require(ISA_AVX512VL | ISA_AVX512F)
  51770          p.domain = DomainFMA
  51771          p.add(0, func(m *_Encoding, v []interface{}) {
  51772              m.emit(0x62)
  51773              m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
  51774              m.emit(0x7d ^ (hlcode(v[1]) << 3))
  51775              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
  51776              m.emit(0x9e)
  51777              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
  51778          })
  51779      }
  51780      // VFNMSUB132PS m256/m32bcst, ymm, ymm{k}{z}
  51781      if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
  51782          self.require(ISA_AVX512VL | ISA_AVX512F)
  51783          p.domain = DomainFMA
  51784          p.add(0, func(m *_Encoding, v []interface{}) {
  51785              m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
  51786              m.emit(0x9e)
  51787              m.mrsd(lcode(v[2]), addr(v[0]), 32)
  51788          })
  51789      }
  51790      // VFNMSUB132PS ymm, ymm, ymm{k}{z}
  51791      if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
  51792          self.require(ISA_AVX512VL | ISA_AVX512F)
  51793          p.domain = DomainFMA
  51794          p.add(0, func(m *_Encoding, v []interface{}) {
  51795              m.emit(0x62)
  51796              m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
  51797              m.emit(0x7d ^ (hlcode(v[1]) << 3))
  51798              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
  51799              m.emit(0x9e)
  51800              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
  51801          })
  51802      }
             // No operand form matched: the generated encoders treat this as a caller error.
  51803      if p.len == 0 {
  51804          panic("invalid operands for VFNMSUB132PS")
  51805      }
  51806      return p
  51807  }
 51808  
// VFNMSUB132SD performs "Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values".
//
// Mnemonic        : VFNMSUB132SD
// Supported forms : (5 forms)
//
//    * VFNMSUB132SD xmm, xmm, xmm                [FMA3]
//    * VFNMSUB132SD m64, xmm, xmm                [FMA3]
//    * VFNMSUB132SD m64, xmm, xmm{k}{z}          [AVX512F]
//    * VFNMSUB132SD {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VFNMSUB132SD xmm, xmm, xmm{k}{z}          [AVX512F]
//
func (self *Program) VFNMSUB132SD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // The optional trailing operand (vv[0]) is only used by the 4-operand
    // {er} form, where v0 carries the embedded rounding-control operand.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VFNMSUB132SD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMSUB132SD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMSUB132SD takes 3 or 4 operands")
    }
    // VFNMSUB132SD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // Hand-rolled 3-byte VEX encoding (0xc4 prefix), opcode 0x9f,
        // followed by a reg-reg ModRM byte (mod=11).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0x9f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB132SD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // VEX encoding with a memory source; mrsd emits the ModRM/SIB/disp
        // for addr(v[0]) with a displacement multiplier of 1 (no compression).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x9f)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMSUB132SD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX encoding with a memory source; the trailing 8 is the disp8*N
        // scale factor (element size 8 bytes for a scalar double).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x9f)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VFNMSUB132SD {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // Hand-rolled 4-byte EVEX prefix (0x62 escape) with the rounding
        // control from v[0] folded into byte 3, then opcode and reg-reg ModRM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x9f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMSUB132SD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // Hand-rolled EVEX prefix for the masked register-register form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x9f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFNMSUB132SD")
    }
    return p
}
 51890  
// VFNMSUB132SS performs "Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values".
//
// Mnemonic        : VFNMSUB132SS
// Supported forms : (5 forms)
//
//    * VFNMSUB132SS xmm, xmm, xmm                [FMA3]
//    * VFNMSUB132SS m32, xmm, xmm                [FMA3]
//    * VFNMSUB132SS m32, xmm, xmm{k}{z}          [AVX512F]
//    * VFNMSUB132SS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VFNMSUB132SS xmm, xmm, xmm{k}{z}          [AVX512F]
//
func (self *Program) VFNMSUB132SS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // The optional trailing operand (vv[0]) is only used by the 4-operand
    // {er} form, where v0 carries the embedded rounding-control operand.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VFNMSUB132SS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMSUB132SS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMSUB132SS takes 3 or 4 operands")
    }
    // VFNMSUB132SS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // Hand-rolled 3-byte VEX encoding (0xc4 prefix), opcode 0x9f,
        // followed by a reg-reg ModRM byte (mod=11).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x9f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB132SS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // VEX encoding with a memory source; displacement multiplier 1.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x9f)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMSUB132SS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX encoding with a memory source; disp8*N scale factor 4
        // (element size of a scalar single).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x9f)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VFNMSUB132SS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // Hand-rolled 4-byte EVEX prefix (0x62 escape) with the rounding
        // control from v[0] folded into byte 3, then opcode and reg-reg ModRM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x9f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMSUB132SS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // Hand-rolled EVEX prefix for the masked register-register form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x9f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFNMSUB132SS")
    }
    return p
}
 51972  
// VFNMSUB213PD performs "Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VFNMSUB213PD
// Supported forms : (11 forms)
//
//    * VFNMSUB213PD xmm, xmm, xmm                   [FMA3]
//    * VFNMSUB213PD m128, xmm, xmm                  [FMA3]
//    * VFNMSUB213PD ymm, ymm, ymm                   [FMA3]
//    * VFNMSUB213PD m256, ymm, ymm                  [FMA3]
//    * VFNMSUB213PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFNMSUB213PD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFNMSUB213PD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFNMSUB213PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFNMSUB213PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFNMSUB213PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFNMSUB213PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VFNMSUB213PD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // The optional trailing operand (vv[0]) is only used by the 4-operand
    // {er} form, where v0 carries the embedded rounding-control operand.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VFNMSUB213PD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMSUB213PD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMSUB213PD takes 3 or 4 operands")
    }
    // VFNMSUB213PD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // Hand-rolled 3-byte VEX encoding (0xc4 prefix), opcode 0xae,
        // followed by a reg-reg ModRM byte (mod=11).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0xae)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB213PD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // VEX encoding with a 128-bit memory source; displacement multiplier 1.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xae)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMSUB213PD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // 256-bit VEX register-register form (note the 0xfd vs 0xf9 prefix byte).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit(0xae)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB213PD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // VEX encoding with a 256-bit memory source; displacement multiplier 1.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xae)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMSUB213PD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX 512-bit memory/broadcast form; disp8*N scale factor 64.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xae)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFNMSUB213PD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // Hand-rolled 4-byte EVEX prefix (0x62 escape) with the rounding
        // control from v[0] folded into byte 3, then opcode and reg-reg ModRM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xae)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMSUB213PD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // Hand-rolled EVEX register-register form; 0x40 selects 512-bit length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xae)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB213PD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX 128-bit memory/broadcast form; disp8*N scale factor 16.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xae)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFNMSUB213PD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        // Hand-rolled EVEX register-register form; 0x00 selects 128-bit length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xae)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB213PD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX 256-bit memory/broadcast form; disp8*N scale factor 32.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xae)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFNMSUB213PD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        // Hand-rolled EVEX register-register form; 0x20 selects 256-bit length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xae)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFNMSUB213PD")
    }
    return p
}
 52128  
// VFNMSUB213PS performs "Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VFNMSUB213PS
// Supported forms : (11 forms)
//
//    * VFNMSUB213PS xmm, xmm, xmm                   [FMA3]
//    * VFNMSUB213PS m128, xmm, xmm                  [FMA3]
//    * VFNMSUB213PS ymm, ymm, ymm                   [FMA3]
//    * VFNMSUB213PS m256, ymm, ymm                  [FMA3]
//    * VFNMSUB213PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFNMSUB213PS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFNMSUB213PS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFNMSUB213PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFNMSUB213PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFNMSUB213PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFNMSUB213PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VFNMSUB213PS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // The optional trailing operand (vv[0]) is only used by the 4-operand
    // {er} form, where v0 carries the embedded rounding-control operand.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VFNMSUB213PS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMSUB213PS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMSUB213PS takes 3 or 4 operands")
    }
    // VFNMSUB213PS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // Hand-rolled 3-byte VEX encoding (0xc4 prefix), opcode 0xae,
        // followed by a reg-reg ModRM byte (mod=11).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xae)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB213PS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // VEX encoding with a 128-bit memory source; displacement multiplier 1.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xae)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMSUB213PS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // 256-bit VEX register-register form (note the 0x7d vs 0x79 prefix byte).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0xae)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB213PS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // VEX encoding with a 256-bit memory source; displacement multiplier 1.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xae)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMSUB213PS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX 512-bit memory/broadcast form; disp8*N scale factor 64.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xae)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFNMSUB213PS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // Hand-rolled 4-byte EVEX prefix (0x62 escape) with the rounding
        // control from v[0] folded into byte 3, then opcode and reg-reg ModRM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xae)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMSUB213PS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // Hand-rolled EVEX register-register form; 0x40 selects 512-bit length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xae)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB213PS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX 128-bit memory/broadcast form; disp8*N scale factor 16.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xae)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFNMSUB213PS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        // Hand-rolled EVEX register-register form; 0x00 selects 128-bit length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xae)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB213PS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX 256-bit memory/broadcast form; disp8*N scale factor 32.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xae)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFNMSUB213PS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        // Hand-rolled EVEX register-register form; 0x20 selects 256-bit length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xae)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFNMSUB213PS")
    }
    return p
}
 52284  
// VFNMSUB213SD performs "Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values".
//
// Mnemonic        : VFNMSUB213SD
// Supported forms : (5 forms)
//
//    * VFNMSUB213SD xmm, xmm, xmm                [FMA3]
//    * VFNMSUB213SD m64, xmm, xmm                [FMA3]
//    * VFNMSUB213SD m64, xmm, xmm{k}{z}          [AVX512F]
//    * VFNMSUB213SD {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VFNMSUB213SD xmm, xmm, xmm{k}{z}          [AVX512F]
//
func (self *Program) VFNMSUB213SD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // The optional trailing operand (vv[0]) is only used by the 4-operand
    // {er} form, where v0 carries the embedded rounding-control operand.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VFNMSUB213SD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMSUB213SD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMSUB213SD takes 3 or 4 operands")
    }
    // VFNMSUB213SD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // Hand-rolled 3-byte VEX encoding (0xc4 prefix), opcode 0xaf,
        // followed by a reg-reg ModRM byte (mod=11).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0xaf)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB213SD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // VEX encoding with a memory source; displacement multiplier 1.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xaf)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMSUB213SD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX encoding with a memory source; disp8*N scale factor 8
        // (element size of a scalar double).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xaf)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VFNMSUB213SD {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // Hand-rolled 4-byte EVEX prefix (0x62 escape) with the rounding
        // control from v[0] folded into byte 3, then opcode and reg-reg ModRM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xaf)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMSUB213SD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // Hand-rolled EVEX prefix for the masked register-register form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xaf)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFNMSUB213SD")
    }
    return p
}
 52366  
// VFNMSUB213SS performs "Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values".
//
// Mnemonic        : VFNMSUB213SS
// Supported forms : (5 forms)
//
//    * VFNMSUB213SS xmm, xmm, xmm                [FMA3]
//    * VFNMSUB213SS m32, xmm, xmm                [FMA3]
//    * VFNMSUB213SS m32, xmm, xmm{k}{z}          [AVX512F]
//    * VFNMSUB213SS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VFNMSUB213SS xmm, xmm, xmm{k}{z}          [AVX512F]
//
func (self *Program) VFNMSUB213SS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // The optional trailing operand (vv[0]) is only used by the 4-operand
    // {er} form, where v0 carries the embedded rounding-control operand.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VFNMSUB213SS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMSUB213SS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMSUB213SS takes 3 or 4 operands")
    }
    // VFNMSUB213SS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // Hand-rolled 3-byte VEX encoding (0xc4 prefix), opcode 0xaf,
        // followed by a reg-reg ModRM byte (mod=11).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xaf)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB213SS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // VEX encoding with a memory source; displacement multiplier 1.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xaf)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMSUB213SS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX encoding with a memory source; disp8*N scale factor 4
        // (element size of a scalar single).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xaf)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VFNMSUB213SS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // Hand-rolled 4-byte EVEX prefix (0x62 escape) with the rounding
        // control from v[0] folded into byte 3, then opcode and reg-reg ModRM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xaf)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMSUB213SS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // Hand-rolled EVEX prefix for the masked register-register form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xaf)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFNMSUB213SS")
    }
    return p
}
 52448  
// VFNMSUB231PD performs "Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VFNMSUB231PD
// Supported forms : (11 forms)
//
//    * VFNMSUB231PD xmm, xmm, xmm                   [FMA3]
//    * VFNMSUB231PD m128, xmm, xmm                  [FMA3]
//    * VFNMSUB231PD ymm, ymm, ymm                   [FMA3]
//    * VFNMSUB231PD m256, ymm, ymm                  [FMA3]
//    * VFNMSUB231PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFNMSUB231PD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFNMSUB231PD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFNMSUB231PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFNMSUB231PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFNMSUB231PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFNMSUB231PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VFNMSUB231PD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional 4th operand is only accepted by the {er} (embedded
    // rounding) form below; any other arity is a programming error.
    switch len(vv) {
        case 0  : p = self.alloc("VFNMSUB231PD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMSUB231PD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMSUB231PD takes 3 or 4 operands")
    }
    // Every form whose operand types match registers one candidate encoder
    // via p.add; each records its ISA requirement and emits opcode 0xbe
    // behind the appropriate VEX (0xc4) or EVEX (0x62) prefix.
    // VFNMSUB231PD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0xbe)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB231PD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xbe)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMSUB231PD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit(0xbe)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB231PD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xbe)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMSUB231PD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Last evex() argument is the broadcast bit; trailing mrsd
            // argument (64) is the disp8*N compression scale for 512-bit.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xbe)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFNMSUB231PD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-operand layout: v[0] is the rounding mode (vcode), v[3] the
            // destination — note the index shift vs. the 3-operand forms.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xbe)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMSUB231PD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xbe)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB231PD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xbe)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFNMSUB231PD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xbe)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB231PD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xbe)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFNMSUB231PD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xbe)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFNMSUB231PD")
    }
    return p
}
 52604  
// VFNMSUB231PS performs "Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VFNMSUB231PS
// Supported forms : (11 forms)
//
//    * VFNMSUB231PS xmm, xmm, xmm                   [FMA3]
//    * VFNMSUB231PS m128, xmm, xmm                  [FMA3]
//    * VFNMSUB231PS ymm, ymm, ymm                   [FMA3]
//    * VFNMSUB231PS m256, ymm, ymm                  [FMA3]
//    * VFNMSUB231PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFNMSUB231PS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFNMSUB231PS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFNMSUB231PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFNMSUB231PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFNMSUB231PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFNMSUB231PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VFNMSUB231PS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional 4th operand is only accepted by the {er} (embedded
    // rounding) form below; any other arity is a programming error.
    switch len(vv) {
        case 0  : p = self.alloc("VFNMSUB231PS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMSUB231PS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMSUB231PS takes 3 or 4 operands")
    }
    // Same matching scheme as VFNMSUB231PD (opcode 0xbe), but the
    // single-precision variant encodes with W=0 prefix bytes
    // (0x79/0x7d/0x05 here vs. 0xf9/0xfd/0x85 in the PD form).
    // VFNMSUB231PS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xbe)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB231PS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xbe)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMSUB231PS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0xbe)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB231PS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xbe)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMSUB231PS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Last evex() argument is the broadcast bit; trailing mrsd
            // argument (64) is the disp8*N compression scale for 512-bit.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xbe)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFNMSUB231PS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-operand layout: v[0] is the rounding mode (vcode), v[3] the
            // destination — note the index shift vs. the 3-operand forms.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xbe)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMSUB231PS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xbe)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB231PS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xbe)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFNMSUB231PS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xbe)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB231PS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xbe)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFNMSUB231PS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xbe)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFNMSUB231PS")
    }
    return p
}
 52760  
// VFNMSUB231SD performs "Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values".
//
// Mnemonic        : VFNMSUB231SD
// Supported forms : (5 forms)
//
//    * VFNMSUB231SD xmm, xmm, xmm                [FMA3]
//    * VFNMSUB231SD m64, xmm, xmm                [FMA3]
//    * VFNMSUB231SD m64, xmm, xmm{k}{z}          [AVX512F]
//    * VFNMSUB231SD {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VFNMSUB231SD xmm, xmm, xmm{k}{z}          [AVX512F]
//
func (self *Program) VFNMSUB231SD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional 4th operand is only accepted by the {er} (embedded
    // rounding) form below; any other arity is a programming error.
    switch len(vv) {
        case 0  : p = self.alloc("VFNMSUB231SD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMSUB231SD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMSUB231SD takes 3 or 4 operands")
    }
    // Scalar double variant: opcode 0xbf (vs. 0xbe for the packed forms)
    // with W=1 prefix bytes (0xf9/0xfd/0x85) and an 8-byte disp8*N scale.
    // VFNMSUB231SD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0xbf)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB231SD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xbf)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMSUB231SD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Scalar memory form cannot broadcast — last evex() argument is 0.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xbf)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VFNMSUB231SD {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-operand layout: v[0] is the rounding mode (vcode), v[3] the
            // destination — note the index shift vs. the 3-operand forms.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xbf)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMSUB231SD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xbf)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFNMSUB231SD")
    }
    return p
}
 52842  
// VFNMSUB231SS performs "Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values".
//
// Mnemonic        : VFNMSUB231SS
// Supported forms : (5 forms)
//
//    * VFNMSUB231SS xmm, xmm, xmm                [FMA3]
//    * VFNMSUB231SS m32, xmm, xmm                [FMA3]
//    * VFNMSUB231SS m32, xmm, xmm{k}{z}          [AVX512F]
//    * VFNMSUB231SS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VFNMSUB231SS xmm, xmm, xmm{k}{z}          [AVX512F]
//
func (self *Program) VFNMSUB231SS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional 4th operand is only accepted by the {er} (embedded
    // rounding) form below; any other arity is a programming error.
    switch len(vv) {
        case 0  : p = self.alloc("VFNMSUB231SS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMSUB231SS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMSUB231SS takes 3 or 4 operands")
    }
    // Scalar single variant: opcode 0xbf like VFNMSUB231SD, but with W=0
    // prefix bytes (0x79/0x7d/0x05) and a 4-byte disp8*N scale.
    // VFNMSUB231SS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xbf)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB231SS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xbf)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMSUB231SS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Scalar memory form cannot broadcast — last evex() argument is 0.
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xbf)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VFNMSUB231SS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-operand layout: v[0] is the rounding mode (vcode), v[3] the
            // destination — note the index shift vs. the 3-operand forms.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xbf)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMSUB231SS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xbf)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFNMSUB231SS")
    }
    return p
}
 52924  
// VFNMSUBPD performs "Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VFNMSUBPD
// Supported forms : (6 forms)
//
//    * VFNMSUBPD xmm, xmm, xmm, xmm     [FMA4]
//    * VFNMSUBPD m128, xmm, xmm, xmm    [FMA4]
//    * VFNMSUBPD xmm, m128, xmm, xmm    [FMA4]
//    * VFNMSUBPD ymm, ymm, ymm, ymm     [FMA4]
//    * VFNMSUBPD m256, ymm, ymm, ymm    [FMA4]
//    * VFNMSUBPD ymm, m256, ymm, ymm    [FMA4]
//
func (self *Program) VFNMSUBPD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // FMA4 (AMD) 4-operand variant: always takes exactly four operands and
    // encodes the 4th register in an immediate byte (hlcode(...) << 4).
    p := self.alloc("VFNMSUBPD", 4, Operands { v0, v1, v2, v3 })
    // VFNMSUBPD xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Two equivalent encodings are registered: v0 and v1 may swap
        // between the ModRM field and the trailing immediate byte.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[2]) << 3))
            m.emit(0x7d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x7d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFNMSUBPD m128, xmm, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x7d)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFNMSUBPD xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x7d)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFNMSUBPD ymm, ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Same dual-encoding scheme as the xmm form, with the VEX.L bit
        // set (0xfd/0x7d prefix bytes) for 256-bit operation.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit(0x7d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit(0x7d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFNMSUBPD m256, ymm, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x85, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x7d)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFNMSUBPD ymm, m256, ymm, ymm
    if isYMM(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x7d)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFNMSUBPD")
    }
    return p
}
 53030  
 53031  // VFNMSUBPS performs "Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values".
 53032  //
 53033  // Mnemonic        : VFNMSUBPS
 53034  // Supported forms : (6 forms)
 53035  //
 53036  //    * VFNMSUBPS xmm, xmm, xmm, xmm     [FMA4]
 53037  //    * VFNMSUBPS m128, xmm, xmm, xmm    [FMA4]
 53038  //    * VFNMSUBPS xmm, m128, xmm, xmm    [FMA4]
 53039  //    * VFNMSUBPS ymm, ymm, ymm, ymm     [FMA4]
 53040  //    * VFNMSUBPS m256, ymm, ymm, ymm    [FMA4]
 53041  //    * VFNMSUBPS ymm, m256, ymm, ymm    [FMA4]
 53042  //
func (self *Program) VFNMSUBPS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VFNMSUBPS", 4, Operands { v0, v1, v2, v3 })
    // VFNMSUBPS xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Register-only form: two equivalent VEX encodings are registered.
        // Either source may sit in ModRM.rm while the other rides in the high
        // nibble of the trailing /is4 byte; the 0xf9-vs-0x79 difference below
        // flips VEX.W, which is the FMA4 operand-order selector.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[2]) << 3))
            m.emit(0x7c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x7c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFNMSUBPS m128, xmm, xmm, xmm — memory source goes in ModRM.rm via vex3.
    if isM128(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x7c)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFNMSUBPS xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x7c)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFNMSUBPS ymm, ymm, ymm, ymm — same as the xmm form with VEX.L set
    // (0xfd/0x7d instead of 0xf9/0x79).
    if isYMM(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit(0x7c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit(0x7c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFNMSUBPS m256, ymm, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x85, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x7c)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFNMSUBPS ymm, m256, ymm, ymm
    if isYMM(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x7c)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFNMSUBPS")
    }
    return p
}
 53136  
 53137  // VFNMSUBSD performs "Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values".
 53138  //
 53139  // Mnemonic        : VFNMSUBSD
 53140  // Supported forms : (3 forms)
 53141  //
 53142  //    * VFNMSUBSD xmm, xmm, xmm, xmm    [FMA4]
 53143  //    * VFNMSUBSD m64, xmm, xmm, xmm    [FMA4]
 53144  //    * VFNMSUBSD xmm, m64, xmm, xmm    [FMA4]
 53145  //
func (self *Program) VFNMSUBSD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VFNMSUBSD", 4, Operands { v0, v1, v2, v3 })
    // VFNMSUBSD xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Two equivalent FMA4 register encodings (opcode 0x7f): the VEX.W bit
        // (0xf9 vs 0x79) chooses whether v[0] or v[1] lands in ModRM.rm; the
        // remaining source is encoded in the high nibble of the last byte.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[2]) << 3))
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFNMSUBSD m64, xmm, xmm, xmm — memory operand encoded through vex3/mrsd.
    if isM64(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x7f)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFNMSUBSD xmm, m64, xmm, xmm
    if isXMM(v0) && isM64(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x7f)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFNMSUBSD")
    }
    return p
}
 53196  
 53197  // VFNMSUBSS performs "Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values".
 53198  //
 53199  // Mnemonic        : VFNMSUBSS
 53200  // Supported forms : (3 forms)
 53201  //
 53202  //    * VFNMSUBSS xmm, xmm, xmm, xmm    [FMA4]
 53203  //    * VFNMSUBSS m32, xmm, xmm, xmm    [FMA4]
 53204  //    * VFNMSUBSS xmm, m32, xmm, xmm    [FMA4]
 53205  //
func (self *Program) VFNMSUBSS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VFNMSUBSS", 4, Operands { v0, v1, v2, v3 })
    // VFNMSUBSS xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Two equivalent FMA4 register encodings (opcode 0x7e); see the other
        // VFNMSUB* encoders — VEX.W (0xf9 vs 0x79) swaps which source is in
        // ModRM.rm vs. the high nibble of the trailing byte.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[2]) << 3))
            m.emit(0x7e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x7e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFNMSUBSS m32, xmm, xmm, xmm — memory operand encoded through vex3/mrsd.
    if isM32(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x7e)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFNMSUBSS xmm, m32, xmm, xmm
    if isXMM(v0) && isM32(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x7e)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFNMSUBSS")
    }
    return p
}
 53256  
 53257  // VFPCLASSPD performs "Test Class of Packed Double-Precision Floating-Point Values".
 53258  //
 53259  // Mnemonic        : VFPCLASSPD
 53260  // Supported forms : (6 forms)
 53261  //
 53262  //    * VFPCLASSPD imm8, m512/m64bcst, k{k}    [AVX512DQ]
 53263  //    * VFPCLASSPD imm8, zmm, k{k}             [AVX512DQ]
 53264  //    * VFPCLASSPD imm8, m128/m64bcst, k{k}    [AVX512DQ,AVX512VL]
 53265  //    * VFPCLASSPD imm8, m256/m64bcst, k{k}    [AVX512DQ,AVX512VL]
 53266  //    * VFPCLASSPD imm8, xmm, k{k}             [AVX512DQ,AVX512VL]
 53267  //    * VFPCLASSPD imm8, ymm, k{k}             [AVX512DQ,AVX512VL]
 53268  //
func (self *Program) VFPCLASSPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VFPCLASSPD", 3, Operands { v0, v1, v2 })
    // Destination v2 is an (optionally masked) opmask register; the result of
    // the class test is a per-element bit, not a vector. Memory forms scale the
    // disp8 by the full vector width (64/32/16 below), matching EVEX disp8*N.
    // VFPCLASSPD imm8, m512/m64bcst, k{k}
    if isImm8(v0) && isM512M64bcst(v1) && isKk(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b10, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), 0, bcode(v[1]))
            m.emit(0x66)
            m.mrsd(lcode(v[2]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFPCLASSPD imm8, zmm, k{k} — hand-rolled EVEX prefix; 0x48 in the fourth
    // byte selects 512-bit vector length (0x28 = 256, 0x08 = 128 below).
    if isImm8(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit(kcode(v[2]) | 0x48)
            m.emit(0x66)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFPCLASSPD imm8, m128/m64bcst, k{k}
    if isImm8(v0) && isM128M64bcst(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b00, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), 0, bcode(v[1]))
            m.emit(0x66)
            m.mrsd(lcode(v[2]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFPCLASSPD imm8, m256/m64bcst, k{k}
    if isImm8(v0) && isM256M64bcst(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b01, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), 0, bcode(v[1]))
            m.emit(0x66)
            m.mrsd(lcode(v[2]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFPCLASSPD imm8, xmm, k{k}
    if isImm8(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit(kcode(v[2]) | 0x08)
            m.emit(0x66)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFPCLASSPD imm8, ymm, k{k}
    if isImm8(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit(kcode(v[2]) | 0x28)
            m.emit(0x66)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFPCLASSPD")
    }
    return p
}
 53351  
 53352  // VFPCLASSPS performs "Test Class of Packed Single-Precision Floating-Point Values".
 53353  //
 53354  // Mnemonic        : VFPCLASSPS
 53355  // Supported forms : (6 forms)
 53356  //
 53357  //    * VFPCLASSPS imm8, m512/m32bcst, k{k}    [AVX512DQ]
 53358  //    * VFPCLASSPS imm8, zmm, k{k}             [AVX512DQ]
 53359  //    * VFPCLASSPS imm8, m128/m32bcst, k{k}    [AVX512DQ,AVX512VL]
 53360  //    * VFPCLASSPS imm8, m256/m32bcst, k{k}    [AVX512DQ,AVX512VL]
 53361  //    * VFPCLASSPS imm8, xmm, k{k}             [AVX512DQ,AVX512VL]
 53362  //    * VFPCLASSPS imm8, ymm, k{k}             [AVX512DQ,AVX512VL]
 53363  //
func (self *Program) VFPCLASSPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VFPCLASSPS", 3, Operands { v0, v1, v2 })
    // Single-precision variant of VFPCLASSPD: identical structure, but with
    // EVEX.W clear (prefix argument 0x05 / byte 0x7d instead of 0x85 / 0xfd)
    // and 32-bit broadcast element size. Destination v2 is an opmask register.
    // VFPCLASSPS imm8, m512/m32bcst, k{k}
    if isImm8(v0) && isM512M32bcst(v1) && isKk(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b10, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), 0, bcode(v[1]))
            m.emit(0x66)
            m.mrsd(lcode(v[2]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFPCLASSPS imm8, zmm, k{k} — 0x48/0x28/0x08 in the fourth prefix byte
    // select 512/256/128-bit vector length.
    if isImm8(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit(kcode(v[2]) | 0x48)
            m.emit(0x66)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFPCLASSPS imm8, m128/m32bcst, k{k}
    if isImm8(v0) && isM128M32bcst(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b00, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), 0, bcode(v[1]))
            m.emit(0x66)
            m.mrsd(lcode(v[2]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFPCLASSPS imm8, m256/m32bcst, k{k}
    if isImm8(v0) && isM256M32bcst(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b01, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), 0, bcode(v[1]))
            m.emit(0x66)
            m.mrsd(lcode(v[2]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFPCLASSPS imm8, xmm, k{k}
    if isImm8(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit(kcode(v[2]) | 0x08)
            m.emit(0x66)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFPCLASSPS imm8, ymm, k{k}
    if isImm8(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit(kcode(v[2]) | 0x28)
            m.emit(0x66)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFPCLASSPS")
    }
    return p
}
 53446  
 53447  // VFPCLASSSD performs "Test Class of Scalar Double-Precision Floating-Point Value".
 53448  //
 53449  // Mnemonic        : VFPCLASSSD
 53450  // Supported forms : (2 forms)
 53451  //
 53452  //    * VFPCLASSSD imm8, xmm, k{k}    [AVX512DQ]
 53453  //    * VFPCLASSSD imm8, m64, k{k}    [AVX512DQ]
 53454  //
func (self *Program) VFPCLASSSD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VFPCLASSSD", 3, Operands { v0, v1, v2 })
    // Scalar double-precision class test (opcode 0x67): result is a single bit
    // written to opmask register v2, optionally zero-masked by {k}.
    // VFPCLASSSD imm8, xmm, k{k}
    if isImm8(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit(kcode(v[2]) | 0x08)
            m.emit(0x67)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFPCLASSSD imm8, m64, k{k} — disp8 is scaled by 8 (the scalar element size).
    if isImm8(v0) && isM64(v1) && isKk(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b00, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), 0, 0)
            m.emit(0x67)
            m.mrsd(lcode(v[2]), addr(v[1]), 8)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFPCLASSSD")
    }
    return p
}
 53487  
 53488  // VFPCLASSSS performs "Test Class of Scalar Single-Precision Floating-Point Value".
 53489  //
 53490  // Mnemonic        : VFPCLASSSS
 53491  // Supported forms : (2 forms)
 53492  //
 53493  //    * VFPCLASSSS imm8, xmm, k{k}    [AVX512DQ]
 53494  //    * VFPCLASSSS imm8, m32, k{k}    [AVX512DQ]
 53495  //
func (self *Program) VFPCLASSSS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VFPCLASSSS", 3, Operands { v0, v1, v2 })
    // Scalar single-precision variant of VFPCLASSSD: same opcode 0x67 but with
    // EVEX.W clear (0x05 / 0x7d instead of 0x85 / 0xfd) and a 4-byte element.
    // VFPCLASSSS imm8, xmm, k{k}
    if isImm8(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit(kcode(v[2]) | 0x08)
            m.emit(0x67)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFPCLASSSS imm8, m32, k{k} — disp8 is scaled by 4 (the scalar element size).
    if isImm8(v0) && isM32(v1) && isKk(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b00, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), 0, 0)
            m.emit(0x67)
            m.mrsd(lcode(v[2]), addr(v[1]), 4)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFPCLASSSS")
    }
    return p
}
 53528  
 53529  // VFRCZPD performs "Extract Fraction Packed Double-Precision Floating-Point".
 53530  //
 53531  // Mnemonic        : VFRCZPD
 53532  // Supported forms : (4 forms)
 53533  //
 53534  //    * VFRCZPD xmm, xmm     [XOP]
 53535  //    * VFRCZPD m128, xmm    [XOP]
 53536  //    * VFRCZPD ymm, ymm     [XOP]
 53537  //    * VFRCZPD m256, ymm    [XOP]
 53538  //
func (self *Program) VFRCZPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VFRCZPD", 2, Operands { v0, v1 })
    // AMD XOP instruction (0x8f escape, opcode 0x81). In the hand-rolled
    // register encodings, 0x78 vs 0x7c selects 128- vs 256-bit vector length.
    // VFRCZPD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x78)
            m.emit(0x81)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VFRCZPD m128, xmm — memory form built via vex3 with the XOP 0x8f escape
    // and map 0b1001.
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x81)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VFRCZPD ymm, ymm
    if isYMM(v0) && isYMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7c)
            m.emit(0x81)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VFRCZPD m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x04, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x81)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFRCZPD")
    }
    return p
}
 53590  
 53591  // VFRCZPS performs "Extract Fraction Packed Single-Precision Floating-Point".
 53592  //
 53593  // Mnemonic        : VFRCZPS
 53594  // Supported forms : (4 forms)
 53595  //
 53596  //    * VFRCZPS xmm, xmm     [XOP]
 53597  //    * VFRCZPS m128, xmm    [XOP]
 53598  //    * VFRCZPS ymm, ymm     [XOP]
 53599  //    * VFRCZPS m256, ymm    [XOP]
 53600  //
func (self *Program) VFRCZPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VFRCZPS", 2, Operands { v0, v1 })
    // AMD XOP instruction, identical in structure to VFRCZPD but with
    // opcode 0x80 (packed single-precision).
    // VFRCZPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x78)
            m.emit(0x80)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VFRCZPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x80)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VFRCZPS ymm, ymm — 0x7c instead of 0x78 selects the 256-bit length.
    if isYMM(v0) && isYMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7c)
            m.emit(0x80)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VFRCZPS m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x04, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x80)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFRCZPS")
    }
    return p
}
 53652  
 53653  // VFRCZSD performs "Extract Fraction Scalar Double-Precision Floating-Point".
 53654  //
 53655  // Mnemonic        : VFRCZSD
 53656  // Supported forms : (2 forms)
 53657  //
 53658  //    * VFRCZSD xmm, xmm    [XOP]
 53659  //    * VFRCZSD m64, xmm    [XOP]
 53660  //
func (self *Program) VFRCZSD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VFRCZSD", 2, Operands { v0, v1 })
    // AMD XOP scalar variant (opcode 0x83); only 128-bit forms exist.
    // VFRCZSD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x78)
            m.emit(0x83)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VFRCZSD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x83)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFRCZSD")
    }
    return p
}
 53690  
 53691  // VFRCZSS performs "Extract Fraction Scalar Single-Precision Floating Point".
 53692  //
 53693  // Mnemonic        : VFRCZSS
 53694  // Supported forms : (2 forms)
 53695  //
 53696  //    * VFRCZSS xmm, xmm    [XOP]
 53697  //    * VFRCZSS m32, xmm    [XOP]
 53698  //
func (self *Program) VFRCZSS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VFRCZSS", 2, Operands { v0, v1 })
    // AMD XOP scalar single-precision variant (opcode 0x82); 128-bit forms only.
    // VFRCZSS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x78)
            m.emit(0x82)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VFRCZSS m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x82)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFRCZSS")
    }
    return p
}
 53728  
 53729  // VGATHERDPD performs "Gather Packed Double-Precision Floating-Point Values Using Signed Doubleword Indices".
 53730  //
 53731  // Mnemonic        : VGATHERDPD
 53732  // Supported forms : (5 forms)
 53733  //
 53734  //    * VGATHERDPD xmm, vm32x, xmm    [AVX2]
 53735  //    * VGATHERDPD ymm, vm32x, ymm    [AVX2]
 53736  //    * VGATHERDPD vm32y, zmm{k}      [AVX512F]
 53737  //    * VGATHERDPD vm32x, xmm{k}      [AVX512F,AVX512VL]
 53738  //    * VGATHERDPD vm32x, ymm{k}      [AVX512F,AVX512VL]
 53739  //
// Note: VGATHERDPD is variadic because the AVX2 forms take three operands
// (mask, index vector, destination) while the AVX-512 forms take two
// (index vector, masked destination).
func (self *Program) VGATHERDPD(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VGATHERDPD", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VGATHERDPD", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VGATHERDPD takes 2 or 3 operands")
    }
    // VGATHERDPD xmm, vm32x, xmm — v0 is encoded in VEX.vvvv (presumably the
    // AVX2 gather mask register — confirm against callers), vv[0] in ModRM.reg.
    if len(vv) == 1 && isXMM(v0) && isVMX(v1) && isXMM(vv[0]) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x92)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // VGATHERDPD ymm, vm32x, ymm
    if len(vv) == 1 && isYMM(v0) && isVMX(v1) && isYMM(vv[0]) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x92)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // VGATHERDPD vm32y, zmm{k} — EVEX forms use the destination's opmask
    // instead of a vector mask; disp8 is scaled by 8 (the element size).
    if len(vv) == 0 && isEVEXVMY(v0) && isZMMk(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x92)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VGATHERDPD vm32x, xmm{k}
    if len(vv) == 0 && isEVEXVMX(v0) && isXMMk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x92)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VGATHERDPD vm32x, ymm{k}
    if len(vv) == 0 && isEVEXVMX(v0) && isYMMk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x92)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VGATHERDPD")
    }
    return p
}
 53802  
 53803  // VGATHERDPS performs "Gather Packed Single-Precision Floating-Point Values Using Signed Doubleword Indices".
 53804  //
 53805  // Mnemonic        : VGATHERDPS
 53806  // Supported forms : (5 forms)
 53807  //
 53808  //    * VGATHERDPS xmm, vm32x, xmm    [AVX2]
 53809  //    * VGATHERDPS ymm, vm32y, ymm    [AVX2]
 53810  //    * VGATHERDPS vm32z, zmm{k}      [AVX512F]
 53811  //    * VGATHERDPS vm32x, xmm{k}      [AVX512F,AVX512VL]
 53812  //    * VGATHERDPS vm32y, ymm{k}      [AVX512F,AVX512VL]
 53813  //
func (self *Program) VGATHERDPS(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The operand count selects between the 3-operand VEX forms (mask register
    // passed as a vector operand) and the 2-operand EVEX forms (opmask {k}).
    switch len(vv) {
        case 0  : p = self.alloc("VGATHERDPS", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VGATHERDPS", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VGATHERDPS takes 2 or 3 operands")
    }
    // Each guard that matches the supplied operands registers one candidate
    // encoder via p.add; if no guard matches, p.len stays 0 and we panic below.
    // NOTE(review): the final argument to mrsd appears to be the disp8 scaling
    // factor (1 for VEX forms, the element size for EVEX forms) — confirm
    // against the mrsd helper.
    // VGATHERDPS xmm, vm32x, xmm
    if len(vv) == 1 && isXMM(v0) && isVMX(v1) && isXMM(vv[0]) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix followed by opcode 0x92 and ModRM/SIB/disp.
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x92)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // VGATHERDPS ymm, vm32y, ymm
    if len(vv) == 1 && isYMM(v0) && isVMY(v1) && isYMM(vv[0]) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x92)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // VGATHERDPS vm32z, zmm{k}
    if len(vv) == 0 && isVMZ(v0) && isZMMk(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded form; opmask taken from the destination operand.
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x92)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VGATHERDPS vm32x, xmm{k}
    if len(vv) == 0 && isEVEXVMX(v0) && isXMMk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x92)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VGATHERDPS vm32y, ymm{k}
    if len(vv) == 0 && isEVEXVMY(v0) && isYMMk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x92)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VGATHERDPS")
    }
    return p
}
 53876  
 53877  // VGATHERPF0DPD performs "Sparse Prefetch Packed Double-Precision Floating-Point Data Values with Signed Doubleword Indices Using T0 Hint".
 53878  //
 53879  // Mnemonic        : VGATHERPF0DPD
 53880  // Supported forms : (1 form)
 53881  //
 53882  //    * VGATHERPF0DPD vm32y{k}    [AVX512PF]
 53883  //
 53884  func (self *Program) VGATHERPF0DPD(v0 interface{}) *Instruction {
 53885      p := self.alloc("VGATHERPF0DPD", 1, Operands { v0 })
 53886      // VGATHERPF0DPD vm32y{k}
 53887      if isVMYk(v0) {
 53888          self.require(ISA_AVX512PF)
 53889          p.domain = DomainAVX
 53890          p.add(0, func(m *_Encoding, v []interface{}) {
 53891              m.evex(0b10, 0x85, 0b10, 0, addr(v[0]), 0, kcode(v[0]), 0, 0)
 53892              m.emit(0xc6)
 53893              m.mrsd(1, addr(v[0]), 8)
 53894          })
 53895      }
 53896      if p.len == 0 {
 53897          panic("invalid operands for VGATHERPF0DPD")
 53898      }
 53899      return p
 53900  }
 53901  
 53902  // VGATHERPF0DPS performs "Sparse Prefetch Packed Single-Precision Floating-Point Data Values with Signed Doubleword Indices Using T0 Hint".
 53903  //
 53904  // Mnemonic        : VGATHERPF0DPS
 53905  // Supported forms : (1 form)
 53906  //
 53907  //    * VGATHERPF0DPS vm32z{k}    [AVX512PF]
 53908  //
 53909  func (self *Program) VGATHERPF0DPS(v0 interface{}) *Instruction {
 53910      p := self.alloc("VGATHERPF0DPS", 1, Operands { v0 })
 53911      // VGATHERPF0DPS vm32z{k}
 53912      if isVMZk(v0) {
 53913          self.require(ISA_AVX512PF)
 53914          p.domain = DomainAVX
 53915          p.add(0, func(m *_Encoding, v []interface{}) {
 53916              m.evex(0b10, 0x05, 0b10, 0, addr(v[0]), 0, kcode(v[0]), 0, 0)
 53917              m.emit(0xc6)
 53918              m.mrsd(1, addr(v[0]), 4)
 53919          })
 53920      }
 53921      if p.len == 0 {
 53922          panic("invalid operands for VGATHERPF0DPS")
 53923      }
 53924      return p
 53925  }
 53926  
 53927  // VGATHERPF0QPD performs "Sparse Prefetch Packed Double-Precision Floating-Point Data Values with Signed Quadword Indices Using T0 Hint".
 53928  //
 53929  // Mnemonic        : VGATHERPF0QPD
 53930  // Supported forms : (1 form)
 53931  //
 53932  //    * VGATHERPF0QPD vm64z{k}    [AVX512PF]
 53933  //
 53934  func (self *Program) VGATHERPF0QPD(v0 interface{}) *Instruction {
 53935      p := self.alloc("VGATHERPF0QPD", 1, Operands { v0 })
 53936      // VGATHERPF0QPD vm64z{k}
 53937      if isVMZk(v0) {
 53938          self.require(ISA_AVX512PF)
 53939          p.domain = DomainAVX
 53940          p.add(0, func(m *_Encoding, v []interface{}) {
 53941              m.evex(0b10, 0x85, 0b10, 0, addr(v[0]), 0, kcode(v[0]), 0, 0)
 53942              m.emit(0xc7)
 53943              m.mrsd(1, addr(v[0]), 8)
 53944          })
 53945      }
 53946      if p.len == 0 {
 53947          panic("invalid operands for VGATHERPF0QPD")
 53948      }
 53949      return p
 53950  }
 53951  
 53952  // VGATHERPF0QPS performs "Sparse Prefetch Packed Single-Precision Floating-Point Data Values with Signed Quadword Indices Using T0 Hint".
 53953  //
 53954  // Mnemonic        : VGATHERPF0QPS
 53955  // Supported forms : (1 form)
 53956  //
 53957  //    * VGATHERPF0QPS vm64z{k}    [AVX512PF]
 53958  //
 53959  func (self *Program) VGATHERPF0QPS(v0 interface{}) *Instruction {
 53960      p := self.alloc("VGATHERPF0QPS", 1, Operands { v0 })
 53961      // VGATHERPF0QPS vm64z{k}
 53962      if isVMZk(v0) {
 53963          self.require(ISA_AVX512PF)
 53964          p.domain = DomainAVX
 53965          p.add(0, func(m *_Encoding, v []interface{}) {
 53966              m.evex(0b10, 0x05, 0b10, 0, addr(v[0]), 0, kcode(v[0]), 0, 0)
 53967              m.emit(0xc7)
 53968              m.mrsd(1, addr(v[0]), 4)
 53969          })
 53970      }
 53971      if p.len == 0 {
 53972          panic("invalid operands for VGATHERPF0QPS")
 53973      }
 53974      return p
 53975  }
 53976  
 53977  // VGATHERPF1DPD performs "Sparse Prefetch Packed Double-Precision Floating-Point Data Values with Signed Doubleword Indices Using T1 Hint".
 53978  //
 53979  // Mnemonic        : VGATHERPF1DPD
 53980  // Supported forms : (1 form)
 53981  //
 53982  //    * VGATHERPF1DPD vm32y{k}    [AVX512PF]
 53983  //
 53984  func (self *Program) VGATHERPF1DPD(v0 interface{}) *Instruction {
 53985      p := self.alloc("VGATHERPF1DPD", 1, Operands { v0 })
 53986      // VGATHERPF1DPD vm32y{k}
 53987      if isVMYk(v0) {
 53988          self.require(ISA_AVX512PF)
 53989          p.domain = DomainAVX
 53990          p.add(0, func(m *_Encoding, v []interface{}) {
 53991              m.evex(0b10, 0x85, 0b10, 0, addr(v[0]), 0, kcode(v[0]), 0, 0)
 53992              m.emit(0xc6)
 53993              m.mrsd(2, addr(v[0]), 8)
 53994          })
 53995      }
 53996      if p.len == 0 {
 53997          panic("invalid operands for VGATHERPF1DPD")
 53998      }
 53999      return p
 54000  }
 54001  
 54002  // VGATHERPF1DPS performs "Sparse Prefetch Packed Single-Precision Floating-Point Data Values with Signed Doubleword Indices Using T1 Hint".
 54003  //
 54004  // Mnemonic        : VGATHERPF1DPS
 54005  // Supported forms : (1 form)
 54006  //
 54007  //    * VGATHERPF1DPS vm32z{k}    [AVX512PF]
 54008  //
 54009  func (self *Program) VGATHERPF1DPS(v0 interface{}) *Instruction {
 54010      p := self.alloc("VGATHERPF1DPS", 1, Operands { v0 })
 54011      // VGATHERPF1DPS vm32z{k}
 54012      if isVMZk(v0) {
 54013          self.require(ISA_AVX512PF)
 54014          p.domain = DomainAVX
 54015          p.add(0, func(m *_Encoding, v []interface{}) {
 54016              m.evex(0b10, 0x05, 0b10, 0, addr(v[0]), 0, kcode(v[0]), 0, 0)
 54017              m.emit(0xc6)
 54018              m.mrsd(2, addr(v[0]), 4)
 54019          })
 54020      }
 54021      if p.len == 0 {
 54022          panic("invalid operands for VGATHERPF1DPS")
 54023      }
 54024      return p
 54025  }
 54026  
 54027  // VGATHERPF1QPD performs "Sparse Prefetch Packed Double-Precision Floating-Point Data Values with Signed Quadword Indices Using T1 Hint".
 54028  //
 54029  // Mnemonic        : VGATHERPF1QPD
 54030  // Supported forms : (1 form)
 54031  //
 54032  //    * VGATHERPF1QPD vm64z{k}    [AVX512PF]
 54033  //
 54034  func (self *Program) VGATHERPF1QPD(v0 interface{}) *Instruction {
 54035      p := self.alloc("VGATHERPF1QPD", 1, Operands { v0 })
 54036      // VGATHERPF1QPD vm64z{k}
 54037      if isVMZk(v0) {
 54038          self.require(ISA_AVX512PF)
 54039          p.domain = DomainAVX
 54040          p.add(0, func(m *_Encoding, v []interface{}) {
 54041              m.evex(0b10, 0x85, 0b10, 0, addr(v[0]), 0, kcode(v[0]), 0, 0)
 54042              m.emit(0xc7)
 54043              m.mrsd(2, addr(v[0]), 8)
 54044          })
 54045      }
 54046      if p.len == 0 {
 54047          panic("invalid operands for VGATHERPF1QPD")
 54048      }
 54049      return p
 54050  }
 54051  
 54052  // VGATHERPF1QPS performs "Sparse Prefetch Packed Single-Precision Floating-Point Data Values with Signed Quadword Indices Using T1 Hint".
 54053  //
 54054  // Mnemonic        : VGATHERPF1QPS
 54055  // Supported forms : (1 form)
 54056  //
 54057  //    * VGATHERPF1QPS vm64z{k}    [AVX512PF]
 54058  //
 54059  func (self *Program) VGATHERPF1QPS(v0 interface{}) *Instruction {
 54060      p := self.alloc("VGATHERPF1QPS", 1, Operands { v0 })
 54061      // VGATHERPF1QPS vm64z{k}
 54062      if isVMZk(v0) {
 54063          self.require(ISA_AVX512PF)
 54064          p.domain = DomainAVX
 54065          p.add(0, func(m *_Encoding, v []interface{}) {
 54066              m.evex(0b10, 0x05, 0b10, 0, addr(v[0]), 0, kcode(v[0]), 0, 0)
 54067              m.emit(0xc7)
 54068              m.mrsd(2, addr(v[0]), 4)
 54069          })
 54070      }
 54071      if p.len == 0 {
 54072          panic("invalid operands for VGATHERPF1QPS")
 54073      }
 54074      return p
 54075  }
 54076  
 54077  // VGATHERQPD performs "Gather Packed Double-Precision Floating-Point Values Using Signed Quadword Indices".
 54078  //
 54079  // Mnemonic        : VGATHERQPD
 54080  // Supported forms : (5 forms)
 54081  //
 54082  //    * VGATHERQPD xmm, vm64x, xmm    [AVX2]
 54083  //    * VGATHERQPD ymm, vm64y, ymm    [AVX2]
 54084  //    * VGATHERQPD vm64z, zmm{k}      [AVX512F]
 54085  //    * VGATHERQPD vm64x, xmm{k}      [AVX512F,AVX512VL]
 54086  //    * VGATHERQPD vm64y, ymm{k}      [AVX512F,AVX512VL]
 54087  //
func (self *Program) VGATHERQPD(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The operand count selects between the 3-operand VEX forms and the
    // 2-operand EVEX forms with an opmask {k} on the destination.
    switch len(vv) {
        case 0  : p = self.alloc("VGATHERQPD", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VGATHERQPD", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VGATHERQPD takes 2 or 3 operands")
    }
    // Each matching guard registers one candidate encoder via p.add; if no
    // guard matches, p.len stays 0 and we panic below.
    // NOTE(review): the final mrsd argument appears to be the disp8 scaling
    // factor (1 for VEX, element size 8 for EVEX) — confirm against mrsd.
    // VGATHERQPD xmm, vm64x, xmm
    if len(vv) == 1 && isXMM(v0) && isVMX(v1) && isXMM(vv[0]) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix followed by opcode 0x93 and ModRM/SIB/disp.
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x93)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // VGATHERQPD ymm, vm64y, ymm
    if len(vv) == 1 && isYMM(v0) && isVMY(v1) && isYMM(vv[0]) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x93)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // VGATHERQPD vm64z, zmm{k}
    if len(vv) == 0 && isVMZ(v0) && isZMMk(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x93)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VGATHERQPD vm64x, xmm{k}
    if len(vv) == 0 && isEVEXVMX(v0) && isXMMk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x93)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VGATHERQPD vm64y, ymm{k}
    if len(vv) == 0 && isEVEXVMY(v0) && isYMMk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x93)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VGATHERQPD")
    }
    return p
}
 54150  
 54151  // VGATHERQPS performs "Gather Packed Single-Precision Floating-Point Values Using Signed Quadword Indices".
 54152  //
 54153  // Mnemonic        : VGATHERQPS
 54154  // Supported forms : (5 forms)
 54155  //
 54156  //    * VGATHERQPS xmm, vm64x, xmm    [AVX2]
 54157  //    * VGATHERQPS xmm, vm64y, xmm    [AVX2]
 54158  //    * VGATHERQPS vm64z, ymm{k}      [AVX512F]
 54159  //    * VGATHERQPS vm64x, xmm{k}      [AVX512F,AVX512VL]
 54160  //    * VGATHERQPS vm64y, xmm{k}      [AVX512F,AVX512VL]
 54161  //
func (self *Program) VGATHERQPS(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The operand count selects between the 3-operand VEX forms and the
    // 2-operand EVEX forms with an opmask {k} on the destination. Note the
    // destination is always xmm/ymm: quadword indices gather single-precision
    // elements, so the result is half the index vector's width.
    switch len(vv) {
        case 0  : p = self.alloc("VGATHERQPS", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VGATHERQPS", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VGATHERQPS takes 2 or 3 operands")
    }
    // Each matching guard registers one candidate encoder via p.add; if no
    // guard matches, p.len stays 0 and we panic below.
    // NOTE(review): the final mrsd argument appears to be the disp8 scaling
    // factor (1 for VEX, element size 4 for EVEX) — confirm against mrsd.
    // VGATHERQPS xmm, vm64x, xmm
    if len(vv) == 1 && isXMM(v0) && isVMX(v1) && isXMM(vv[0]) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix followed by opcode 0x93 and ModRM/SIB/disp.
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x93)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // VGATHERQPS xmm, vm64y, xmm
    if len(vv) == 1 && isXMM(v0) && isVMY(v1) && isXMM(vv[0]) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x93)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // VGATHERQPS vm64z, ymm{k}
    if len(vv) == 0 && isVMZ(v0) && isYMMk(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x93)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VGATHERQPS vm64x, xmm{k}
    if len(vv) == 0 && isEVEXVMX(v0) && isXMMk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x93)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VGATHERQPS vm64y, xmm{k}
    if len(vv) == 0 && isEVEXVMY(v0) && isXMMk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x93)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VGATHERQPS")
    }
    return p
}
 54224  
 54225  // VGETEXPPD performs "Extract Exponents of Packed Double-Precision Floating-Point Values as Double-Precision Floating-Point Values".
 54226  //
 54227  // Mnemonic        : VGETEXPPD
 54228  // Supported forms : (7 forms)
 54229  //
 54230  //    * VGETEXPPD m512/m64bcst, zmm{k}{z}    [AVX512F]
 54231  //    * VGETEXPPD {sae}, zmm, zmm{k}{z}      [AVX512F]
 54232  //    * VGETEXPPD zmm, zmm{k}{z}             [AVX512F]
 54233  //    * VGETEXPPD m128/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
 54234  //    * VGETEXPPD m256/m64bcst, ymm{k}{z}    [AVX512F,AVX512VL]
 54235  //    * VGETEXPPD xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 54236  //    * VGETEXPPD ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 54237  //
func (self *Program) VGETEXPPD(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // A third operand is present only for the {sae} (suppress-all-exceptions)
    // form; all other forms take exactly two operands.
    switch len(vv) {
        case 0  : p = self.alloc("VGETEXPPD", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VGETEXPPD", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VGETEXPPD takes 2 or 3 operands")
    }
    // Memory-source forms go through m.evex/m.mrsd; register-register forms
    // emit the 4-byte EVEX prefix by hand followed by opcode 0x42 and a
    // register-direct ModRM byte (0xc0 | reg<<3 | rm).
    // VGETEXPPD m512/m64bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // bcode(v[0]) selects the m64bcst broadcast form when present.
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x42)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VGETEXPPD {sae}, zmm, zmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built EVEX prefix; 0x18 in the fourth byte differs from the
            // plain register form's 0x48 (presumably the SAE/L'L bits — confirm
            // against the EVEX prefix layout).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x18)
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VGETEXPPD zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VGETEXPPD m128/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x42)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VGETEXPPD m256/m64bcst, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x42)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VGETEXPPD xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VGETEXPPD ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VGETEXPPD")
    }
    return p
}
 54332  
 54333  // VGETEXPPS performs "Extract Exponents of Packed Single-Precision Floating-Point Values as Single-Precision Floating-Point Values".
 54334  //
 54335  // Mnemonic        : VGETEXPPS
 54336  // Supported forms : (7 forms)
 54337  //
 54338  //    * VGETEXPPS m512/m32bcst, zmm{k}{z}    [AVX512F]
 54339  //    * VGETEXPPS {sae}, zmm, zmm{k}{z}      [AVX512F]
 54340  //    * VGETEXPPS zmm, zmm{k}{z}             [AVX512F]
 54341  //    * VGETEXPPS m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
 54342  //    * VGETEXPPS m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
 54343  //    * VGETEXPPS xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 54344  //    * VGETEXPPS ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 54345  //
func (self *Program) VGETEXPPS(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // A third operand is present only for the {sae} (suppress-all-exceptions)
    // form; all other forms take exactly two operands.
    switch len(vv) {
        case 0  : p = self.alloc("VGETEXPPS", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VGETEXPPS", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VGETEXPPS takes 2 or 3 operands")
    }
    // Same structure as VGETEXPPD but with the single-precision prefix fields
    // (0x05 / 0x7d instead of 0x85 / 0xfd). Memory forms go through
    // m.evex/m.mrsd; register-register forms emit the EVEX prefix by hand
    // followed by opcode 0x42 and a register-direct ModRM byte.
    // VGETEXPPS m512/m32bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // bcode(v[0]) selects the m32bcst broadcast form when present.
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x42)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VGETEXPPS {sae}, zmm, zmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x18)
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VGETEXPPS zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VGETEXPPS m128/m32bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x42)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VGETEXPPS m256/m32bcst, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x42)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VGETEXPPS xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VGETEXPPS ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VGETEXPPS")
    }
    return p
}
 54440  
 54441  // VGETEXPSD performs "Extract Exponent of Scalar Double-Precision Floating-Point Value as Double-Precision Floating-Point Value".
 54442  //
 54443  // Mnemonic        : VGETEXPSD
 54444  // Supported forms : (3 forms)
 54445  //
 54446  //    * VGETEXPSD m64, xmm, xmm{k}{z}           [AVX512F]
 54447  //    * VGETEXPSD {sae}, xmm, xmm, xmm{k}{z}    [AVX512F]
 54448  //    * VGETEXPSD xmm, xmm, xmm{k}{z}           [AVX512F]
 54449  //
func (self *Program) VGETEXPSD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // A fourth operand is present only for the {sae} form; the other forms
    // take exactly three operands (src, src2, masked dst).
    switch len(vv) {
        case 0  : p = self.alloc("VGETEXPSD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VGETEXPSD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VGETEXPSD takes 3 or 4 operands")
    }
    // VGETEXPSD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded memory form: opcode 0x43, 8-byte element (disp
            // factor 8 in mrsd), second source packed into the prefix via
            // vcode(v[1]).
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x43)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VGETEXPSD {sae}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built EVEX prefix; the extra 0x10 in the fourth byte
            // distinguishes this from the plain register form's 0x40
            // (presumably the SAE bit — confirm against the EVEX layout).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x43)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VGETEXPSD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x43)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VGETEXPSD")
    }
    return p
}
 54498  
// VGETEXPSS performs "Extract Exponent of Scalar Single-Precision Floating-Point Value as Single-Precision Floating-Point Value".
//
// Mnemonic        : VGETEXPSS
// Supported forms : (3 forms)
//
//    * VGETEXPSS m32, xmm, xmm{k}{z}           [AVX512F]
//    * VGETEXPSS {sae}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VGETEXPSS xmm, xmm, xmm{k}{z}           [AVX512F]
//
func (self *Program) VGETEXPSS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional trailing operand distinguishes the 4-operand {sae} form
    // from the plain 3-operand forms.
    switch len(vv) {
        case 0  : p = self.alloc("VGETEXPSS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VGETEXPSS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VGETEXPSS takes 3 or 4 operands")
    }
    // VGETEXPSS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form: prefix via m.evex, opcode, then ModRM/SIB
            // with a disp8*N compressed displacement (N = 4 for a 32-bit scalar).
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x43)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VGETEXPSS {sae}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix byte
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4))) // P0: opcode map + inverted register-extension bits
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                                            // P1: pp/W + inverted vvvv (first source)
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)  // P2: z, V', mask; 0x10 → EVEX.b (this is the {sae} form)
            m.emit(0x43)                                                                  // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                 // ModRM: register-direct
        })
    }
    // VGETEXPSS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same layout as the {sae} form above, but P2 carries 0x40 instead
            // of the 0x10 EVEX.b bit.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x43)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand pattern matched: reject rather than emit a wrong encoding.
    if p.len == 0 {
        panic("invalid operands for VGETEXPSS")
    }
    return p
}
 54556  
// VGETMANTPD performs "Extract Normalized Mantissas from Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VGETMANTPD
// Supported forms : (7 forms)
//
//    * VGETMANTPD imm8, m512/m64bcst, zmm{k}{z}    [AVX512F]
//    * VGETMANTPD imm8, {sae}, zmm, zmm{k}{z}      [AVX512F]
//    * VGETMANTPD imm8, zmm, zmm{k}{z}             [AVX512F]
//    * VGETMANTPD imm8, m128/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VGETMANTPD imm8, m256/m64bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VGETMANTPD imm8, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VGETMANTPD imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VGETMANTPD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // 4 operands only for the {sae} form; 3 otherwise.
    switch len(vv) {
        case 0  : p = self.alloc("VGETMANTPD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VGETMANTPD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VGETMANTPD takes 3 or 4 operands")
    }
    // VGETMANTPD imm8, m512/m64bcst, zmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM512M64bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form, 512-bit vector length (0b10); disp8*N scale 64.
            m.evex(0b11, 0x85, 0b10, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x26)
            m.mrsd(lcode(v[2]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VGETMANTPD imm8, {sae}, zmm, zmm{k}{z}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix byte
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[3]) << 4))) // P0: opcode map + inverted register-extension bits
            m.emit(0xfd)                                                                  // P1: pp/W fixed (no vvvv source)
            m.emit((zcode(v[3]) << 7) | kcode(v[3]) | 0x18)                               // P2: z + mask; 0x18 includes the EVEX.b ({sae}) bit
            m.emit(0x26)                                                                  // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[2]))                                 // ModRM: register-direct
            m.imm1(toImmAny(v[0]))                                                        // imm8 mantissa-control byte
        })
    }
    // VGETMANTPD imm8, zmm, zmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // As above but P2 = 0x48 (512-bit length, no SAE).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VGETMANTPD imm8, m128/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM128M64bcst(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit vector length (0b00); disp8*N scale 16.
            m.evex(0b11, 0x85, 0b00, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x26)
            m.mrsd(lcode(v[2]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VGETMANTPD imm8, m256/m64bcst, ymm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM256M64bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit vector length (0b01); disp8*N scale 32.
            m.evex(0b11, 0x85, 0b01, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x26)
            m.mrsd(lcode(v[2]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VGETMANTPD imm8, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form, 128-bit length (P2 = 0x08).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x08)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VGETMANTPD imm8, ymm, ymm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form, 256-bit length (P2 = 0x28).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x28)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No operand pattern matched: reject rather than emit a wrong encoding.
    if p.len == 0 {
        panic("invalid operands for VGETMANTPD")
    }
    return p
}
 54671  
// VGETMANTPS performs "Extract Normalized Mantissas from Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VGETMANTPS
// Supported forms : (7 forms)
//
//    * VGETMANTPS imm8, m512/m32bcst, zmm{k}{z}    [AVX512F]
//    * VGETMANTPS imm8, {sae}, zmm, zmm{k}{z}      [AVX512F]
//    * VGETMANTPS imm8, zmm, zmm{k}{z}             [AVX512F]
//    * VGETMANTPS imm8, m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VGETMANTPS imm8, m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VGETMANTPS imm8, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VGETMANTPS imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Single-precision twin of VGETMANTPD: same opcode (0x26) but with the
// W=0 prefix bytes (0x05 / 0x7d instead of 0x85 / 0xfd).
func (self *Program) VGETMANTPS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // 4 operands only for the {sae} form; 3 otherwise.
    switch len(vv) {
        case 0  : p = self.alloc("VGETMANTPS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VGETMANTPS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VGETMANTPS takes 3 or 4 operands")
    }
    // VGETMANTPS imm8, m512/m32bcst, zmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM512M32bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form, 512-bit length (0b10); disp8*N scale 64.
            m.evex(0b11, 0x05, 0b10, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x26)
            m.mrsd(lcode(v[2]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VGETMANTPS imm8, {sae}, zmm, zmm{k}{z}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix byte
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[3]) << 4))) // P0: opcode map + inverted register-extension bits
            m.emit(0x7d)                                                                  // P1: pp/W fixed (no vvvv source)
            m.emit((zcode(v[3]) << 7) | kcode(v[3]) | 0x18)                               // P2: z + mask; 0x18 includes the EVEX.b ({sae}) bit
            m.emit(0x26)                                                                  // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[2]))                                 // ModRM: register-direct
            m.imm1(toImmAny(v[0]))                                                        // imm8 mantissa-control byte
        })
    }
    // VGETMANTPS imm8, zmm, zmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // As above but P2 = 0x48 (512-bit length, no SAE).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VGETMANTPS imm8, m128/m32bcst, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM128M32bcst(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit vector length (0b00); disp8*N scale 16.
            m.evex(0b11, 0x05, 0b00, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x26)
            m.mrsd(lcode(v[2]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VGETMANTPS imm8, m256/m32bcst, ymm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM256M32bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit vector length (0b01); disp8*N scale 32.
            m.evex(0b11, 0x05, 0b01, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x26)
            m.mrsd(lcode(v[2]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VGETMANTPS imm8, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form, 128-bit length (P2 = 0x08).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x08)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VGETMANTPS imm8, ymm, ymm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form, 256-bit length (P2 = 0x28).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x28)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No operand pattern matched: reject rather than emit a wrong encoding.
    if p.len == 0 {
        panic("invalid operands for VGETMANTPS")
    }
    return p
}
 54786  
// VGETMANTSD performs "Extract Normalized Mantissa from Scalar Double-Precision Floating-Point Value".
//
// Mnemonic        : VGETMANTSD
// Supported forms : (3 forms)
//
//    * VGETMANTSD imm8, m64, xmm, xmm{k}{z}           [AVX512F]
//    * VGETMANTSD imm8, {sae}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VGETMANTSD imm8, xmm, xmm, xmm{k}{z}           [AVX512F]
//
func (self *Program) VGETMANTSD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // 5 operands only for the {sae} form; 4 otherwise.
    switch len(vv) {
        case 0  : p = self.alloc("VGETMANTSD", 4, Operands { v0, v1, v2, v3 })
        case 1  : p = self.alloc("VGETMANTSD", 5, Operands { v0, v1, v2, v3, vv[0] })
        default : panic("instruction VGETMANTSD takes 4 or 5 operands")
    }
    // VGETMANTSD imm8, m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM64(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; disp8*N scale 8 for a 64-bit scalar.
            m.evex(0b11, 0x85, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x27)
            m.mrsd(lcode(v[3]), addr(v[1]), 8)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VGETMANTSD imm8, {sae}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix byte
            m.emit(0xf3 ^ ((hcode(v[4]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[4]) << 4))) // P0: opcode map + inverted register-extension bits
            m.emit(0xfd ^ (hlcode(v[3]) << 3))                                            // P1: pp/W + inverted vvvv (first source)
            m.emit((zcode(v[4]) << 7) | (0x08 ^ (ecode(v[3]) << 3)) | kcode(v[4]) | 0x10)  // P2: z, V', mask; 0x10 → EVEX.b (this is the {sae} form)
            m.emit(0x27)                                                                  // opcode
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))                                 // ModRM: register-direct
            m.imm1(toImmAny(v[0]))                                                        // imm8 mantissa-control byte
        })
    }
    // VGETMANTSD imm8, xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same layout as the {sae} form but P2 carries 0x40 instead of the
            // 0x10 EVEX.b bit.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x27)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No operand pattern matched: reject rather than emit a wrong encoding.
    if p.len == 0 {
        panic("invalid operands for VGETMANTSD")
    }
    return p
}
 54847  
// VGETMANTSS performs "Extract Normalized Mantissa from Scalar Single-Precision Floating-Point Value".
//
// Mnemonic        : VGETMANTSS
// Supported forms : (3 forms)
//
//    * VGETMANTSS imm8, m32, xmm, xmm{k}{z}           [AVX512F]
//    * VGETMANTSS imm8, {sae}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VGETMANTSS imm8, xmm, xmm, xmm{k}{z}           [AVX512F]
//
// Single-precision twin of VGETMANTSD: same opcode (0x27) but with the
// W=0 prefix bytes (0x05 / 0x7d instead of 0x85 / 0xfd) and a 4-byte
// memory-operand scale.
func (self *Program) VGETMANTSS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // 5 operands only for the {sae} form; 4 otherwise.
    switch len(vv) {
        case 0  : p = self.alloc("VGETMANTSS", 4, Operands { v0, v1, v2, v3 })
        case 1  : p = self.alloc("VGETMANTSS", 5, Operands { v0, v1, v2, v3, vv[0] })
        default : panic("instruction VGETMANTSS takes 4 or 5 operands")
    }
    // VGETMANTSS imm8, m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM32(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; disp8*N scale 4 for a 32-bit scalar.
            m.evex(0b11, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x27)
            m.mrsd(lcode(v[3]), addr(v[1]), 4)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VGETMANTSS imm8, {sae}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix byte
            m.emit(0xf3 ^ ((hcode(v[4]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[4]) << 4))) // P0: opcode map + inverted register-extension bits
            m.emit(0x7d ^ (hlcode(v[3]) << 3))                                            // P1: pp/W + inverted vvvv (first source)
            m.emit((zcode(v[4]) << 7) | (0x08 ^ (ecode(v[3]) << 3)) | kcode(v[4]) | 0x10)  // P2: z, V', mask; 0x10 → EVEX.b (this is the {sae} form)
            m.emit(0x27)                                                                  // opcode
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))                                 // ModRM: register-direct
            m.imm1(toImmAny(v[0]))                                                        // imm8 mantissa-control byte
        })
    }
    // VGETMANTSS imm8, xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same layout as the {sae} form but P2 carries 0x40 instead of the
            // 0x10 EVEX.b bit.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x27)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No operand pattern matched: reject rather than emit a wrong encoding.
    if p.len == 0 {
        panic("invalid operands for VGETMANTSS")
    }
    return p
}
 54908  
// VHADDPD performs "Packed Double-FP Horizontal Add".
//
// Mnemonic        : VHADDPD
// Supported forms : (4 forms)
//
//    * VHADDPD xmm, xmm, xmm     [AVX]
//    * VHADDPD m128, xmm, xmm    [AVX]
//    * VHADDPD ymm, ymm, ymm     [AVX]
//    * VHADDPD m256, ymm, ymm    [AVX]
//
func (self *Program) VHADDPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VHADDPD", 3, Operands { v0, v1, v2 })
    // All forms use opcode 0x7c with a 2-byte VEX prefix; the vex2 flag
    // selects the mandatory prefix/length (1 for 128-bit, 5 for 256-bit here).
    // VHADDPD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))    // VEX prefix (register source)
            m.emit(0x7c)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: register-direct
        })
    }
    // VHADDPD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1])) // VEX prefix (memory source)
            m.emit(0x7c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM/SIB/displacement
        })
    }
    // VHADDPD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x7c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VHADDPD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x7c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No operand pattern matched: reject rather than emit a wrong encoding.
    if p.len == 0 {
        panic("invalid operands for VHADDPD")
    }
    return p
}
 54966  
// VHADDPS performs "Packed Single-FP Horizontal Add".
//
// Mnemonic        : VHADDPS
// Supported forms : (4 forms)
//
//    * VHADDPS xmm, xmm, xmm     [AVX]
//    * VHADDPS m128, xmm, xmm    [AVX]
//    * VHADDPS ymm, ymm, ymm     [AVX]
//    * VHADDPS m256, ymm, ymm    [AVX]
//
// Same opcode (0x7c) as VHADDPD but with vex2 flags 3/7 instead of 1/5
// (different mandatory prefix for the single-precision variant).
func (self *Program) VHADDPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VHADDPS", 3, Operands { v0, v1, v2 })
    // VHADDPS xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), v[0], hlcode(v[1]))    // VEX prefix (register source)
            m.emit(0x7c)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: register-direct
        })
    }
    // VHADDPS m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), addr(v[0]), hlcode(v[1])) // VEX prefix (memory source)
            m.emit(0x7c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM/SIB/displacement
        })
    }
    // VHADDPS ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(7, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x7c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VHADDPS m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(7, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x7c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No operand pattern matched: reject rather than emit a wrong encoding.
    if p.len == 0 {
        panic("invalid operands for VHADDPS")
    }
    return p
}
 55024  
// VHSUBPD performs "Packed Double-FP Horizontal Subtract".
//
// Mnemonic        : VHSUBPD
// Supported forms : (4 forms)
//
//    * VHSUBPD xmm, xmm, xmm     [AVX]
//    * VHSUBPD m128, xmm, xmm    [AVX]
//    * VHSUBPD ymm, ymm, ymm     [AVX]
//    * VHSUBPD m256, ymm, ymm    [AVX]
//
// Identical encoding shape to VHADDPD, with opcode 0x7d instead of 0x7c.
func (self *Program) VHSUBPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VHSUBPD", 3, Operands { v0, v1, v2 })
    // VHSUBPD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))    // VEX prefix (register source)
            m.emit(0x7d)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: register-direct
        })
    }
    // VHSUBPD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1])) // VEX prefix (memory source)
            m.emit(0x7d)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM/SIB/displacement
        })
    }
    // VHSUBPD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x7d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VHSUBPD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x7d)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No operand pattern matched: reject rather than emit a wrong encoding.
    if p.len == 0 {
        panic("invalid operands for VHSUBPD")
    }
    return p
}
 55082  
// VHSUBPS performs "Packed Single-FP Horizontal Subtract".
//
// Mnemonic        : VHSUBPS
// Supported forms : (4 forms)
//
//    * VHSUBPS xmm, xmm, xmm     [AVX]
//    * VHSUBPS m128, xmm, xmm    [AVX]
//    * VHSUBPS ymm, ymm, ymm     [AVX]
//    * VHSUBPS m256, ymm, ymm    [AVX]
//
// Identical encoding shape to VHADDPS, with opcode 0x7d instead of 0x7c.
func (self *Program) VHSUBPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VHSUBPS", 3, Operands { v0, v1, v2 })
    // VHSUBPS xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), v[0], hlcode(v[1]))    // VEX prefix (register source)
            m.emit(0x7d)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: register-direct
        })
    }
    // VHSUBPS m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), addr(v[0]), hlcode(v[1])) // VEX prefix (memory source)
            m.emit(0x7d)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM/SIB/displacement
        })
    }
    // VHSUBPS ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(7, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x7d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VHSUBPS m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(7, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x7d)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No operand pattern matched: reject rather than emit a wrong encoding.
    if p.len == 0 {
        panic("invalid operands for VHSUBPS")
    }
    return p
}
 55140  
// VINSERTF128 performs "Insert Packed Floating-Point Values".
//
// Mnemonic        : VINSERTF128
// Supported forms : (2 forms)
//
//    * VINSERTF128 imm8, xmm, ymm, ymm     [AVX]
//    * VINSERTF128 imm8, m128, ymm, ymm    [AVX]
//
func (self *Program) VINSERTF128(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VINSERTF128", 4, Operands { v0, v1, v2, v3 })
    // VINSERTF128 imm8, xmm, ymm, ymm
    if isImm8(v0) && isXMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                       // 3-byte VEX prefix
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5)) // VEX byte 1: map select + inverted R/B bits
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                 // VEX byte 2: pp/L + inverted vvvv (first source ymm)
            m.emit(0x18)                                       // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))      // ModRM: register-direct
            m.imm1(toImmAny(v[0]))                             // imm8 lane selector
        })
    }
    // VINSERTF128 imm8, m128, ymm, ymm
    if isImm8(v0) && isM128(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory source: the vex3 helper builds the same 3-byte prefix
            // from the address operand.
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x18)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No operand pattern matched: reject rather than emit a wrong encoding.
    if p.len == 0 {
        panic("invalid operands for VINSERTF128")
    }
    return p
}
 55180  
// VINSERTF32X4 performs "Insert 128 Bits of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VINSERTF32X4
// Supported forms : (4 forms)
//
//    * VINSERTF32X4 imm8, xmm, zmm, zmm{k}{z}     [AVX512F]
//    * VINSERTF32X4 imm8, m128, zmm, zmm{k}{z}    [AVX512F]
//    * VINSERTF32X4 imm8, xmm, ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VINSERTF32X4 imm8, m128, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//
func (self *Program) VINSERTF32X4(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Allocate a 4-operand instruction; each matching form below appends one
    // candidate encoder to it via p.add.
    p := self.alloc("VINSERTF32X4", 4, Operands { v0, v1, v2, v3 })
    // VINSERTF32X4 imm8, xmm, zmm, zmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix (0x62): the XOR/OR terms fold the register
            // extension bits (hcode/ehcode/ecode), opmask (kcode), zeroing (zcode)
            // and vector length into the three payload bytes — see Intel SDM §2.6.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40) // 0x40: L'L = 512-bit
            m.emit(0x18)                                  // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1])) // ModRM, register-direct (mod = 0b11)
            m.imm1(toImmAny(v[0]))                        // trailing imm8 insert selector
        })
    }
    // VINSERTF32X4 imm8, m128, zmm, zmm{k}{z}
    if isImm8(v0) && isM128(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x18)
            // 16 = disp8*N compressed-displacement scale for the 128-bit memory operand.
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VINSERTF32X4 imm8, xmm, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX layout as the zmm form, but 0x20 selects 256-bit vector length.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x18)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VINSERTF32X4 imm8, m128, ymm, ymm{k}{z}
    if isImm8(v0) && isM128(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x18)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VINSERTF32X4")
    }
    return p
}
 55248  
// VINSERTF32X8 performs "Insert 256 Bits of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VINSERTF32X8
// Supported forms : (2 forms)
//
//    * VINSERTF32X8 imm8, ymm, zmm, zmm{k}{z}     [AVX512DQ]
//    * VINSERTF32X8 imm8, m256, zmm, zmm{k}{z}    [AVX512DQ]
//
func (self *Program) VINSERTF32X8(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Allocate a 4-operand instruction; each matching form appends one encoder.
    p := self.alloc("VINSERTF32X8", 4, Operands { v0, v1, v2, v3 })
    // VINSERTF32X8 imm8, ymm, zmm, zmm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix (0x62); 0x40 selects 512-bit vector length.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x1a)                                  // opcode (256-bit insert variant)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1])) // ModRM, register-direct
            m.imm1(toImmAny(v[0]))                        // trailing imm8 insert selector
        })
    }
    // VINSERTF32X8 imm8, m256, zmm, zmm{k}{z}
    if isImm8(v0) && isM256(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x1a)
            // 32 = disp8*N scale for the 256-bit memory operand.
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VINSERTF32X8")
    }
    return p
}
 55289  
// VINSERTF64X2 performs "Insert 128 Bits of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VINSERTF64X2
// Supported forms : (4 forms)
//
//    * VINSERTF64X2 imm8, xmm, zmm, zmm{k}{z}     [AVX512DQ]
//    * VINSERTF64X2 imm8, m128, zmm, zmm{k}{z}    [AVX512DQ]
//    * VINSERTF64X2 imm8, xmm, ymm, ymm{k}{z}     [AVX512DQ,AVX512VL]
//    * VINSERTF64X2 imm8, m128, ymm, ymm{k}{z}    [AVX512DQ,AVX512VL]
//
func (self *Program) VINSERTF64X2(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Allocate a 4-operand instruction; each matching form appends one encoder.
    p := self.alloc("VINSERTF64X2", 4, Operands { v0, v1, v2, v3 })
    // VINSERTF64X2 imm8, xmm, zmm, zmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix (0x62). Base byte 0xfd (vs. 0x7d in the
            // 32xN variants) carries EVEX.W=1 for the 64-bit element form.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40) // 0x40: 512-bit length
            m.emit(0x18)                                  // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1])) // ModRM, register-direct
            m.imm1(toImmAny(v[0]))                        // trailing imm8 insert selector
        })
    }
    // VINSERTF64X2 imm8, m128, zmm, zmm{k}{z}
    if isImm8(v0) && isM128(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0x85 (vs. 0x05) presumably sets EVEX.W=1 in the helper — matches the
            // 0xfd byte used in the register forms.
            m.evex(0b11, 0x85, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x18)
            // 16 = disp8*N scale for the 128-bit memory operand.
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VINSERTF64X2 imm8, xmm, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same as the zmm form, but 0x20 selects 256-bit vector length.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x18)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VINSERTF64X2 imm8, m128, ymm, ymm{k}{z}
    if isImm8(v0) && isM128(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x18)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VINSERTF64X2")
    }
    return p
}
 55357  
// VINSERTF64X4 performs "Insert 256 Bits of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VINSERTF64X4
// Supported forms : (2 forms)
//
//    * VINSERTF64X4 imm8, ymm, zmm, zmm{k}{z}     [AVX512F]
//    * VINSERTF64X4 imm8, m256, zmm, zmm{k}{z}    [AVX512F]
//
func (self *Program) VINSERTF64X4(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Allocate a 4-operand instruction; each matching form appends one encoder.
    p := self.alloc("VINSERTF64X4", 4, Operands { v0, v1, v2, v3 })
    // VINSERTF64X4 imm8, ymm, zmm, zmm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix (0x62); 0xfd carries EVEX.W=1 (64-bit
            // elements), 0x40 selects 512-bit vector length.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x1a)                                  // opcode (256-bit insert variant)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1])) // ModRM, register-direct
            m.imm1(toImmAny(v[0]))                        // trailing imm8 insert selector
        })
    }
    // VINSERTF64X4 imm8, m256, zmm, zmm{k}{z}
    if isImm8(v0) && isM256(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x1a)
            // 32 = disp8*N scale for the 256-bit memory operand.
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VINSERTF64X4")
    }
    return p
}
 55398  
// VINSERTI128 performs "Insert Packed Integer Values".
//
// Mnemonic        : VINSERTI128
// Supported forms : (2 forms)
//
//    * VINSERTI128 imm8, xmm, ymm, ymm     [AVX2]
//    * VINSERTI128 imm8, m128, ymm, ymm    [AVX2]
//
func (self *Program) VINSERTI128(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Allocate a 4-operand instruction; each matching form appends one encoder.
    p := self.alloc("VINSERTI128", 4, Operands { v0, v1, v2, v3 })
    // VINSERTI128 imm8, xmm, ymm, ymm
    if isImm8(v0) && isXMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled three-byte VEX prefix (0xc4); the XOR terms fold the
            // inverted register-extension bits into the payload bytes.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit(0x38)                                  // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1])) // ModRM, register-direct
            m.imm1(toImmAny(v[0]))                        // trailing imm8 insert selector
        })
    }
    // VINSERTI128 imm8, m128, ymm, ymm
    if isImm8(v0) && isM128(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x38)
            // Scale 1: VEX encodings use plain (uncompressed) displacements.
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VINSERTI128")
    }
    return p
}
 55438  
// VINSERTI32X4 performs "Insert 128 Bits of Packed Doubleword Integer Values".
//
// Mnemonic        : VINSERTI32X4
// Supported forms : (4 forms)
//
//    * VINSERTI32X4 imm8, xmm, zmm, zmm{k}{z}     [AVX512F]
//    * VINSERTI32X4 imm8, m128, zmm, zmm{k}{z}    [AVX512F]
//    * VINSERTI32X4 imm8, xmm, ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VINSERTI32X4 imm8, m128, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//
func (self *Program) VINSERTI32X4(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Allocate a 4-operand instruction; each matching form appends one encoder.
    // Integer twin of VINSERTF32X4: identical encoding except opcode 0x38.
    p := self.alloc("VINSERTI32X4", 4, Operands { v0, v1, v2, v3 })
    // VINSERTI32X4 imm8, xmm, zmm, zmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix (0x62); 0x40 selects 512-bit vector length.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x38)                                  // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1])) // ModRM, register-direct
            m.imm1(toImmAny(v[0]))                        // trailing imm8 insert selector
        })
    }
    // VINSERTI32X4 imm8, m128, zmm, zmm{k}{z}
    if isImm8(v0) && isM128(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x38)
            // 16 = disp8*N scale for the 128-bit memory operand.
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VINSERTI32X4 imm8, xmm, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same as the zmm form, but 0x20 selects 256-bit vector length.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x38)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VINSERTI32X4 imm8, m128, ymm, ymm{k}{z}
    if isImm8(v0) && isM128(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x38)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VINSERTI32X4")
    }
    return p
}
 55506  
// VINSERTI32X8 performs "Insert 256 Bits of Packed Doubleword Integer Values".
//
// Mnemonic        : VINSERTI32X8
// Supported forms : (2 forms)
//
//    * VINSERTI32X8 imm8, ymm, zmm, zmm{k}{z}     [AVX512DQ]
//    * VINSERTI32X8 imm8, m256, zmm, zmm{k}{z}    [AVX512DQ]
//
func (self *Program) VINSERTI32X8(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Allocate a 4-operand instruction; each matching form appends one encoder.
    // Integer twin of VINSERTF32X8: identical encoding except opcode 0x3a.
    p := self.alloc("VINSERTI32X8", 4, Operands { v0, v1, v2, v3 })
    // VINSERTI32X8 imm8, ymm, zmm, zmm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix (0x62); 0x40 selects 512-bit vector length.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x3a)                                  // opcode (256-bit insert variant)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1])) // ModRM, register-direct
            m.imm1(toImmAny(v[0]))                        // trailing imm8 insert selector
        })
    }
    // VINSERTI32X8 imm8, m256, zmm, zmm{k}{z}
    if isImm8(v0) && isM256(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x3a)
            // 32 = disp8*N scale for the 256-bit memory operand.
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VINSERTI32X8")
    }
    return p
}
 55547  
// VINSERTI64X2 performs "Insert 128 Bits of Packed Quadword Integer Values".
//
// Mnemonic        : VINSERTI64X2
// Supported forms : (4 forms)
//
//    * VINSERTI64X2 imm8, xmm, zmm, zmm{k}{z}     [AVX512DQ]
//    * VINSERTI64X2 imm8, m128, zmm, zmm{k}{z}    [AVX512DQ]
//    * VINSERTI64X2 imm8, xmm, ymm, ymm{k}{z}     [AVX512DQ,AVX512VL]
//    * VINSERTI64X2 imm8, m128, ymm, ymm{k}{z}    [AVX512DQ,AVX512VL]
//
func (self *Program) VINSERTI64X2(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Allocate a 4-operand instruction; each matching form appends one encoder.
    // Integer twin of VINSERTF64X2 (same opcode 0x38 as VINSERTI32X4, but with
    // EVEX.W=1 via the 0xfd / 0x85 bytes).
    p := self.alloc("VINSERTI64X2", 4, Operands { v0, v1, v2, v3 })
    // VINSERTI64X2 imm8, xmm, zmm, zmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix (0x62); 0x40 selects 512-bit vector length.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x38)                                  // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1])) // ModRM, register-direct
            m.imm1(toImmAny(v[0]))                        // trailing imm8 insert selector
        })
    }
    // VINSERTI64X2 imm8, m128, zmm, zmm{k}{z}
    if isImm8(v0) && isM128(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x38)
            // 16 = disp8*N scale for the 128-bit memory operand.
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VINSERTI64X2 imm8, xmm, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same as the zmm form, but 0x20 selects 256-bit vector length.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x38)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VINSERTI64X2 imm8, m128, ymm, ymm{k}{z}
    if isImm8(v0) && isM128(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x38)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VINSERTI64X2")
    }
    return p
}
 55615  
// VINSERTI64X4 performs "Insert 256 Bits of Packed Quadword Integer Values".
//
// Mnemonic        : VINSERTI64X4
// Supported forms : (2 forms)
//
//    * VINSERTI64X4 imm8, ymm, zmm, zmm{k}{z}     [AVX512F]
//    * VINSERTI64X4 imm8, m256, zmm, zmm{k}{z}    [AVX512F]
//
func (self *Program) VINSERTI64X4(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Allocate a 4-operand instruction; each matching form appends one encoder.
    // Integer twin of VINSERTF64X4: identical encoding except opcode 0x3a.
    p := self.alloc("VINSERTI64X4", 4, Operands { v0, v1, v2, v3 })
    // VINSERTI64X4 imm8, ymm, zmm, zmm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix (0x62); 0xfd carries EVEX.W=1 (64-bit
            // elements), 0x40 selects 512-bit vector length.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x3a)                                  // opcode (256-bit insert variant)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1])) // ModRM, register-direct
            m.imm1(toImmAny(v[0]))                        // trailing imm8 insert selector
        })
    }
    // VINSERTI64X4 imm8, m256, zmm, zmm{k}{z}
    if isImm8(v0) && isM256(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x3a)
            // 32 = disp8*N scale for the 256-bit memory operand.
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VINSERTI64X4")
    }
    return p
}
 55656  
// VINSERTPS performs "Insert Packed Single Precision Floating-Point Value".
//
// Mnemonic        : VINSERTPS
// Supported forms : (4 forms)
//
//    * VINSERTPS imm8, xmm, xmm, xmm    [AVX]
//    * VINSERTPS imm8, m32, xmm, xmm    [AVX]
//    * VINSERTPS imm8, xmm, xmm, xmm    [AVX512F]
//    * VINSERTPS imm8, m32, xmm, xmm    [AVX512F]
//
func (self *Program) VINSERTPS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Allocate a 4-operand instruction. Note the VEX (AVX) and EVEX (AVX512F)
    // forms overlap in operand shape; matching forms each add a candidate
    // encoder and the assembler picks among them downstream.
    p := self.alloc("VINSERTPS", 4, Operands { v0, v1, v2, v3 })
    // VINSERTPS imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled three-byte VEX prefix (0xc4).
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x21)                                  // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1])) // ModRM, register-direct
            m.imm1(toImmAny(v[0]))                        // trailing imm8 source/dest selector
        })
    }
    // VINSERTPS imm8, m32, xmm, xmm
    if isImm8(v0) && isM32(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x21)
            // Scale 1: VEX encodings use plain (uncompressed) displacements.
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VINSERTPS imm8, xmm, xmm, xmm
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix (0x62). This form takes no {k}/{z}
            // decorators, so the mask/zeroing bits are fixed (| 0x00).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | 0x00)
            m.emit(0x21)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VINSERTPS imm8, m32, xmm, xmm
    if isImm8(v0) && isM32(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // kcode/zcode are hard-wired to 0 here — no masking on this form.
            m.evex(0b11, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), 0, 0, 0)
            m.emit(0x21)
            // 4 = disp8*N scale for the 32-bit memory operand.
            m.mrsd(lcode(v[3]), addr(v[1]), 4)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VINSERTPS")
    }
    return p
}
 55723  
// VLDDQU performs "Load Unaligned Integer 128 Bits".
//
// Mnemonic        : VLDDQU
// Supported forms : (2 forms)
//
//    * VLDDQU m128, xmm    [AVX]
//    * VLDDQU m256, ymm    [AVX]
//
func (self *Program) VLDDQU(v0 interface{}, v1 interface{}) *Instruction {
    // Allocate a 2-operand instruction; each matching form appends one encoder.
    p := self.alloc("VLDDQU", 2, Operands { v0, v1 })
    // VLDDQU m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Two-byte VEX prefix via helper; first argument 3 presumably packs
            // the pp/L bits (the 256-bit form below uses 7) — see vex2.
            m.vex2(3, hcode(v[1]), addr(v[0]), 0)
            m.emit(0xf0)                          // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1)    // ModRM/SIB/disp, uncompressed
        })
    }
    // VLDDQU m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(7, hcode(v[1]), addr(v[0]), 0)
            m.emit(0xf0)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VLDDQU")
    }
    return p
}
 55759  
// VLDMXCSR performs "Load MXCSR Register".
//
// Mnemonic        : VLDMXCSR
// Supported forms : (1 form)
//
//    * VLDMXCSR m32    [AVX]
//
func (self *Program) VLDMXCSR(v0 interface{}) *Instruction {
    // Allocate a 1-operand instruction; the single form appends one encoder.
    p := self.alloc("VLDMXCSR", 1, Operands { v0 })
    // VLDMXCSR m32
    if isM32(v0) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, 0, addr(v[0]), 0)
            m.emit(0xae)                // opcode (shared with other MXCSR/fence ops)
            // 2 here is the ModRM.reg opcode extension (/2) selecting VLDMXCSR,
            // not a register operand; trailing 1 = uncompressed displacement.
            m.mrsd(2, addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand type.
    if p.len == 0 {
        panic("invalid operands for VLDMXCSR")
    }
    return p
}
 55784  
// VMASKMOVDQU performs "Store Selected Bytes of Double Quadword".
//
// Mnemonic        : VMASKMOVDQU
// Supported forms : (1 form)
//
//    * VMASKMOVDQU xmm, xmm    [AVX]
//
func (self *Program) VMASKMOVDQU(v0 interface{}, v1 interface{}) *Instruction {
    // Allocate a 2-operand instruction; the single form appends one encoder.
    // The implicit memory destination [RDI] is part of the instruction's
    // semantics, not an explicit operand here.
    p := self.alloc("VMASKMOVDQU", 2, Operands { v0, v1 })
    // VMASKMOVDQU xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[1]), v[0], 0)
            m.emit(0xf7)                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM, register-direct
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VMASKMOVDQU")
    }
    return p
}
 55809  
// VMASKMOVPD performs "Conditional Move Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VMASKMOVPD
// Supported forms : (4 forms)
//
//    * VMASKMOVPD m128, xmm, xmm    [AVX]
//    * VMASKMOVPD m256, ymm, ymm    [AVX]
//    * VMASKMOVPD xmm, xmm, m128    [AVX]
//    * VMASKMOVPD ymm, ymm, m256    [AVX]
//
func (self *Program) VMASKMOVPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Allocate a 3-operand instruction. The first two forms are masked loads
    // (opcode 0x2d), the last two are masked stores (opcode 0x2f); v1 is the
    // mask register in every form.
    p := self.alloc("VMASKMOVPD", 3, Operands { v0, v1, v2 })
    // VMASKMOVPD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x2d)                          // opcode: masked load
            m.mrsd(lcode(v[2]), addr(v[0]), 1)    // ModRM/SIB/disp, uncompressed
        })
    }
    // VMASKMOVPD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0x05 (vs. 0x01) selects the 256-bit vector length in the VEX helper.
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x2d)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMASKMOVPD xmm, xmm, m128
    if isXMM(v0) && isXMM(v1) && isM128(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[0]), addr(v[2]), hlcode(v[1]))
            m.emit(0x2f)                          // opcode: masked store
            m.mrsd(lcode(v[0]), addr(v[2]), 1)
        })
    }
    // VMASKMOVPD ymm, ymm, m256
    if isYMM(v0) && isYMM(v1) && isM256(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[0]), addr(v[2]), hlcode(v[1]))
            m.emit(0x2f)
            m.mrsd(lcode(v[0]), addr(v[2]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VMASKMOVPD")
    }
    return p
}
 55867  
// VMASKMOVPS performs "Conditional Move Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VMASKMOVPS
// Supported forms : (4 forms)
//
//    * VMASKMOVPS m128, xmm, xmm    [AVX]
//    * VMASKMOVPS m256, ymm, ymm    [AVX]
//    * VMASKMOVPS xmm, xmm, m128    [AVX]
//    * VMASKMOVPS ymm, ymm, m256    [AVX]
//
func (self *Program) VMASKMOVPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Allocate a 3-operand instruction. Single-precision twin of VMASKMOVPD:
    // loads use opcode 0x2c, stores 0x2e; v1 is the mask register in every form.
    p := self.alloc("VMASKMOVPS", 3, Operands { v0, v1, v2 })
    // VMASKMOVPS m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x2c)                          // opcode: masked load
            m.mrsd(lcode(v[2]), addr(v[0]), 1)    // ModRM/SIB/disp, uncompressed
        })
    }
    // VMASKMOVPS m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0x05 (vs. 0x01) selects the 256-bit vector length in the VEX helper.
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x2c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMASKMOVPS xmm, xmm, m128
    if isXMM(v0) && isXMM(v1) && isM128(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[0]), addr(v[2]), hlcode(v[1]))
            m.emit(0x2e)                          // opcode: masked store
            m.mrsd(lcode(v[0]), addr(v[2]), 1)
        })
    }
    // VMASKMOVPS ymm, ymm, m256
    if isYMM(v0) && isYMM(v1) && isM256(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[0]), addr(v[2]), hlcode(v[1]))
            m.emit(0x2e)
            m.mrsd(lcode(v[0]), addr(v[2]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VMASKMOVPS")
    }
    return p
}
 55925  
// VMAXPD performs "Return Maximum Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VMAXPD
// Supported forms : (11 forms)
//
//    * VMAXPD xmm, xmm, xmm                   [AVX]
//    * VMAXPD m128, xmm, xmm                  [AVX]
//    * VMAXPD ymm, ymm, ymm                   [AVX]
//    * VMAXPD m256, ymm, ymm                  [AVX]
//    * VMAXPD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VMAXPD {sae}, zmm, zmm, zmm{k}{z}      [AVX512F]
//    * VMAXPD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VMAXPD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VMAXPD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VMAXPD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VMAXPD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// The trailing variadic argument carries the optional 4th operand used by the
// {sae} form; any other arity panics. Each operand-pattern match registers a
// candidate encoding; if none matches, the function panics.
//
func (self *Program) VMAXPD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VMAXPD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VMAXPD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VMAXPD takes 3 or 4 operands")
    }
    // VMAXPD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))       // 2-byte VEX prefix
            m.emit(0x5f)                                     // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))    // ModRM (mod=11, register-direct)
        })
    }
    // VMAXPD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 2-byte VEX prefix
            m.emit(0x5f)                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM/SIB/displacement for the memory operand
        })
    }
    // VMAXPD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))       // 2-byte VEX prefix
            m.emit(0x5f)                                     // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))    // ModRM (mod=11, register-direct)
        })
    }
    // VMAXPD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 2-byte VEX prefix
            m.emit(0x5f)                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM/SIB/displacement for the memory operand
        })
    }
    // VMAXPD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0])) // EVEX prefix (helper-assembled)
            m.emit(0x5f)                       // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64) // memory operand; 64 is the compressed-disp8 scale for 512-bit
        })
    }
    // VMAXPD {sae}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                // EVEX escape byte; 3 prefix bytes follow
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4))) // EVEX byte 1: inverted register-extension bits
            m.emit(0xfd ^ (hlcode(v[2]) << 3))                                          // EVEX byte 2: W/pp plus inverted vvvv
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)  // EVEX byte 3: z, mask; 0x10 sets EVEX.b (SAE)
            m.emit(0x5f)                                                                // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                               // ModRM (mod=11, register-direct)
        })
    }
    // VMAXPD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                // EVEX escape byte; 3 prefix bytes follow
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // EVEX byte 1: inverted register-extension bits
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                          // EVEX byte 2: W/pp plus inverted vvvv
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)  // EVEX byte 3: z, mask; 0x40 = 512-bit vector length
            m.emit(0x5f)                                                                // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                               // ModRM (mod=11, register-direct)
        })
    }
    // VMAXPD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0])) // EVEX prefix (helper-assembled)
            m.emit(0x5f)                       // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 16) // memory operand; 16 is the compressed-disp8 scale for 128-bit
        })
    }
    // VMAXPD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                // EVEX escape byte; 3 prefix bytes follow
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // EVEX byte 1: inverted register-extension bits
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                          // EVEX byte 2: W/pp plus inverted vvvv
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)  // EVEX byte 3: z, mask; 0x00 = 128-bit vector length
            m.emit(0x5f)                                                                // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                               // ModRM (mod=11, register-direct)
        })
    }
    // VMAXPD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0])) // EVEX prefix (helper-assembled)
            m.emit(0x5f)                       // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 32) // memory operand; 32 is the compressed-disp8 scale for 256-bit
        })
    }
    // VMAXPD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                // EVEX escape byte; 3 prefix bytes follow
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // EVEX byte 1: inverted register-extension bits
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                          // EVEX byte 2: W/pp plus inverted vvvv
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)  // EVEX byte 3: z, mask; 0x20 = 256-bit vector length
            m.emit(0x5f)                                                                // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                               // ModRM (mod=11, register-direct)
        })
    }
    // No form matched the supplied operands: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VMAXPD")
    }
    return p
}
 56077  
// VMAXPS performs "Return Maximum Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VMAXPS
// Supported forms : (11 forms)
//
//    * VMAXPS xmm, xmm, xmm                   [AVX]
//    * VMAXPS m128, xmm, xmm                  [AVX]
//    * VMAXPS ymm, ymm, ymm                   [AVX]
//    * VMAXPS m256, ymm, ymm                  [AVX]
//    * VMAXPS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VMAXPS {sae}, zmm, zmm, zmm{k}{z}      [AVX512F]
//    * VMAXPS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VMAXPS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VMAXPS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VMAXPS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VMAXPS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// The trailing variadic argument carries the optional 4th operand used by the
// {sae} form; any other arity panics. Each operand-pattern match registers a
// candidate encoding; if none matches, the function panics.
//
func (self *Program) VMAXPS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VMAXPS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VMAXPS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VMAXPS takes 3 or 4 operands")
    }
    // VMAXPS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), v[0], hlcode(v[1]))       // 2-byte VEX prefix
            m.emit(0x5f)                                     // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))    // ModRM (mod=11, register-direct)
        })
    }
    // VMAXPS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 2-byte VEX prefix
            m.emit(0x5f)                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM/SIB/displacement for the memory operand
        })
    }
    // VMAXPS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), v[0], hlcode(v[1]))       // 2-byte VEX prefix
            m.emit(0x5f)                                     // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))    // ModRM (mod=11, register-direct)
        })
    }
    // VMAXPS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 2-byte VEX prefix
            m.emit(0x5f)                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM/SIB/displacement for the memory operand
        })
    }
    // VMAXPS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0])) // EVEX prefix (helper-assembled)
            m.emit(0x5f)                       // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64) // memory operand; 64 is the compressed-disp8 scale for 512-bit
        })
    }
    // VMAXPS {sae}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                // EVEX escape byte; 3 prefix bytes follow
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4))) // EVEX byte 1: inverted register-extension bits
            m.emit(0x7c ^ (hlcode(v[2]) << 3))                                          // EVEX byte 2: W/pp plus inverted vvvv
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)  // EVEX byte 3: z, mask; 0x10 sets EVEX.b (SAE)
            m.emit(0x5f)                                                                // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                               // ModRM (mod=11, register-direct)
        })
    }
    // VMAXPS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                // EVEX escape byte; 3 prefix bytes follow
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // EVEX byte 1: inverted register-extension bits
            m.emit(0x7c ^ (hlcode(v[1]) << 3))                                          // EVEX byte 2: W/pp plus inverted vvvv
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)  // EVEX byte 3: z, mask; 0x40 = 512-bit vector length
            m.emit(0x5f)                                                                // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                               // ModRM (mod=11, register-direct)
        })
    }
    // VMAXPS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0])) // EVEX prefix (helper-assembled)
            m.emit(0x5f)                       // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 16) // memory operand; 16 is the compressed-disp8 scale for 128-bit
        })
    }
    // VMAXPS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                // EVEX escape byte; 3 prefix bytes follow
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // EVEX byte 1: inverted register-extension bits
            m.emit(0x7c ^ (hlcode(v[1]) << 3))                                          // EVEX byte 2: W/pp plus inverted vvvv
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)  // EVEX byte 3: z, mask; 0x00 = 128-bit vector length
            m.emit(0x5f)                                                                // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                               // ModRM (mod=11, register-direct)
        })
    }
    // VMAXPS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0])) // EVEX prefix (helper-assembled)
            m.emit(0x5f)                       // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 32) // memory operand; 32 is the compressed-disp8 scale for 256-bit
        })
    }
    // VMAXPS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                // EVEX escape byte; 3 prefix bytes follow
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // EVEX byte 1: inverted register-extension bits
            m.emit(0x7c ^ (hlcode(v[1]) << 3))                                          // EVEX byte 2: W/pp plus inverted vvvv
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)  // EVEX byte 3: z, mask; 0x20 = 256-bit vector length
            m.emit(0x5f)                                                                // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                               // ModRM (mod=11, register-direct)
        })
    }
    // No form matched the supplied operands: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VMAXPS")
    }
    return p
}
 56229  
// VMAXSD performs "Return Maximum Scalar Double-Precision Floating-Point Value".
//
// Mnemonic        : VMAXSD
// Supported forms : (5 forms)
//
//    * VMAXSD xmm, xmm, xmm                 [AVX]
//    * VMAXSD m64, xmm, xmm                 [AVX]
//    * VMAXSD m64, xmm, xmm{k}{z}           [AVX512F]
//    * VMAXSD {sae}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VMAXSD xmm, xmm, xmm{k}{z}           [AVX512F]
//
// The trailing variadic argument carries the optional 4th operand used by the
// {sae} form; any other arity panics. Each operand-pattern match registers a
// candidate encoding; if none matches, the function panics.
//
func (self *Program) VMAXSD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VMAXSD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VMAXSD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VMAXSD takes 3 or 4 operands")
    }
    // VMAXSD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), v[0], hlcode(v[1]))       // 2-byte VEX prefix
            m.emit(0x5f)                                     // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))    // ModRM (mod=11, register-direct)
        })
    }
    // VMAXSD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 2-byte VEX prefix
            m.emit(0x5f)                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM/SIB/displacement for the memory operand
        })
    }
    // VMAXSD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // EVEX prefix (helper-assembled; no broadcast for scalar form)
            m.emit(0x5f)                      // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 8) // memory operand; 8 is the compressed-disp8 scale for a 64-bit scalar
        })
    }
    // VMAXSD {sae}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                // EVEX escape byte; 3 prefix bytes follow
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4))) // EVEX byte 1: inverted register-extension bits
            m.emit(0xff ^ (hlcode(v[2]) << 3))                                          // EVEX byte 2: W/pp plus inverted vvvv
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)  // EVEX byte 3: z, mask; 0x10 sets EVEX.b (SAE)
            m.emit(0x5f)                                                                // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                               // ModRM (mod=11, register-direct)
        })
    }
    // VMAXSD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                // EVEX escape byte; 3 prefix bytes follow
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // EVEX byte 1: inverted register-extension bits
            m.emit(0xff ^ (hlcode(v[1]) << 3))                                          // EVEX byte 2: W/pp plus inverted vvvv
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)  // EVEX byte 3: z, mask
            m.emit(0x5f)                                                                // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                               // ModRM (mod=11, register-direct)
        })
    }
    // No form matched the supplied operands: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VMAXSD")
    }
    return p
}
 56309  
// VMAXSS performs "Return Maximum Scalar Single-Precision Floating-Point Value".
//
// Mnemonic        : VMAXSS
// Supported forms : (5 forms)
//
//    * VMAXSS xmm, xmm, xmm                 [AVX]
//    * VMAXSS m32, xmm, xmm                 [AVX]
//    * VMAXSS m32, xmm, xmm{k}{z}           [AVX512F]
//    * VMAXSS {sae}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VMAXSS xmm, xmm, xmm{k}{z}           [AVX512F]
//
// The trailing variadic argument carries the optional 4th operand used by the
// {sae} form; any other arity panics. Each operand-pattern match registers a
// candidate encoding; if none matches, the function panics.
//
func (self *Program) VMAXSS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VMAXSS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VMAXSS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VMAXSS takes 3 or 4 operands")
    }
    // VMAXSS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[2]), v[0], hlcode(v[1]))       // 2-byte VEX prefix
            m.emit(0x5f)                                     // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))    // ModRM (mod=11, register-direct)
        })
    }
    // VMAXSS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 2-byte VEX prefix
            m.emit(0x5f)                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM/SIB/displacement for the memory operand
        })
    }
    // VMAXSS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // EVEX prefix (helper-assembled; no broadcast for scalar form)
            m.emit(0x5f)                      // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 4) // memory operand; 4 is the compressed-disp8 scale for a 32-bit scalar
        })
    }
    // VMAXSS {sae}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                // EVEX escape byte; 3 prefix bytes follow
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4))) // EVEX byte 1: inverted register-extension bits
            m.emit(0x7e ^ (hlcode(v[2]) << 3))                                          // EVEX byte 2: W/pp plus inverted vvvv
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)  // EVEX byte 3: z, mask; 0x10 sets EVEX.b (SAE)
            m.emit(0x5f)                                                                // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                               // ModRM (mod=11, register-direct)
        })
    }
    // VMAXSS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                // EVEX escape byte; 3 prefix bytes follow
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // EVEX byte 1: inverted register-extension bits
            m.emit(0x7e ^ (hlcode(v[1]) << 3))                                          // EVEX byte 2: W/pp plus inverted vvvv
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)  // EVEX byte 3: z, mask
            m.emit(0x5f)                                                                // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                               // ModRM (mod=11, register-direct)
        })
    }
    // No form matched the supplied operands: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VMAXSS")
    }
    return p
}
 56389  
// VMINPD performs "Return Minimum Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VMINPD
// Supported forms : (11 forms)
//
//    * VMINPD xmm, xmm, xmm                   [AVX]
//    * VMINPD m128, xmm, xmm                  [AVX]
//    * VMINPD ymm, ymm, ymm                   [AVX]
//    * VMINPD m256, ymm, ymm                  [AVX]
//    * VMINPD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VMINPD {sae}, zmm, zmm, zmm{k}{z}      [AVX512F]
//    * VMINPD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VMINPD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VMINPD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VMINPD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VMINPD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// The trailing variadic argument carries the optional 4th operand used by the
// {sae} form; any other arity panics. Each operand-pattern match registers a
// candidate encoding; if none matches, the function panics.
//
func (self *Program) VMINPD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VMINPD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VMINPD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VMINPD takes 3 or 4 operands")
    }
    // VMINPD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))       // 2-byte VEX prefix
            m.emit(0x5d)                                     // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))    // ModRM (mod=11, register-direct)
        })
    }
    // VMINPD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 2-byte VEX prefix
            m.emit(0x5d)                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM/SIB/displacement for the memory operand
        })
    }
    // VMINPD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))       // 2-byte VEX prefix
            m.emit(0x5d)                                     // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))    // ModRM (mod=11, register-direct)
        })
    }
    // VMINPD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 2-byte VEX prefix
            m.emit(0x5d)                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM/SIB/displacement for the memory operand
        })
    }
    // VMINPD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0])) // EVEX prefix (helper-assembled)
            m.emit(0x5d)                       // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64) // memory operand; 64 is the compressed-disp8 scale for 512-bit
        })
    }
    // VMINPD {sae}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                // EVEX escape byte; 3 prefix bytes follow
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4))) // EVEX byte 1: inverted register-extension bits
            m.emit(0xfd ^ (hlcode(v[2]) << 3))                                          // EVEX byte 2: W/pp plus inverted vvvv
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)  // EVEX byte 3: z, mask; 0x10 sets EVEX.b (SAE)
            m.emit(0x5d)                                                                // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                               // ModRM (mod=11, register-direct)
        })
    }
    // VMINPD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                // EVEX escape byte; 3 prefix bytes follow
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // EVEX byte 1: inverted register-extension bits
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                          // EVEX byte 2: W/pp plus inverted vvvv
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)  // EVEX byte 3: z, mask; 0x40 = 512-bit vector length
            m.emit(0x5d)                                                                // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                               // ModRM (mod=11, register-direct)
        })
    }
    // VMINPD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0])) // EVEX prefix (helper-assembled)
            m.emit(0x5d)                       // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 16) // memory operand; 16 is the compressed-disp8 scale for 128-bit
        })
    }
    // VMINPD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                // EVEX escape byte; 3 prefix bytes follow
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // EVEX byte 1: inverted register-extension bits
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                          // EVEX byte 2: W/pp plus inverted vvvv
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)  // EVEX byte 3: z, mask; 0x00 = 128-bit vector length
            m.emit(0x5d)                                                                // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                               // ModRM (mod=11, register-direct)
        })
    }
    // VMINPD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0])) // EVEX prefix (helper-assembled)
            m.emit(0x5d)                       // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 32) // memory operand; 32 is the compressed-disp8 scale for 256-bit
        })
    }
    // VMINPD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                // EVEX escape byte; 3 prefix bytes follow
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // EVEX byte 1: inverted register-extension bits
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                          // EVEX byte 2: W/pp plus inverted vvvv
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)  // EVEX byte 3: z, mask; 0x20 = 256-bit vector length
            m.emit(0x5d)                                                                // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                               // ModRM (mod=11, register-direct)
        })
    }
    // No form matched the supplied operands: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VMINPD")
    }
    return p
}
 56541  
// VMINPS performs "Return Minimum Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VMINPS
// Supported forms : (11 forms)
//
//    * VMINPS xmm, xmm, xmm                   [AVX]
//    * VMINPS m128, xmm, xmm                  [AVX]
//    * VMINPS ymm, ymm, ymm                   [AVX]
//    * VMINPS m256, ymm, ymm                  [AVX]
//    * VMINPS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VMINPS {sae}, zmm, zmm, zmm{k}{z}      [AVX512F]
//    * VMINPS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VMINPS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VMINPS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VMINPS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VMINPS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VMINPS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Dispatch on operand count: 3 operands for the ordinary forms, 4 when
    // the leading {sae} pseudo-operand is supplied.
    switch len(vv) {
        case 0  : p = self.alloc("VMINPS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VMINPS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VMINPS takes 3 or 4 operands")
    }
    // Each block below matches one supported operand form and, when it
    // matches, registers an encoder closure for that form. The opcode for
    // every form is 0x5d (MINPS).
    // VMINPS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix, then opcode and a reg-reg ModRM byte.
            m.vex2(0, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMINPS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5d)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMINPS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // First vex2 argument 4 selects the 256-bit (VEX.L=1) encoding.
            m.vex2(4, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMINPS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5d)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMINPS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded memory form; the trailing 64 is the disp8
            // compression factor for a full 512-bit memory operand.
            m.evex(0b01, 0x04, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5d)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VMINPS {sae}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 4-byte EVEX prefix (0x62, P0, P1, P2); the
            // 0x10 OR-ed into P2 sets EVEX.b for the {sae} semantics.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7c ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VMINPS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX reg-reg form; 0x40 in P2 selects 512-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMINPS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5d)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VMINPS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX reg-reg form; 0x00 in P2 selects 128-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMINPS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5d)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VMINPS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX reg-reg form; 0x20 in P2 selects 256-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VMINPS")
    }
    return p
}
 56693  
// VMINSD performs "Return Minimum Scalar Double-Precision Floating-Point Value".
//
// Mnemonic        : VMINSD
// Supported forms : (5 forms)
//
//    * VMINSD xmm, xmm, xmm                 [AVX]
//    * VMINSD m64, xmm, xmm                 [AVX]
//    * VMINSD m64, xmm, xmm{k}{z}           [AVX512F]
//    * VMINSD {sae}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VMINSD xmm, xmm, xmm{k}{z}           [AVX512F]
//
func (self *Program) VMINSD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Dispatch on operand count: 3 operands for the ordinary forms, 4 when
    // the leading {sae} pseudo-operand is supplied.
    switch len(vv) {
        case 0  : p = self.alloc("VMINSD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VMINSD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VMINSD takes 3 or 4 operands")
    }
    // Each block below matches one supported operand form and registers an
    // encoder closure for it. The opcode for every form is 0x5d (MINSD).
    // VMINSD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix (prefix selector 3 => F2), opcode, reg-reg ModRM.
            m.vex2(3, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMINSD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5d)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMINSD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; disp8 is compressed with an 8-byte scale
            // for the scalar 64-bit operand.
            m.evex(0b01, 0x87, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x5d)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VMINSD {sae}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 4-byte EVEX prefix (0x62, P0, P1, P2); the
            // 0x10 OR-ed into P2 sets EVEX.b for the {sae} semantics.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xff ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VMINSD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xff ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VMINSD")
    }
    return p
}
 56773  
// VMINSS performs "Return Minimum Scalar Single-Precision Floating-Point Value".
//
// Mnemonic        : VMINSS
// Supported forms : (5 forms)
//
//    * VMINSS xmm, xmm, xmm                 [AVX]
//    * VMINSS m32, xmm, xmm                 [AVX]
//    * VMINSS m32, xmm, xmm{k}{z}           [AVX512F]
//    * VMINSS {sae}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VMINSS xmm, xmm, xmm{k}{z}           [AVX512F]
//
func (self *Program) VMINSS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Dispatch on operand count: 3 operands for the ordinary forms, 4 when
    // the leading {sae} pseudo-operand is supplied.
    switch len(vv) {
        case 0  : p = self.alloc("VMINSS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VMINSS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VMINSS takes 3 or 4 operands")
    }
    // Each block below matches one supported operand form and registers an
    // encoder closure for it. The opcode for every form is 0x5d (MINSS).
    // VMINSS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix (prefix selector 2 => F3), opcode, reg-reg ModRM.
            m.vex2(2, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMINSS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5d)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMINSS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; disp8 is compressed with a 4-byte scale
            // for the scalar 32-bit operand.
            m.evex(0b01, 0x06, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x5d)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VMINSS {sae}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 4-byte EVEX prefix (0x62, P0, P1, P2); the
            // 0x10 OR-ed into P2 sets EVEX.b for the {sae} semantics.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7e ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VMINSS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VMINSS")
    }
    return p
}
 56853  
// VMOVAPD performs "Move Aligned Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VMOVAPD
// Supported forms : (15 forms)
//
//    * VMOVAPD xmm, xmm           [AVX]
//    * VMOVAPD m128, xmm          [AVX]
//    * VMOVAPD ymm, ymm           [AVX]
//    * VMOVAPD m256, ymm          [AVX]
//    * VMOVAPD xmm, m128          [AVX]
//    * VMOVAPD ymm, m256          [AVX]
//    * VMOVAPD zmm, m512{k}{z}    [AVX512F]
//    * VMOVAPD zmm, zmm{k}{z}     [AVX512F]
//    * VMOVAPD m512, zmm{k}{z}    [AVX512F]
//    * VMOVAPD xmm, m128{k}{z}    [AVX512F,AVX512VL]
//    * VMOVAPD xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVAPD ymm, m256{k}{z}    [AVX512F,AVX512VL]
//    * VMOVAPD ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVAPD m128, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VMOVAPD m256, ymm{k}{z}    [AVX512F,AVX512VL]
//
func (self *Program) VMOVAPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVAPD", 2, Operands { v0, v1 })
    // Each block below matches one supported operand form. Loads use opcode
    // 0x28, stores use 0x29; register-register forms register BOTH directions
    // as alternative encodings of the same instruction.
    // VMOVAPD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0x28: destination in ModRM.reg, source in ModRM.rm.
            m.vex2(1, hcode(v[1]), v[0], 0)
            m.emit(0x28)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0x29: equivalent store-direction encoding with roles swapped.
            m.vex2(1, hcode(v[0]), v[1], 0)
            m.emit(0x29)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVAPD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x28)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVAPD ymm, ymm
    if isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // First vex2 argument 5 selects 66-prefixed 256-bit (VEX.L=1) encoding.
            m.vex2(5, hcode(v[1]), v[0], 0)
            m.emit(0x28)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[0]), v[1], 0)
            m.emit(0x29)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVAPD m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x28)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVAPD xmm, m128
    if isXMM(v0) && isM128(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x29)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVAPD ymm, m256
    if isYMM(v0) && isM256(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x29)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVAPD zmm, m512{k}{z}
    if isZMM(v0) && isM512kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX store form; disp8 is compressed with a 64-byte scale for
            // the full 512-bit memory operand.
            m.evex(0b01, 0x85, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x29)
            m.mrsd(lcode(v[0]), addr(v[1]), 64)
        })
    }
    // VMOVAPD zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 4-byte EVEX prefix (0x62, P0, P1, P2); 0x48 in
            // P2 selects 512-bit vector length. Both move directions are
            // registered as alternative encodings.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x28)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x29)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVAPD m512, zmm{k}{z}
    if isM512(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x28)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VMOVAPD xmm, m128{k}{z}
    if isEVEXXMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x29)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)
        })
    }
    // VMOVAPD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0x08 in P2 selects 128-bit vector length; both move
            // directions are registered as alternative encodings.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x28)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x29)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVAPD ymm, m256{k}{z}
    if isEVEXYMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x29)
            m.mrsd(lcode(v[0]), addr(v[1]), 32)
        })
    }
    // VMOVAPD ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0x28 in P2 selects 256-bit vector length; both move
            // directions are registered as alternative encodings.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x28)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x29)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVAPD m128, xmm{k}{z}
    if isM128(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x28)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VMOVAPD m256, ymm{k}{z}
    if isM256(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x28)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // No form matched the supplied operands: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VMOVAPD")
    }
    return p
}
 57075  
 57076  // VMOVAPS performs "Move Aligned Packed Single-Precision Floating-Point Values".
 57077  //
 57078  // Mnemonic        : VMOVAPS
 57079  // Supported forms : (15 forms)
 57080  //
 57081  //    * VMOVAPS xmm, xmm           [AVX]
 57082  //    * VMOVAPS m128, xmm          [AVX]
 57083  //    * VMOVAPS ymm, ymm           [AVX]
 57084  //    * VMOVAPS m256, ymm          [AVX]
 57085  //    * VMOVAPS xmm, m128          [AVX]
 57086  //    * VMOVAPS ymm, m256          [AVX]
 57087  //    * VMOVAPS zmm, m512{k}{z}    [AVX512F]
 57088  //    * VMOVAPS zmm, zmm{k}{z}     [AVX512F]
 57089  //    * VMOVAPS m512, zmm{k}{z}    [AVX512F]
 57090  //    * VMOVAPS xmm, m128{k}{z}    [AVX512F,AVX512VL]
 57091  //    * VMOVAPS xmm, xmm{k}{z}     [AVX512F,AVX512VL]
 57092  //    * VMOVAPS ymm, m256{k}{z}    [AVX512F,AVX512VL]
 57093  //    * VMOVAPS ymm, ymm{k}{z}     [AVX512F,AVX512VL]
 57094  //    * VMOVAPS m128, xmm{k}{z}    [AVX512F,AVX512VL]
 57095  //    * VMOVAPS m256, ymm{k}{z}    [AVX512F,AVX512VL]
 57096  //
 57097  func (self *Program) VMOVAPS(v0 interface{}, v1 interface{}) *Instruction {
 57098      p := self.alloc("VMOVAPS", 2, Operands { v0, v1 })
 57099      // VMOVAPS xmm, xmm
 57100      if isXMM(v0) && isXMM(v1) {
 57101          self.require(ISA_AVX)
 57102          p.domain = DomainAVX
 57103          p.add(0, func(m *_Encoding, v []interface{}) {
 57104              m.vex2(0, hcode(v[1]), v[0], 0)
 57105              m.emit(0x28)
 57106              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 57107          })
 57108          p.add(0, func(m *_Encoding, v []interface{}) {
 57109              m.vex2(0, hcode(v[0]), v[1], 0)
 57110              m.emit(0x29)
 57111              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 57112          })
 57113      }
 57114      // VMOVAPS m128, xmm
 57115      if isM128(v0) && isXMM(v1) {
 57116          self.require(ISA_AVX)
 57117          p.domain = DomainAVX
 57118          p.add(0, func(m *_Encoding, v []interface{}) {
 57119              m.vex2(0, hcode(v[1]), addr(v[0]), 0)
 57120              m.emit(0x28)
 57121              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 57122          })
 57123      }
 57124      // VMOVAPS ymm, ymm
 57125      if isYMM(v0) && isYMM(v1) {
 57126          self.require(ISA_AVX)
 57127          p.domain = DomainAVX
 57128          p.add(0, func(m *_Encoding, v []interface{}) {
 57129              m.vex2(4, hcode(v[1]), v[0], 0)
 57130              m.emit(0x28)
 57131              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 57132          })
 57133          p.add(0, func(m *_Encoding, v []interface{}) {
 57134              m.vex2(4, hcode(v[0]), v[1], 0)
 57135              m.emit(0x29)
 57136              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 57137          })
 57138      }
 57139      // VMOVAPS m256, ymm
 57140      if isM256(v0) && isYMM(v1) {
 57141          self.require(ISA_AVX)
 57142          p.domain = DomainAVX
 57143          p.add(0, func(m *_Encoding, v []interface{}) {
 57144              m.vex2(4, hcode(v[1]), addr(v[0]), 0)
 57145              m.emit(0x28)
 57146              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 57147          })
 57148      }
 57149      // VMOVAPS xmm, m128
 57150      if isXMM(v0) && isM128(v1) {
 57151          self.require(ISA_AVX)
 57152          p.domain = DomainAVX
 57153          p.add(0, func(m *_Encoding, v []interface{}) {
 57154              m.vex2(0, hcode(v[0]), addr(v[1]), 0)
 57155              m.emit(0x29)
 57156              m.mrsd(lcode(v[0]), addr(v[1]), 1)
 57157          })
 57158      }
 57159      // VMOVAPS ymm, m256
 57160      if isYMM(v0) && isM256(v1) {
 57161          self.require(ISA_AVX)
 57162          p.domain = DomainAVX
 57163          p.add(0, func(m *_Encoding, v []interface{}) {
 57164              m.vex2(4, hcode(v[0]), addr(v[1]), 0)
 57165              m.emit(0x29)
 57166              m.mrsd(lcode(v[0]), addr(v[1]), 1)
 57167          })
 57168      }
 57169      // VMOVAPS zmm, m512{k}{z}
 57170      if isZMM(v0) && isM512kz(v1) {
 57171          self.require(ISA_AVX512F)
 57172          p.domain = DomainAVX
 57173          p.add(0, func(m *_Encoding, v []interface{}) {
 57174              m.evex(0b01, 0x04, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
 57175              m.emit(0x29)
 57176              m.mrsd(lcode(v[0]), addr(v[1]), 64)
 57177          })
 57178      }
 57179      // VMOVAPS zmm, zmm{k}{z}
 57180      if isZMM(v0) && isZMMkz(v1) {
 57181          self.require(ISA_AVX512F)
 57182          p.domain = DomainAVX
 57183          p.add(0, func(m *_Encoding, v []interface{}) {
 57184              m.emit(0x62)
 57185              m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
 57186              m.emit(0x7c)
 57187              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
 57188              m.emit(0x28)
 57189              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 57190          })
 57191          p.add(0, func(m *_Encoding, v []interface{}) {
 57192              m.emit(0x62)
 57193              m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
 57194              m.emit(0x7c)
 57195              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
 57196              m.emit(0x29)
 57197              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 57198          })
 57199      }
 57200      // VMOVAPS m512, zmm{k}{z}
 57201      if isM512(v0) && isZMMkz(v1) {
 57202          self.require(ISA_AVX512F)
 57203          p.domain = DomainAVX
 57204          p.add(0, func(m *_Encoding, v []interface{}) {
 57205              m.evex(0b01, 0x04, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
 57206              m.emit(0x28)
 57207              m.mrsd(lcode(v[1]), addr(v[0]), 64)
 57208          })
 57209      }
 57210      // VMOVAPS xmm, m128{k}{z}
 57211      if isEVEXXMM(v0) && isM128kz(v1) {
 57212          self.require(ISA_AVX512VL | ISA_AVX512F)
 57213          p.domain = DomainAVX
 57214          p.add(0, func(m *_Encoding, v []interface{}) {
 57215              m.evex(0b01, 0x04, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
 57216              m.emit(0x29)
 57217              m.mrsd(lcode(v[0]), addr(v[1]), 16)
 57218          })
 57219      }
 57220      // VMOVAPS xmm, xmm{k}{z}
 57221      if isEVEXXMM(v0) && isXMMkz(v1) {
 57222          self.require(ISA_AVX512VL | ISA_AVX512F)
 57223          p.domain = DomainAVX
 57224          p.add(0, func(m *_Encoding, v []interface{}) {
 57225              m.emit(0x62)
 57226              m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
 57227              m.emit(0x7c)
 57228              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
 57229              m.emit(0x28)
 57230              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 57231          })
 57232          p.add(0, func(m *_Encoding, v []interface{}) {
 57233              m.emit(0x62)
 57234              m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
 57235              m.emit(0x7c)
 57236              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
 57237              m.emit(0x29)
 57238              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 57239          })
 57240      }
 57241      // VMOVAPS ymm, m256{k}{z}
 57242      if isEVEXYMM(v0) && isM256kz(v1) {
 57243          self.require(ISA_AVX512VL | ISA_AVX512F)
 57244          p.domain = DomainAVX
 57245          p.add(0, func(m *_Encoding, v []interface{}) {
 57246              m.evex(0b01, 0x04, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
 57247              m.emit(0x29)
 57248              m.mrsd(lcode(v[0]), addr(v[1]), 32)
 57249          })
 57250      }
 57251      // VMOVAPS ymm, ymm{k}{z}
 57252      if isEVEXYMM(v0) && isYMMkz(v1) {
 57253          self.require(ISA_AVX512VL | ISA_AVX512F)
 57254          p.domain = DomainAVX
 57255          p.add(0, func(m *_Encoding, v []interface{}) {
 57256              m.emit(0x62)
 57257              m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
 57258              m.emit(0x7c)
 57259              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
 57260              m.emit(0x28)
 57261              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 57262          })
 57263          p.add(0, func(m *_Encoding, v []interface{}) {
 57264              m.emit(0x62)
 57265              m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
 57266              m.emit(0x7c)
 57267              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
 57268              m.emit(0x29)
 57269              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 57270          })
 57271      }
 57272      // VMOVAPS m128, xmm{k}{z}
 57273      if isM128(v0) && isXMMkz(v1) {
 57274          self.require(ISA_AVX512VL | ISA_AVX512F)
 57275          p.domain = DomainAVX
 57276          p.add(0, func(m *_Encoding, v []interface{}) {
 57277              m.evex(0b01, 0x04, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
 57278              m.emit(0x28)
 57279              m.mrsd(lcode(v[1]), addr(v[0]), 16)
 57280          })
 57281      }
 57282      // VMOVAPS m256, ymm{k}{z}
 57283      if isM256(v0) && isYMMkz(v1) {
 57284          self.require(ISA_AVX512VL | ISA_AVX512F)
 57285          p.domain = DomainAVX
 57286          p.add(0, func(m *_Encoding, v []interface{}) {
 57287              m.evex(0b01, 0x04, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
 57288              m.emit(0x28)
 57289              m.mrsd(lcode(v[1]), addr(v[0]), 32)
 57290          })
 57291      }
 57292      if p.len == 0 {
 57293          panic("invalid operands for VMOVAPS")
 57294      }
 57295      return p
 57296  }
 57297  
// VMOVD performs "Move Doubleword".
//
// Mnemonic        : VMOVD
// Supported forms : (8 forms)
//
//    * VMOVD xmm, r32    [AVX]
//    * VMOVD r32, xmm    [AVX]
//    * VMOVD m32, xmm    [AVX]
//    * VMOVD xmm, m32    [AVX]
//    * VMOVD xmm, r32    [AVX512F]
//    * VMOVD r32, xmm    [AVX512F]
//    * VMOVD m32, xmm    [AVX512F]
//    * VMOVD xmm, m32    [AVX512F]
//
func (self *Program) VMOVD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVD", 2, Operands { v0, v1 })
    // Each operand-pattern match below registers a candidate encoding via
    // p.add; if no pattern matches, p.len stays 0 and we panic at the end.
    // VMOVD xmm, r32
    if isXMM(v0) && isReg32(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[0]), v[1], 0)                  // VEX prefix (helper)
            m.emit(0x7e)                                     // opcode
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))    // ModRM, register-direct (mod = 0b11)
        })
    }
    // VMOVD r32, xmm
    if isReg32(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[1]), v[0], 0)
            m.emit(0x6e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVD m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x6e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)    // ModRM/SIB/displacement for the memory operand
        })
    }
    // VMOVD xmm, m32
    if isXMM(v0) && isM32(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x7e)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVD xmm, r32
    if isEVEXXMM(v0) && isReg32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix, then opcode and ModRM.
            m.emit(0x62)                                                                          // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))        // inverted register-extension bits
            m.emit(0x7d)                                                                          // fixed EVEX byte
            m.emit(0x08)                                                                          // fixed EVEX byte (this form takes no {k}{z} mask)
            m.emit(0x7e)                                                                          // opcode
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))                                         // ModRM, register-direct
        })
    }
    // VMOVD r32, xmm
    if isReg32(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit(0x08)
            m.emit(0x6e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVD m32, xmm
    if isM32(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)    // EVEX prefix (helper); disp scale 4 below matches the 32-bit memory operand
            m.emit(0x6e)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VMOVD xmm, m32
    if isEVEXXMM(v0) && isM32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[0]), addr(v[1]), 0, 0, 0, 0)
            m.emit(0x7e)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VMOVD")
    }
    return p
}
 57405  
// VMOVDDUP performs "Move One Double-FP and Duplicate".
//
// Mnemonic        : VMOVDDUP
// Supported forms : (10 forms)
//
//    * VMOVDDUP xmm, xmm           [AVX]
//    * VMOVDDUP m64, xmm           [AVX]
//    * VMOVDDUP ymm, ymm           [AVX]
//    * VMOVDDUP m256, ymm          [AVX]
//    * VMOVDDUP zmm, zmm{k}{z}     [AVX512F]
//    * VMOVDDUP m512, zmm{k}{z}    [AVX512F]
//    * VMOVDDUP xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVDDUP ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVDDUP m64, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVDDUP m256, ymm{k}{z}    [AVX512F,AVX512VL]
//
func (self *Program) VMOVDDUP(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVDDUP", 2, Operands { v0, v1 })
    // Each operand-pattern match below registers a candidate encoding via
    // p.add; if no pattern matches, p.len stays 0 and we panic at the end.
    // VMOVDDUP xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[1]), v[0], 0)                  // VEX prefix (helper)
            m.emit(0x12)                                     // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM, register-direct (mod = 0b11)
        })
    }
    // VMOVDDUP m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x12)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)    // ModRM/SIB/displacement for the memory operand
        })
    }
    // VMOVDDUP ymm, ymm
    if isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(7, hcode(v[1]), v[0], 0)
            m.emit(0x12)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVDDUP m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(7, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x12)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVDDUP zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix, then opcode and ModRM.
            m.emit(0x62)                                                                          // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))        // inverted register-extension bits
            m.emit(0xff)                                                                          // fixed EVEX byte
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                                       // z-bit, opmask, vector-length bits (0x48 here; 0x28/0x08 in the ymm/xmm forms)
            m.emit(0x12)                                                                          // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                                         // ModRM, register-direct
        })
    }
    // VMOVDDUP m512, zmm{k}{z}
    if isM512(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)    // EVEX prefix (helper); disp scale 64 matches the 512-bit memory operand
            m.emit(0x12)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VMOVDDUP xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xff)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x12)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVDDUP ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xff)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x12)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVDDUP m64, xmm{k}{z}
    if isM64(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x12)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VMOVDDUP m256, ymm{k}{z}
    if isM256(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x12)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VMOVDDUP")
    }
    return p
}
 57538  
// VMOVDQA performs "Move Aligned Double Quadword".
//
// Mnemonic        : VMOVDQA
// Supported forms : (6 forms)
//
//    * VMOVDQA xmm, xmm     [AVX]
//    * VMOVDQA m128, xmm    [AVX]
//    * VMOVDQA ymm, ymm     [AVX]
//    * VMOVDQA m256, ymm    [AVX]
//    * VMOVDQA xmm, m128    [AVX]
//    * VMOVDQA ymm, m256    [AVX]
//
func (self *Program) VMOVDQA(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVDQA", 2, Operands { v0, v1 })
    // Each operand-pattern match below registers one or more candidate
    // encodings via p.add; if no pattern matches, p.len stays 0 and we panic.
    // VMOVDQA xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[1]), v[0], 0)                  // VEX prefix (helper)
            m.emit(0x6f)                                     // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM, register-direct (mod = 0b11)
        })
        // Second candidate: same reg-reg form encoded with the 0x7f opcode
        // and the operand roles swapped in ModRM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[0]), v[1], 0)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQA m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)    // ModRM/SIB/displacement for the memory operand
        })
    }
    // VMOVDQA ymm, ymm
    if isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[1]), v[0], 0)
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[0]), v[1], 0)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQA m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVDQA xmm, m128
    if isXMM(v0) && isM128(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVDQA ymm, m256
    if isYMM(v0) && isM256(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VMOVDQA")
    }
    return p
}
 57628  
// VMOVDQA32 performs "Move Aligned Doubleword Values".
//
// Mnemonic        : VMOVDQA32
// Supported forms : (9 forms)
//
//    * VMOVDQA32 zmm, m512{k}{z}    [AVX512F]
//    * VMOVDQA32 zmm, zmm{k}{z}     [AVX512F]
//    * VMOVDQA32 m512, zmm{k}{z}    [AVX512F]
//    * VMOVDQA32 xmm, m128{k}{z}    [AVX512F,AVX512VL]
//    * VMOVDQA32 xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVDQA32 ymm, m256{k}{z}    [AVX512F,AVX512VL]
//    * VMOVDQA32 ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVDQA32 m128, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VMOVDQA32 m256, ymm{k}{z}    [AVX512F,AVX512VL]
//
func (self *Program) VMOVDQA32(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVDQA32", 2, Operands { v0, v1 })
    // Each operand-pattern match below registers one or more candidate
    // encodings via p.add; if no pattern matches, p.len stays 0 and we panic.
    // VMOVDQA32 zmm, m512{k}{z}
    if isZMM(v0) && isM512kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)    // EVEX prefix (helper); disp scale 64 matches the 512-bit memory operand
            m.emit(0x7f)                                                                          // opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 64)                                                   // ModRM/SIB/displacement for the memory operand
        })
    }
    // VMOVDQA32 zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix, then opcode and ModRM.
            m.emit(0x62)                                                                          // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))        // inverted register-extension bits
            m.emit(0x7d)                                                                          // fixed EVEX byte
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                                       // z-bit, opmask, vector-length bits (0x48 zmm; 0x28/0x08 in the ymm/xmm forms)
            m.emit(0x6f)                                                                          // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                                         // ModRM, register-direct (mod = 0b11)
        })
        // Second candidate: same reg-reg form with the 0x7f opcode and the
        // operand roles swapped in ModRM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQA32 m512, zmm{k}{z}
    if isM512(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VMOVDQA32 xmm, m128{k}{z}
    if isEVEXXMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)
        })
    }
    // VMOVDQA32 xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQA32 ymm, m256{k}{z}
    if isEVEXYMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 32)
        })
    }
    // VMOVDQA32 ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQA32 m128, xmm{k}{z}
    if isM128(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VMOVDQA32 m256, ymm{k}{z}
    if isM256(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VMOVDQA32")
    }
    return p
}
 57774  
// VMOVDQA64 performs "Move Aligned Quadword Values".
//
// Mnemonic        : VMOVDQA64
// Supported forms : (9 forms)
//
//    * VMOVDQA64 zmm, m512{k}{z}    [AVX512F]
//    * VMOVDQA64 zmm, zmm{k}{z}     [AVX512F]
//    * VMOVDQA64 m512, zmm{k}{z}    [AVX512F]
//    * VMOVDQA64 xmm, m128{k}{z}    [AVX512F,AVX512VL]
//    * VMOVDQA64 xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVDQA64 ymm, m256{k}{z}    [AVX512F,AVX512VL]
//    * VMOVDQA64 ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVDQA64 m128, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VMOVDQA64 m256, ymm{k}{z}    [AVX512F,AVX512VL]
//
func (self *Program) VMOVDQA64(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVDQA64", 2, Operands { v0, v1 })
    // Mirrors VMOVDQA32 above, differing only in the fixed EVEX bytes
    // (0x85/0xfd here vs 0x05/0x7d there). Each operand-pattern match
    // registers one or more candidate encodings via p.add; if no pattern
    // matches, p.len stays 0 and we panic at the end.
    // VMOVDQA64 zmm, m512{k}{z}
    if isZMM(v0) && isM512kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)    // EVEX prefix (helper); disp scale 64 matches the 512-bit memory operand
            m.emit(0x7f)                                                                          // opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 64)                                                   // ModRM/SIB/displacement for the memory operand
        })
    }
    // VMOVDQA64 zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix, then opcode and ModRM.
            m.emit(0x62)                                                                          // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))        // inverted register-extension bits
            m.emit(0xfd)                                                                          // fixed EVEX byte
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                                       // z-bit, opmask, vector-length bits (0x48 zmm; 0x28/0x08 in the ymm/xmm forms)
            m.emit(0x6f)                                                                          // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                                         // ModRM, register-direct (mod = 0b11)
        })
        // Second candidate: same reg-reg form with the 0x7f opcode and the
        // operand roles swapped in ModRM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQA64 m512, zmm{k}{z}
    if isM512(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VMOVDQA64 xmm, m128{k}{z}
    if isEVEXXMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)
        })
    }
    // VMOVDQA64 xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQA64 ymm, m256{k}{z}
    if isEVEXYMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 32)
        })
    }
    // VMOVDQA64 ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQA64 m128, xmm{k}{z}
    if isM128(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VMOVDQA64 m256, ymm{k}{z}
    if isM256(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VMOVDQA64")
    }
    return p
}
 57920  
// VMOVDQU performs "Move Unaligned Double Quadword".
//
// Mnemonic        : VMOVDQU
// Supported forms : (6 forms)
//
//    * VMOVDQU xmm, xmm     [AVX]
//    * VMOVDQU m128, xmm    [AVX]
//    * VMOVDQU ymm, ymm     [AVX]
//    * VMOVDQU m256, ymm    [AVX]
//    * VMOVDQU xmm, m128    [AVX]
//    * VMOVDQU ymm, m256    [AVX]
//
func (self *Program) VMOVDQU(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVDQU", 2, Operands { v0, v1 })
    // Mirrors VMOVDQA above, differing only in the vex2 selector argument
    // (2/6 here vs 1/5 there). Each operand-pattern match registers one or
    // more candidate encodings via p.add; if no pattern matches, p.len
    // stays 0 and we panic at the end.
    // VMOVDQU xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[1]), v[0], 0)                  // VEX prefix (helper)
            m.emit(0x6f)                                     // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM, register-direct (mod = 0b11)
        })
        // Second candidate: same reg-reg form encoded with the 0x7f opcode
        // and the operand roles swapped in ModRM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[0]), v[1], 0)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQU m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)    // ModRM/SIB/displacement for the memory operand
        })
    }
    // VMOVDQU ymm, ymm
    if isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(6, hcode(v[1]), v[0], 0)
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(6, hcode(v[0]), v[1], 0)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQU m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(6, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVDQU xmm, m128
    if isXMM(v0) && isM128(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVDQU ymm, m256
    if isYMM(v0) && isM256(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(6, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VMOVDQU")
    }
    return p
}
 58010  
// VMOVDQU16 performs "Move Unaligned Word Values".
//
// Mnemonic        : VMOVDQU16
// Supported forms : (9 forms)
//
//    * VMOVDQU16 zmm, m512{k}{z}    [AVX512BW]
//    * VMOVDQU16 zmm, zmm{k}{z}     [AVX512BW]
//    * VMOVDQU16 m512, zmm{k}{z}    [AVX512BW]
//    * VMOVDQU16 xmm, m128{k}{z}    [AVX512BW,AVX512VL]
//    * VMOVDQU16 xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VMOVDQU16 ymm, m256{k}{z}    [AVX512BW,AVX512VL]
//    * VMOVDQU16 ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VMOVDQU16 m128, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VMOVDQU16 m256, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// v0 is the source and v1 the destination; the destination may carry an
// opmask {k} and zeroing {z} decoration (consumed via kcode/zcode). Memory
// forms go through m.evex, which takes the disp8*N compressed-displacement
// scale as the final mrsd argument (64/32/16 bytes, i.e. the full vector
// width of the form). Panics if no form matches.
func (self *Program) VMOVDQU16(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVDQU16", 2, Operands { v0, v1 })
    // VMOVDQU16 zmm, m512{k}{z}
    if isZMM(v0) && isM512kz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 64)
        })
    }
    // VMOVDQU16 zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        // Reg-reg forms emit the 4-byte EVEX prefix by hand: 0x62 is the
        // EVEX escape byte, the second byte XORs the inverted R/X/B/R'
        // register-extension bits into 0xf1, 0xff carries the pp/mm fields,
        // and the fourth byte merges z, the opmask and the vector-length
        // bits (0x48 here — NOTE(review): presumably L'L=10 for 512-bit
        // plus EVEX.V'; confirm against the Intel SDM EVEX layout).
        // Both the load (0x6f) and store (0x7f) directions are registered.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xff)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xff)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQU16 m512, zmm{k}{z}
    if isM512(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VMOVDQU16 xmm, m128{k}{z}
    if isEVEXXMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)
        })
    }
    // VMOVDQU16 xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        // Same hand-built EVEX prefix as the zmm case, but with 0x08 in
        // the fourth byte (128-bit vector length).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xff)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xff)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQU16 ymm, m256{k}{z}
    if isEVEXYMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 32)
        })
    }
    // VMOVDQU16 ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        // 0x28 in the fourth prefix byte: 256-bit vector length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xff)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xff)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQU16 m128, xmm{k}{z}
    if isM128(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VMOVDQU16 m256, ymm{k}{z}
    if isM256(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VMOVDQU16")
    }
    return p
}
 58156  
// VMOVDQU32 performs "Move Unaligned Doubleword Values".
//
// Mnemonic        : VMOVDQU32
// Supported forms : (9 forms)
//
//    * VMOVDQU32 zmm, m512{k}{z}    [AVX512F]
//    * VMOVDQU32 zmm, zmm{k}{z}     [AVX512F]
//    * VMOVDQU32 m512, zmm{k}{z}    [AVX512F]
//    * VMOVDQU32 xmm, m128{k}{z}    [AVX512F,AVX512VL]
//    * VMOVDQU32 xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVDQU32 ymm, m256{k}{z}    [AVX512F,AVX512VL]
//    * VMOVDQU32 ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVDQU32 m128, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VMOVDQU32 m256, ymm{k}{z}    [AVX512F,AVX512VL]
//
// Structurally identical to VMOVDQU16 except that it requires AVX512F
// rather than AVX512BW and uses evex selector 0x06 / hand-emitted prefix
// byte 0x7e in place of 0x87 / 0xff (the doubleword-element variant of
// the prefix fields). v0 is the source, v1 the destination; the final
// mrsd argument (64/32/16) is the disp8*N compressed-displacement scale.
// Panics if no form matches.
func (self *Program) VMOVDQU32(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVDQU32", 2, Operands { v0, v1 })
    // VMOVDQU32 zmm, m512{k}{z}
    if isZMM(v0) && isM512kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 64)
        })
    }
    // VMOVDQU32 zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // Hand-built EVEX prefix (0x62 escape); both the load (0x6f) and
        // store (0x7f) register-to-register directions are registered.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQU32 m512, zmm{k}{z}
    if isM512(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VMOVDQU32 xmm, m128{k}{z}
    if isEVEXXMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)
        })
    }
    // VMOVDQU32 xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQU32 ymm, m256{k}{z}
    if isEVEXYMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 32)
        })
    }
    // VMOVDQU32 ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQU32 m128, xmm{k}{z}
    if isM128(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VMOVDQU32 m256, ymm{k}{z}
    if isM256(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VMOVDQU32")
    }
    return p
}
 58302  
// VMOVDQU64 performs "Move Unaligned Quadword Values".
//
// Mnemonic        : VMOVDQU64
// Supported forms : (9 forms)
//
//    * VMOVDQU64 zmm, m512{k}{z}    [AVX512F]
//    * VMOVDQU64 zmm, zmm{k}{z}     [AVX512F]
//    * VMOVDQU64 m512, zmm{k}{z}    [AVX512F]
//    * VMOVDQU64 xmm, m128{k}{z}    [AVX512F,AVX512VL]
//    * VMOVDQU64 xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVDQU64 ymm, m256{k}{z}    [AVX512F,AVX512VL]
//    * VMOVDQU64 ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVDQU64 m128, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VMOVDQU64 m256, ymm{k}{z}    [AVX512F,AVX512VL]
//
// Structurally identical to VMOVDQU32 but with evex selector 0x86 and
// hand-emitted prefix byte 0xfe (the quadword-element, EVEX.W=1 variant
// of the prefix fields — NOTE(review): inferred from the 0x7e/0xfe
// pairing; confirm against the Intel SDM). v0 is the source, v1 the
// destination; the final mrsd argument is the disp8*N scale.
// Panics if no form matches.
func (self *Program) VMOVDQU64(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVDQU64", 2, Operands { v0, v1 })
    // VMOVDQU64 zmm, m512{k}{z}
    if isZMM(v0) && isM512kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x86, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 64)
        })
    }
    // VMOVDQU64 zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // Hand-built EVEX prefix; both load (0x6f) and store (0x7f)
        // register-to-register directions are registered.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfe)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQU64 m512, zmm{k}{z}
    if isM512(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x86, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VMOVDQU64 xmm, m128{k}{z}
    if isEVEXXMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x86, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)
        })
    }
    // VMOVDQU64 xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfe)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQU64 ymm, m256{k}{z}
    if isEVEXYMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x86, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 32)
        })
    }
    // VMOVDQU64 ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfe)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQU64 m128, xmm{k}{z}
    if isM128(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x86, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VMOVDQU64 m256, ymm{k}{z}
    if isM256(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x86, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VMOVDQU64")
    }
    return p
}
 58448  
// VMOVDQU8 performs "Move Unaligned Byte Values".
//
// Mnemonic        : VMOVDQU8
// Supported forms : (9 forms)
//
//    * VMOVDQU8 zmm, m512{k}{z}    [AVX512BW]
//    * VMOVDQU8 zmm, zmm{k}{z}     [AVX512BW]
//    * VMOVDQU8 m512, zmm{k}{z}    [AVX512BW]
//    * VMOVDQU8 xmm, m128{k}{z}    [AVX512BW,AVX512VL]
//    * VMOVDQU8 xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VMOVDQU8 ymm, m256{k}{z}    [AVX512BW,AVX512VL]
//    * VMOVDQU8 ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VMOVDQU8 m128, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VMOVDQU8 m256, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Structurally identical to VMOVDQU16 but with evex selector 0x07 and
// hand-emitted prefix byte 0x7f (the byte-element variant of the prefix
// fields; note this 0x7f is a prefix byte, distinct from the 0x7f store
// opcode emitted afterwards). v0 is the source, v1 the destination; the
// final mrsd argument is the disp8*N scale. Panics if no form matches.
func (self *Program) VMOVDQU8(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVDQU8", 2, Operands { v0, v1 })
    // VMOVDQU8 zmm, m512{k}{z}
    if isZMM(v0) && isM512kz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x07, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 64)
        })
    }
    // VMOVDQU8 zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        // Hand-built EVEX prefix; both load (0x6f) and store (0x7f)
        // register-to-register directions are registered.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7f)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7f)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQU8 m512, zmm{k}{z}
    if isM512(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x07, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VMOVDQU8 xmm, m128{k}{z}
    if isEVEXXMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x07, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)
        })
    }
    // VMOVDQU8 xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7f)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7f)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQU8 ymm, m256{k}{z}
    if isEVEXYMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x07, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 32)
        })
    }
    // VMOVDQU8 ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7f)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7f)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQU8 m128, xmm{k}{z}
    if isM128(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x07, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VMOVDQU8 m256, ymm{k}{z}
    if isM256(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x07, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VMOVDQU8")
    }
    return p
}
 58594  
// VMOVHLPS performs "Move Packed Single-Precision Floating-Point Values High to Low".
//
// Mnemonic        : VMOVHLPS
// Supported forms : (2 forms)
//
//    * VMOVHLPS xmm, xmm, xmm    [AVX]
//    * VMOVHLPS xmm, xmm, xmm    [AVX512F]
//
// Three-operand AT&T order: v0 and v1 are the sources (v1 is encoded in
// the VEX/EVEX.vvvv field via hlcode), v2 is the destination. The two
// forms differ only in encoding: a 2-byte VEX for plain AVX and a
// hand-built 4-byte EVEX (0x62 escape) for AVX-512. NOTE(review): a
// low xmm register may satisfy both isXMM and isEVEXXMM, in which case
// both encodings are registered — selection happens outside this
// function. Panics if no form matches.
func (self *Program) VMOVHLPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VMOVHLPS", 3, Operands { v0, v1, v2 })
    // VMOVHLPS xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x12)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMOVHLPS xmm, xmm, xmm
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | 0x00)
            m.emit(0x12)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VMOVHLPS")
    }
    return p
}
 58633  
// VMOVHPD performs "Move High Packed Double-Precision Floating-Point Value".
//
// Mnemonic        : VMOVHPD
// Supported forms : (4 forms)
//
//    * VMOVHPD xmm, m64         [AVX]
//    * VMOVHPD m64, xmm, xmm    [AVX]
//    * VMOVHPD xmm, m64         [AVX512F]
//    * VMOVHPD m64, xmm, xmm    [AVX512F]
//
// This mnemonic takes either 2 operands (register-to-memory store,
// opcode 0x17) or 3 operands (memory-plus-register merge into a new
// destination, opcode 0x16), so the trailing operand is variadic: with
// len(vv) == 0 the forms are (v0, v1) = (xmm, m64); with len(vv) == 1
// they are (v0, v1, vv[0]) = (m64, xmm-source2, xmm-destination).
// Any other operand count panics, as does a failure to match any form.
// The EVEX memory forms use a disp8*8 compressed-displacement scale,
// versus scale 1 for the VEX forms.
func (self *Program) VMOVHPD(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VMOVHPD", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VMOVHPD", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VMOVHPD takes 2 or 3 operands")
    }
    // VMOVHPD xmm, m64
    if len(vv) == 0 && isXMM(v0) && isM64(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x17)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVHPD m64, xmm, xmm
    if len(vv) == 1 && isM64(v0) && isXMM(v1) && isXMM(vv[0]) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x16)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMOVHPD xmm, m64
    if len(vv) == 0 && isEVEXXMM(v0) && isM64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[0]), addr(v[1]), 0, 0, 0, 0)
            m.emit(0x17)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VMOVHPD m64, xmm, xmm
    if len(vv) == 1 && isM64(v0) && isEVEXXMM(v1) && isEVEXXMM(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), 0, 0, 0)
            m.emit(0x16)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VMOVHPD")
    }
    return p
}
 58696  
// VMOVHPS performs "Move High Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VMOVHPS
// Supported forms : (4 forms)
//
//    * VMOVHPS xmm, m64         [AVX]
//    * VMOVHPS m64, xmm, xmm    [AVX]
//    * VMOVHPS xmm, m64         [AVX512F]
//    * VMOVHPS m64, xmm, xmm    [AVX512F]
//
func (self *Program) VMOVHPS(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Two operands select the "xmm, m64" forms; three operands select the
    // "m64, xmm, xmm" forms.
    switch len(vv) {
        case 0  : p = self.alloc("VMOVHPS", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VMOVHPS", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VMOVHPS takes 2 or 3 operands")
    }
    // Each matching operand pattern below appends one candidate encoding;
    // if none match, p.len stays 0 and the panic at the end fires.
    // VMOVHPS xmm, m64
    if len(vv) == 0 && isXMM(v0) && isM64(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX form: opcode 0x17, ModRM reg = v[0], memory operand = v[1].
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x17)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVHPS m64, xmm, xmm
    if len(vv) == 1 && isM64(v0) && isXMM(v1) && isXMM(vv[0]) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x16)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMOVHPS xmm, m64
    if len(vv) == 0 && isEVEXXMM(v0) && isM64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX form: the trailing 8 in mrsd is the disp8*N compression
        // scale, matching the 64-bit memory operand width.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[0]), addr(v[1]), 0, 0, 0, 0)
            m.emit(0x17)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VMOVHPS m64, xmm, xmm
    if len(vv) == 1 && isM64(v0) && isEVEXXMM(v1) && isEVEXXMM(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), 0, 0, 0)
            m.emit(0x16)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // No operand pattern matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VMOVHPS")
    }
    return p
}
 58759  
// VMOVLHPS performs "Move Packed Single-Precision Floating-Point Values Low to High".
//
// Mnemonic        : VMOVLHPS
// Supported forms : (2 forms)
//
//    * VMOVLHPS xmm, xmm, xmm    [AVX]
//    * VMOVLHPS xmm, xmm, xmm    [AVX512F]
//
func (self *Program) VMOVLHPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VMOVLHPS", 3, Operands { v0, v1, v2 })
    // Each matching operand pattern below appends one candidate encoding;
    // if none match, p.len stays 0 and the panic at the end fires.
    // VMOVLHPS xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX form: opcode 0x16; ModRM mod=11, reg = v[2], rm = v[0].
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x16)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMOVLHPS xmm, xmm, xmm
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // All operands are registers here, so the 4-byte EVEX prefix is
        // emitted inline rather than through m.evex (used elsewhere with
        // memory operands).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | 0x00)
            m.emit(0x16)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand pattern matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VMOVLHPS")
    }
    return p
}
 58798  
// VMOVLPD performs "Move Low Packed Double-Precision Floating-Point Value".
//
// Mnemonic        : VMOVLPD
// Supported forms : (4 forms)
//
//    * VMOVLPD xmm, m64         [AVX]
//    * VMOVLPD m64, xmm, xmm    [AVX]
//    * VMOVLPD xmm, m64         [AVX512F]
//    * VMOVLPD m64, xmm, xmm    [AVX512F]
//
func (self *Program) VMOVLPD(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Two operands select the "xmm, m64" forms; three operands select the
    // "m64, xmm, xmm" forms.
    switch len(vv) {
        case 0  : p = self.alloc("VMOVLPD", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VMOVLPD", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VMOVLPD takes 2 or 3 operands")
    }
    // Each matching operand pattern below appends one candidate encoding;
    // if none match, p.len stays 0 and the panic at the end fires.
    // VMOVLPD xmm, m64
    if len(vv) == 0 && isXMM(v0) && isM64(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX form: opcode 0x13, ModRM reg = v[0], memory operand = v[1].
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x13)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVLPD m64, xmm, xmm
    if len(vv) == 1 && isM64(v0) && isXMM(v1) && isXMM(vv[0]) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x12)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMOVLPD xmm, m64
    if len(vv) == 0 && isEVEXXMM(v0) && isM64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX form: the trailing 8 in mrsd is the disp8*N compression
        // scale, matching the 64-bit memory operand width.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[0]), addr(v[1]), 0, 0, 0, 0)
            m.emit(0x13)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VMOVLPD m64, xmm, xmm
    if len(vv) == 1 && isM64(v0) && isEVEXXMM(v1) && isEVEXXMM(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), 0, 0, 0)
            m.emit(0x12)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // No operand pattern matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VMOVLPD")
    }
    return p
}
 58861  
// VMOVLPS performs "Move Low Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VMOVLPS
// Supported forms : (4 forms)
//
//    * VMOVLPS xmm, m64         [AVX]
//    * VMOVLPS m64, xmm, xmm    [AVX]
//    * VMOVLPS xmm, m64         [AVX512F]
//    * VMOVLPS m64, xmm, xmm    [AVX512F]
//
func (self *Program) VMOVLPS(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Two operands select the "xmm, m64" forms; three operands select the
    // "m64, xmm, xmm" forms.
    switch len(vv) {
        case 0  : p = self.alloc("VMOVLPS", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VMOVLPS", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VMOVLPS takes 2 or 3 operands")
    }
    // Each matching operand pattern below appends one candidate encoding;
    // if none match, p.len stays 0 and the panic at the end fires.
    // VMOVLPS xmm, m64
    if len(vv) == 0 && isXMM(v0) && isM64(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX form: opcode 0x13, ModRM reg = v[0], memory operand = v[1].
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x13)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVLPS m64, xmm, xmm
    if len(vv) == 1 && isM64(v0) && isXMM(v1) && isXMM(vv[0]) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x12)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMOVLPS xmm, m64
    if len(vv) == 0 && isEVEXXMM(v0) && isM64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX form: the trailing 8 in mrsd is the disp8*N compression
        // scale, matching the 64-bit memory operand width.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[0]), addr(v[1]), 0, 0, 0, 0)
            m.emit(0x13)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VMOVLPS m64, xmm, xmm
    if len(vv) == 1 && isM64(v0) && isEVEXXMM(v1) && isEVEXXMM(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), 0, 0, 0)
            m.emit(0x12)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // No operand pattern matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VMOVLPS")
    }
    return p
}
 58924  
// VMOVMSKPD performs "Extract Packed Double-Precision Floating-Point Sign Mask".
//
// Mnemonic        : VMOVMSKPD
// Supported forms : (2 forms)
//
//    * VMOVMSKPD xmm, r32    [AVX]
//    * VMOVMSKPD ymm, r32    [AVX]
//
func (self *Program) VMOVMSKPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVMSKPD", 2, Operands { v0, v1 })
    // Register-only instruction (no memory forms); each matching pattern
    // appends one candidate encoding.
    // VMOVMSKPD xmm, r32
    if isXMM(v0) && isReg32(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // Opcode 0x50; ModRM mod=11, reg = v[1] (the r32 result), rm = v[0].
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[1]), v[0], 0)
            m.emit(0x50)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVMSKPD ymm, r32
    if isYMM(v0) && isReg32(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[1]), v[0], 0)
            m.emit(0x50)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No operand pattern matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VMOVMSKPD")
    }
    return p
}
 58960  
// VMOVMSKPS performs "Extract Packed Single-Precision Floating-Point Sign Mask".
//
// Mnemonic        : VMOVMSKPS
// Supported forms : (2 forms)
//
//    * VMOVMSKPS xmm, r32    [AVX]
//    * VMOVMSKPS ymm, r32    [AVX]
//
func (self *Program) VMOVMSKPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVMSKPS", 2, Operands { v0, v1 })
    // Register-only instruction (no memory forms); each matching pattern
    // appends one candidate encoding.
    // VMOVMSKPS xmm, r32
    if isXMM(v0) && isReg32(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // Opcode 0x50; ModRM mod=11, reg = v[1] (the r32 result), rm = v[0].
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[1]), v[0], 0)
            m.emit(0x50)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVMSKPS ymm, r32
    if isYMM(v0) && isReg32(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[1]), v[0], 0)
            m.emit(0x50)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No operand pattern matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VMOVMSKPS")
    }
    return p
}
 58996  
// VMOVNTDQ performs "Store Double Quadword Using Non-Temporal Hint".
//
// Mnemonic        : VMOVNTDQ
// Supported forms : (5 forms)
//
//    * VMOVNTDQ xmm, m128    [AVX]
//    * VMOVNTDQ ymm, m256    [AVX]
//    * VMOVNTDQ zmm, m512    [AVX512F]
//    * VMOVNTDQ xmm, m128    [AVX512F,AVX512VL]
//    * VMOVNTDQ ymm, m256    [AVX512F,AVX512VL]
//
func (self *Program) VMOVNTDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVNTDQ", 2, Operands { v0, v1 })
    // Each matching operand pattern below appends one candidate encoding;
    // if none match, p.len stays 0 and the panic at the end fires.
    // VMOVNTDQ xmm, m128
    if isXMM(v0) && isM128(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX form: opcode 0xe7, ModRM reg = v[0], memory operand = v[1].
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[0]), addr(v[1]), 0)
            m.emit(0xe7)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVNTDQ ymm, m256
    if isYMM(v0) && isM256(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[0]), addr(v[1]), 0)
            m.emit(0xe7)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVNTDQ zmm, m512
    if isZMM(v0) && isM512(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX forms: the trailing mrsd argument (64/16/32) is the disp8*N
        // compression scale, matching each form's memory operand width.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[0]), addr(v[1]), 0, 0, 0, 0)
            m.emit(0xe7)
            m.mrsd(lcode(v[0]), addr(v[1]), 64)
        })
    }
    // VMOVNTDQ xmm, m128
    if isEVEXXMM(v0) && isM128(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[0]), addr(v[1]), 0, 0, 0, 0)
            m.emit(0xe7)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)
        })
    }
    // VMOVNTDQ ymm, m256
    if isEVEXYMM(v0) && isM256(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[0]), addr(v[1]), 0, 0, 0, 0)
            m.emit(0xe7)
            m.mrsd(lcode(v[0]), addr(v[1]), 32)
        })
    }
    // No operand pattern matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VMOVNTDQ")
    }
    return p
}
 59065  
// VMOVNTDQA performs "Load Double Quadword Non-Temporal Aligned Hint".
//
// Mnemonic        : VMOVNTDQA
// Supported forms : (5 forms)
//
//    * VMOVNTDQA m128, xmm    [AVX]
//    * VMOVNTDQA m256, ymm    [AVX2]
//    * VMOVNTDQA m512, zmm    [AVX512F]
//    * VMOVNTDQA m128, xmm    [AVX512F,AVX512VL]
//    * VMOVNTDQA m256, ymm    [AVX512F,AVX512VL]
//
func (self *Program) VMOVNTDQA(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVNTDQA", 2, Operands { v0, v1 })
    // Each matching operand pattern below appends one candidate encoding;
    // if none match, p.len stays 0 and the panic at the end fires.
    // VMOVNTDQA m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // 3-byte VEX form (opcode map 0b10): opcode 0x2a, ModRM reg = v[1],
        // memory operand = v[0].
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x2a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVNTDQA m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x2a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVNTDQA m512, zmm
    if isM512(v0) && isZMM(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX forms: the trailing mrsd argument (64/16/32) is the disp8*N
        // compression scale, matching each form's memory operand width.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x2a)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VMOVNTDQA m128, xmm
    if isM128(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x2a)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VMOVNTDQA m256, ymm
    if isM256(v0) && isEVEXYMM(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x2a)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // No operand pattern matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VMOVNTDQA")
    }
    return p
}
 59134  
// VMOVNTPD performs "Store Packed Double-Precision Floating-Point Values Using Non-Temporal Hint".
//
// Mnemonic        : VMOVNTPD
// Supported forms : (5 forms)
//
//    * VMOVNTPD xmm, m128    [AVX]
//    * VMOVNTPD ymm, m256    [AVX]
//    * VMOVNTPD zmm, m512    [AVX512F]
//    * VMOVNTPD xmm, m128    [AVX512F,AVX512VL]
//    * VMOVNTPD ymm, m256    [AVX512F,AVX512VL]
//
func (self *Program) VMOVNTPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVNTPD", 2, Operands { v0, v1 })
    // Each matching operand pattern below appends one candidate encoding;
    // if none match, p.len stays 0 and the panic at the end fires.
    // VMOVNTPD xmm, m128
    if isXMM(v0) && isM128(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX form: opcode 0x2b, ModRM reg = v[0], memory operand = v[1].
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x2b)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVNTPD ymm, m256
    if isYMM(v0) && isM256(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x2b)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVNTPD zmm, m512
    if isZMM(v0) && isM512(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX forms: all three use the same 0x85 prefix selector (the PD
        // variant — compare 0x04 in the PS instructions); the trailing mrsd
        // argument is the disp8*N compression scale.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b10, ehcode(v[0]), addr(v[1]), 0, 0, 0, 0)
            m.emit(0x2b)
            m.mrsd(lcode(v[0]), addr(v[1]), 64)
        })
    }
    // VMOVNTPD xmm, m128
    if isEVEXXMM(v0) && isM128(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[0]), addr(v[1]), 0, 0, 0, 0)
            m.emit(0x2b)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)
        })
    }
    // VMOVNTPD ymm, m256
    if isEVEXYMM(v0) && isM256(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[0]), addr(v[1]), 0, 0, 0, 0)
            m.emit(0x2b)
            m.mrsd(lcode(v[0]), addr(v[1]), 32)
        })
    }
    // No operand pattern matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VMOVNTPD")
    }
    return p
}
 59203  
 59204  // VMOVNTPS performs "Store Packed Single-Precision Floating-Point Values Using Non-Temporal Hint".
 59205  //
 59206  // Mnemonic        : VMOVNTPS
 59207  // Supported forms : (5 forms)
 59208  //
 59209  //    * VMOVNTPS xmm, m128    [AVX]
 59210  //    * VMOVNTPS ymm, m256    [AVX]
 59211  //    * VMOVNTPS zmm, m512    [AVX512F]
 59212  //    * VMOVNTPS xmm, m128    [AVX512F,AVX512VL]
 59213  //    * VMOVNTPS ymm, m256    [AVX512F,AVX512VL]
 59214  //
 59215  func (self *Program) VMOVNTPS(v0 interface{}, v1 interface{}) *Instruction {
 59216      p := self.alloc("VMOVNTPS", 2, Operands { v0, v1 })
 59217      // VMOVNTPS xmm, m128
 59218      if isXMM(v0) && isM128(v1) {
 59219          self.require(ISA_AVX)
 59220          p.domain = DomainAVX
 59221          p.add(0, func(m *_Encoding, v []interface{}) {
 59222              m.vex2(0, hcode(v[0]), addr(v[1]), 0)
 59223              m.emit(0x2b)
 59224              m.mrsd(lcode(v[0]), addr(v[1]), 1)
 59225          })
 59226      }
 59227      // VMOVNTPS ymm, m256
 59228      if isYMM(v0) && isM256(v1) {
 59229          self.require(ISA_AVX)
 59230          p.domain = DomainAVX
 59231          p.add(0, func(m *_Encoding, v []interface{}) {
 59232              m.vex2(4, hcode(v[0]), addr(v[1]), 0)
 59233              m.emit(0x2b)
 59234              m.mrsd(lcode(v[0]), addr(v[1]), 1)
 59235          })
 59236      }
 59237      // VMOVNTPS zmm, m512
 59238      if isZMM(v0) && isM512(v1) {
 59239          self.require(ISA_AVX512F)
 59240          p.domain = DomainAVX
 59241          p.add(0, func(m *_Encoding, v []interface{}) {
 59242              m.evex(0b01, 0x04, 0b10, ehcode(v[0]), addr(v[1]), 0, 0, 0, 0)
 59243              m.emit(0x2b)
 59244              m.mrsd(lcode(v[0]), addr(v[1]), 64)
 59245          })
 59246      }
 59247      // VMOVNTPS xmm, m128
 59248      if isEVEXXMM(v0) && isM128(v1) {
 59249          self.require(ISA_AVX512VL | ISA_AVX512F)
 59250          p.domain = DomainAVX
 59251          p.add(0, func(m *_Encoding, v []interface{}) {
 59252              m.evex(0b01, 0x05, 0b00, ehcode(v[0]), addr(v[1]), 0, 0, 0, 0)
 59253              m.emit(0x2b)
 59254              m.mrsd(lcode(v[0]), addr(v[1]), 16)
 59255          })
 59256      }
 59257      // VMOVNTPS ymm, m256
 59258      if isEVEXYMM(v0) && isM256(v1) {
 59259          self.require(ISA_AVX512VL | ISA_AVX512F)
 59260          p.domain = DomainAVX
 59261          p.add(0, func(m *_Encoding, v []interface{}) {
 59262              m.evex(0b01, 0x05, 0b01, ehcode(v[0]), addr(v[1]), 0, 0, 0, 0)
 59263              m.emit(0x2b)
 59264              m.mrsd(lcode(v[0]), addr(v[1]), 32)
 59265          })
 59266      }
 59267      if p.len == 0 {
 59268          panic("invalid operands for VMOVNTPS")
 59269      }
 59270      return p
 59271  }
 59272  
// VMOVQ performs "Move Quadword".
//
// Mnemonic        : VMOVQ
// Supported forms : (10 forms)
//
//    * VMOVQ xmm, r64    [AVX]
//    * VMOVQ r64, xmm    [AVX]
//    * VMOVQ xmm, xmm    [AVX]
//    * VMOVQ m64, xmm    [AVX]
//    * VMOVQ xmm, m64    [AVX]
//    * VMOVQ xmm, r64    [AVX512F]
//    * VMOVQ r64, xmm    [AVX512F]
//    * VMOVQ xmm, xmm    [AVX512F]
//    * VMOVQ m64, xmm    [AVX512F]
//    * VMOVQ xmm, m64    [AVX512F]
//
func (self *Program) VMOVQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVQ", 2, Operands { v0, v1 })
    // Each matching operand pattern below appends one or more candidate
    // encodings (several forms have two equivalent encodings, e.g. opcode
    // 0x7e vs 0xd6); if none match, p.len stays 0 and the panic at the end
    // fires.
    // VMOVQ xmm, r64
    if isXMM(v0) && isReg64(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // 3-byte VEX prefix emitted inline; opcode 0x7e, ModRM mod=11,
        // reg = v[0], rm = v[1].
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe1 ^ (hcode(v[0]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0xf9)
            m.emit(0x7e)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVQ r64, xmm
    if isReg64(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe1 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9)
            m.emit(0x6e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // Two equivalent encodings: 0x7e with reg = v[1], and 0xd6 with
        // reg = v[0]; the assembler chooses between the candidates.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[1]), v[0], 0)
            m.emit(0x7e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[0]), v[1], 0)
            m.emit(0xd6)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVQ m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x7e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b1, 0x81, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x6e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVQ xmm, m64
    if isXMM(v0) && isM64(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[0]), addr(v[1]), 0)
            m.emit(0xd6)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b1, 0x81, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x7e)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVQ xmm, r64
    if isEVEXXMM(v0) && isReg64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // Register-only EVEX forms emit the 4-byte prefix inline rather
        // than through m.evex (used elsewhere with memory operands).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfd)
            m.emit(0x08)
            m.emit(0x7e)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVQ r64, xmm
    if isReg64(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit(0x08)
            m.emit(0x6e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVQ xmm, xmm
    if isEVEXXMM(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit(0x08)
            m.emit(0x7e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfd)
            m.emit(0x08)
            m.emit(0xd6)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVQ m64, xmm
    if isM64(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX memory forms: the trailing 8 in mrsd is the disp8*N
        // compression scale for the 64-bit memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x6e)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x86, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x7e)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VMOVQ xmm, m64
    if isEVEXXMM(v0) && isM64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[0]), addr(v[1]), 0, 0, 0, 0)
            m.emit(0x7e)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[0]), addr(v[1]), 0, 0, 0, 0)
            m.emit(0xd6)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // No operand pattern matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VMOVQ")
    }
    return p
}
 59442  
// VMOVSD performs "Move Scalar Double-Precision Floating-Point Value".
//
// Mnemonic        : VMOVSD
// Supported forms : (6 forms)
//
//    * VMOVSD m64, xmm               [AVX]
//    * VMOVSD xmm, m64               [AVX]
//    * VMOVSD xmm, xmm, xmm          [AVX]
//    * VMOVSD xmm, m64{k}            [AVX512F]
//    * VMOVSD m64, xmm{k}{z}         [AVX512F]
//    * VMOVSD xmm, xmm, xmm{k}{z}    [AVX512F]
//
// The 2-operand forms are memory load/store; the 3-operand forms merge the
// upper qword of the second source into the destination. Each matching form
// registers one or more candidate encodings via p.add; forms that have both
// a load-opcode (0x10) and a store-opcode (0x11) register both so the
// encoder can choose between them.
func (self *Program) VMOVSD(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Accept exactly 2 operands (load/store) or 3 operands (register merge).
    switch len(vv) {
        case 0  : p = self.alloc("VMOVSD", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VMOVSD", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VMOVSD takes 2 or 3 operands")
    }
    // VMOVSD m64, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX-encoded load: opcode 0x10, memory operand scale 1.
            m.vex2(3, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVSD xmm, m64
    if len(vv) == 0 && isXMM(v0) && isM64(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX-encoded store: opcode 0x11.
            m.vex2(3, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVSD xmm, xmm, xmm
    if len(vv) == 1 && isXMM(v0) && isXMM(v1) && isXMM(vv[0]) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // Two equivalent register-register encodings (0x10 with dest in
        // ModRM.reg, 0x11 with dest in ModRM.rm); the encoder picks one.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[0]), v[2], hlcode(v[1]))
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[2]))
        })
    }
    // VMOVSD xmm, m64{k}
    if len(vv) == 0 && isEVEXXMM(v0) && isM64k(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX store with opmask; disp8 is compressed with scale 8
            // (one 64-bit element).
            m.evex(0b01, 0x87, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VMOVSD m64, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX load with opmask and optional zeroing.
            m.evex(0b01, 0x87, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VMOVSD xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // Hand-assembled 4-byte EVEX prefix (0x62 escape, then the three
        // payload bytes built from register high bits, vvvv and k/z fields),
        // in both the 0x10 and 0x11 opcode orientations.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xff ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xff ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[2]))
        })
    }
    // No form matched the supplied operand types: report misuse loudly.
    if p.len == 0 {
        panic("invalid operands for VMOVSD")
    }
    return p
}
 59543  
// VMOVSHDUP performs "Move Packed Single-FP High and Duplicate".
//
// Mnemonic        : VMOVSHDUP
// Supported forms : (10 forms)
//
//    * VMOVSHDUP xmm, xmm           [AVX]
//    * VMOVSHDUP m128, xmm          [AVX]
//    * VMOVSHDUP ymm, ymm           [AVX]
//    * VMOVSHDUP m256, ymm          [AVX]
//    * VMOVSHDUP zmm, zmm{k}{z}     [AVX512F]
//    * VMOVSHDUP m512, zmm{k}{z}    [AVX512F]
//    * VMOVSHDUP xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVSHDUP ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVSHDUP m128, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VMOVSHDUP m256, ymm{k}{z}    [AVX512F,AVX512VL]
//
// All forms use opcode 0x16. AVX forms take the VEX path; AVX-512 forms use
// EVEX with the disp8 compression scale equal to the vector width in bytes
// (16/32/64).
func (self *Program) VMOVSHDUP(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVSHDUP", 2, Operands { v0, v1 })
    // VMOVSHDUP xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX 128-bit register form.
            m.vex2(2, hcode(v[1]), v[0], 0)
            m.emit(0x16)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVSHDUP m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x16)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVSHDUP ymm, ymm
    if isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX 256-bit register form (vex2 selector 6 vs 2 for 128-bit).
            m.vex2(6, hcode(v[1]), v[0], 0)
            m.emit(0x16)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVSHDUP m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(6, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x16)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVSHDUP zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix; 0x48 selects the 512-bit length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x16)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVSHDUP m512, zmm{k}{z}
    if isM512(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; disp8 compressed by 64 (full ZMM width).
            m.evex(0b01, 0x06, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x16)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VMOVSHDUP xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0x08 selects the 128-bit EVEX length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x16)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVSHDUP ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0x28 selects the 256-bit EVEX length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x16)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVSHDUP m128, xmm{k}{z}
    if isM128(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x16)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VMOVSHDUP m256, ymm{k}{z}
    if isM256(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x16)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // No form matched the supplied operand types: report misuse loudly.
    if p.len == 0 {
        panic("invalid operands for VMOVSHDUP")
    }
    return p
}
 59676  
// VMOVSLDUP performs "Move Packed Single-FP Low and Duplicate".
//
// Mnemonic        : VMOVSLDUP
// Supported forms : (10 forms)
//
//    * VMOVSLDUP xmm, xmm           [AVX]
//    * VMOVSLDUP m128, xmm          [AVX]
//    * VMOVSLDUP ymm, ymm           [AVX]
//    * VMOVSLDUP m256, ymm          [AVX]
//    * VMOVSLDUP zmm, zmm{k}{z}     [AVX512F]
//    * VMOVSLDUP m512, zmm{k}{z}    [AVX512F]
//    * VMOVSLDUP xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVSLDUP ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVSLDUP m128, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VMOVSLDUP m256, ymm{k}{z}    [AVX512F,AVX512VL]
//
// Structurally identical to VMOVSHDUP but with opcode 0x12 (duplicate the
// low, rather than high, single-precision elements).
func (self *Program) VMOVSLDUP(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVSLDUP", 2, Operands { v0, v1 })
    // VMOVSLDUP xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX 128-bit register form.
            m.vex2(2, hcode(v[1]), v[0], 0)
            m.emit(0x12)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVSLDUP m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x12)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVSLDUP ymm, ymm
    if isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX 256-bit register form.
            m.vex2(6, hcode(v[1]), v[0], 0)
            m.emit(0x12)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVSLDUP m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(6, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x12)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVSLDUP zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix; 0x48 selects the 512-bit length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x12)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVSLDUP m512, zmm{k}{z}
    if isM512(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; disp8 compressed by 64 (full ZMM width).
            m.evex(0b01, 0x06, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x12)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VMOVSLDUP xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0x08 selects the 128-bit EVEX length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x12)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVSLDUP ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0x28 selects the 256-bit EVEX length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x12)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVSLDUP m128, xmm{k}{z}
    if isM128(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x12)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VMOVSLDUP m256, ymm{k}{z}
    if isM256(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x12)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // No form matched the supplied operand types: report misuse loudly.
    if p.len == 0 {
        panic("invalid operands for VMOVSLDUP")
    }
    return p
}
 59809  
// VMOVSS performs "Move Scalar Single-Precision Floating-Point Values".
//
// Mnemonic        : VMOVSS
// Supported forms : (6 forms)
//
//    * VMOVSS m32, xmm               [AVX]
//    * VMOVSS xmm, m32               [AVX]
//    * VMOVSS xmm, xmm, xmm          [AVX]
//    * VMOVSS xmm, m32{k}            [AVX512F]
//    * VMOVSS m32, xmm{k}{z}         [AVX512F]
//    * VMOVSS xmm, xmm, xmm{k}{z}    [AVX512F]
//
// Single-precision counterpart of VMOVSD: same opcode pair (load 0x10 /
// store 0x11), but the memory element is 4 bytes, so the EVEX disp8
// compression scale is 4 instead of 8.
func (self *Program) VMOVSS(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Accept exactly 2 operands (load/store) or 3 operands (register merge).
    switch len(vv) {
        case 0  : p = self.alloc("VMOVSS", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VMOVSS", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VMOVSS takes 2 or 3 operands")
    }
    // VMOVSS m32, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX-encoded load: opcode 0x10.
            m.vex2(2, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVSS xmm, m32
    if len(vv) == 0 && isXMM(v0) && isM32(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX-encoded store: opcode 0x11.
            m.vex2(2, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVSS xmm, xmm, xmm
    if len(vv) == 1 && isXMM(v0) && isXMM(v1) && isXMM(vv[0]) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // Two equivalent register-register encodings; the encoder picks one.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[0]), v[2], hlcode(v[1]))
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[2]))
        })
    }
    // VMOVSS xmm, m32{k}
    if len(vv) == 0 && isEVEXXMM(v0) && isM32k(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX store with opmask; disp8 compressed by 4 (one dword).
            m.evex(0b01, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // VMOVSS m32, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX load with opmask and optional zeroing.
            m.evex(0b01, 0x06, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VMOVSS xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // Hand-assembled EVEX prefix in both opcode orientations
        // (0x10 and 0x11).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[2]))
        })
    }
    // No form matched the supplied operand types: report misuse loudly.
    if p.len == 0 {
        panic("invalid operands for VMOVSS")
    }
    return p
}
 59910  
// VMOVUPD performs "Move Unaligned Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VMOVUPD
// Supported forms : (15 forms)
//
//    * VMOVUPD xmm, xmm           [AVX]
//    * VMOVUPD m128, xmm          [AVX]
//    * VMOVUPD ymm, ymm           [AVX]
//    * VMOVUPD m256, ymm          [AVX]
//    * VMOVUPD xmm, m128          [AVX]
//    * VMOVUPD ymm, m256          [AVX]
//    * VMOVUPD zmm, m512{k}{z}    [AVX512F]
//    * VMOVUPD zmm, zmm{k}{z}     [AVX512F]
//    * VMOVUPD m512, zmm{k}{z}    [AVX512F]
//    * VMOVUPD xmm, m128{k}{z}    [AVX512F,AVX512VL]
//    * VMOVUPD xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVUPD ymm, m256{k}{z}    [AVX512F,AVX512VL]
//    * VMOVUPD ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVUPD m128, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VMOVUPD m256, ymm{k}{z}    [AVX512F,AVX512VL]
//
// Loads use opcode 0x10, stores opcode 0x11; register-register forms
// register both orientations and let the encoder choose. EVEX memory forms
// compress disp8 by the full vector width (16/32/64 bytes).
func (self *Program) VMOVUPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVUPD", 2, Operands { v0, v1 })
    // VMOVUPD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // Both 0x10 and 0x11 orientations of the VEX 128-bit form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[1]), v[0], 0)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[0]), v[1], 0)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVUPD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVUPD ymm, ymm
    if isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // Both orientations of the VEX 256-bit form (vex2 selector 5).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[1]), v[0], 0)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[0]), v[1], 0)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVUPD m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVUPD xmm, m128
    if isXMM(v0) && isM128(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX store form.
            m.vex2(1, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVUPD ymm, m256
    if isYMM(v0) && isM256(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVUPD zmm, m512{k}{z}
    if isZMM(v0) && isM512kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 512-bit store; disp8 compressed by 64.
            m.evex(0b01, 0x85, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 64)
        })
    }
    // VMOVUPD zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // Hand-assembled EVEX prefix in both opcode orientations;
        // 0x48 selects the 512-bit length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVUPD m512, zmm{k}{z}
    if isM512(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 512-bit load.
            m.evex(0b01, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VMOVUPD xmm, m128{k}{z}
    if isEVEXXMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 128-bit store; disp8 compressed by 16.
            m.evex(0b01, 0x85, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)
        })
    }
    // VMOVUPD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // Both opcode orientations; 0x08 selects the 128-bit EVEX length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVUPD ymm, m256{k}{z}
    if isEVEXYMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 256-bit store; disp8 compressed by 32.
            m.evex(0b01, 0x85, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 32)
        })
    }
    // VMOVUPD ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // Both opcode orientations; 0x28 selects the 256-bit EVEX length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVUPD m128, xmm{k}{z}
    if isM128(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 128-bit load.
            m.evex(0b01, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VMOVUPD m256, ymm{k}{z}
    if isM256(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 256-bit load.
            m.evex(0b01, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // No form matched the supplied operand types: report misuse loudly.
    if p.len == 0 {
        panic("invalid operands for VMOVUPD")
    }
    return p
}
 60132  
// VMOVUPS performs "Move Unaligned Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VMOVUPS
// Supported forms : (15 forms)
//
//    * VMOVUPS xmm, xmm           [AVX]
//    * VMOVUPS m128, xmm          [AVX]
//    * VMOVUPS ymm, ymm           [AVX]
//    * VMOVUPS m256, ymm          [AVX]
//    * VMOVUPS xmm, m128          [AVX]
//    * VMOVUPS ymm, m256          [AVX]
//    * VMOVUPS zmm, m512{k}{z}    [AVX512F]
//    * VMOVUPS zmm, zmm{k}{z}     [AVX512F]
//    * VMOVUPS m512, zmm{k}{z}    [AVX512F]
//    * VMOVUPS xmm, m128{k}{z}    [AVX512F,AVX512VL]
//    * VMOVUPS xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVUPS ymm, m256{k}{z}    [AVX512F,AVX512VL]
//    * VMOVUPS ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVUPS m128, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VMOVUPS m256, ymm{k}{z}    [AVX512F,AVX512VL]
//
// Each operand-type match below registers one or more candidate byte
// encodings via p.add; if no form matches the supplied operands the
// function panics. The returned *Instruction is the allocated slot with
// all matching encodings attached.
func (self *Program) VMOVUPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVUPS", 2, Operands { v0, v1 })
    // VMOVUPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // Register-to-register moves admit two interchangeable encodings
        // (opcode 0x10 and 0x11 with the operand roles in ModRM swapped);
        // both are registered as candidates.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[1]), v[0], 0)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[0]), v[1], 0)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVUPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVUPS ymm, ymm
    if isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // Same dual-encoding scheme as the xmm, xmm form above.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[1]), v[0], 0)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[0]), v[1], 0)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVUPS m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVUPS xmm, m128
    if isXMM(v0) && isM128(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVUPS ymm, m256
    if isYMM(v0) && isM256(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVUPS zmm, m512{k}{z}
    // EVEX memory forms go through the m.evex helper; the last mrsd
    // argument is the compressed-displacement scale (64 bytes here).
    if isZMM(v0) && isM512kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 64)
        })
    }
    // VMOVUPS zmm, zmm{k}{z}
    // Register-register EVEX forms build the 4-byte 0x62 prefix by hand
    // instead of using the m.evex helper; two swapped-role candidates
    // (opcode 0x10 / 0x11) are registered, as in the VEX forms.
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVUPS m512, zmm{k}{z}
    if isM512(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VMOVUPS xmm, m128{k}{z}
    if isEVEXXMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)
        })
    }
    // VMOVUPS xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVUPS ymm, m256{k}{z}
    if isEVEXYMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 32)
        })
    }
    // VMOVUPS ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVUPS m128, xmm{k}{z}
    if isM128(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VMOVUPS m256, ymm{k}{z}
    if isM256(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // No form matched the operand types: this is a caller error.
    if p.len == 0 {
        panic("invalid operands for VMOVUPS")
    }
    return p
}
 60354  
// VMPSADBW performs "Compute Multiple Packed Sums of Absolute Difference".
//
// Mnemonic        : VMPSADBW
// Supported forms : (4 forms)
//
//    * VMPSADBW imm8, xmm, xmm, xmm     [AVX]
//    * VMPSADBW imm8, m128, xmm, xmm    [AVX]
//    * VMPSADBW imm8, ymm, ymm, ymm     [AVX2]
//    * VMPSADBW imm8, m256, ymm, ymm    [AVX2]
//
// Operand order follows this package's convention (immediate first,
// destination last). The imm8 selector is always appended after the
// ModRM/SIB bytes via m.imm1. Panics if no form matches.
func (self *Program) VMPSADBW(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VMPSADBW", 4, Operands { v0, v1, v2, v3 })
    // VMPSADBW imm8, xmm, xmm, xmm
    // Register form: the 3-byte VEX prefix (0xc4) is emitted by hand.
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VMPSADBW imm8, m128, xmm, xmm
    // Memory form: the m.vex3 helper builds the prefix from the address.
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x42)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VMPSADBW imm8, ymm, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VMPSADBW imm8, m256, ymm, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x42)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the operand types: this is a caller error.
    if p.len == 0 {
        panic("invalid operands for VMPSADBW")
    }
    return p
}
 60420  
// VMULPD performs "Multiply Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VMULPD
// Supported forms : (11 forms)
//
//    * VMULPD xmm, xmm, xmm                   [AVX]
//    * VMULPD m128, xmm, xmm                  [AVX]
//    * VMULPD ymm, ymm, ymm                   [AVX]
//    * VMULPD m256, ymm, ymm                  [AVX]
//    * VMULPD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VMULPD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VMULPD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VMULPD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VMULPD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VMULPD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VMULPD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// The variadic vv carries the optional 4th operand: it is non-empty only
// for the {er} (embedded-rounding) form, where v0 is the rounding-mode
// selector and vv[0] is the destination. Panics if given more than 4
// operands or if no form matches.
func (self *Program) VMULPD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VMULPD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VMULPD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VMULPD takes 3 or 4 operands")
    }
    // VMULPD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMULPD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x59)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMULPD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMULPD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x59)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMULPD m512/m64bcst, zmm, zmm{k}{z}
    // EVEX memory form; bcode(v[0]) sets the broadcast bit when the source
    // is the m64bcst variant, and 64 is the disp8 compression scale.
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x59)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VMULPD {er}, zmm, zmm, zmm{k}{z}
    // Embedded-rounding form: v[0] is the rounding selector (vcode),
    // folded into the hand-built EVEX prefix.
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VMULPD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMULPD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x59)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VMULPD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMULPD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x59)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VMULPD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the operand types: this is a caller error.
    if p.len == 0 {
        panic("invalid operands for VMULPD")
    }
    return p
}
 60572  
// VMULPS performs "Multiply Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VMULPS
// Supported forms : (11 forms)
//
//    * VMULPS xmm, xmm, xmm                   [AVX]
//    * VMULPS m128, xmm, xmm                  [AVX]
//    * VMULPS ymm, ymm, ymm                   [AVX]
//    * VMULPS m256, ymm, ymm                  [AVX]
//    * VMULPS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VMULPS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VMULPS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VMULPS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VMULPS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VMULPS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VMULPS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Mirrors VMULPD but for single precision (no 0x66-class prefix byte in
// the encodings, m32bcst broadcasting). The variadic vv carries the
// optional 4th operand, used only by the {er} embedded-rounding form.
// Panics if given more than 4 operands or if no form matches.
func (self *Program) VMULPS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VMULPS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VMULPS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VMULPS takes 3 or 4 operands")
    }
    // VMULPS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMULPS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x59)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMULPS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMULPS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x59)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMULPS m512/m32bcst, zmm, zmm{k}{z}
    // EVEX memory form; bcode(v[0]) sets the broadcast bit for the
    // m32bcst variant, and 64 is the disp8 compression scale.
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x59)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VMULPS {er}, zmm, zmm, zmm{k}{z}
    // Embedded-rounding form: v[0] is the rounding selector (vcode),
    // folded into the hand-built EVEX prefix.
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7c ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VMULPS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMULPS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x59)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VMULPS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMULPS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x59)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VMULPS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the operand types: this is a caller error.
    if p.len == 0 {
        panic("invalid operands for VMULPS")
    }
    return p
}
 60724  
// VMULSD performs "Multiply Scalar Double-Precision Floating-Point Values".
//
// Mnemonic        : VMULSD
// Supported forms : (5 forms)
//
//    * VMULSD xmm, xmm, xmm                [AVX]
//    * VMULSD m64, xmm, xmm                [AVX]
//    * VMULSD m64, xmm, xmm{k}{z}          [AVX512F]
//    * VMULSD {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VMULSD xmm, xmm, xmm{k}{z}          [AVX512F]
//
// Scalar variant: operates on a single float64 lane, so the memory
// operand is m64 and the disp8 compression scale is 8. The variadic vv
// carries the optional 4th operand, used only by the {er}
// embedded-rounding form. Panics if given more than 4 operands or if no
// form matches.
func (self *Program) VMULSD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VMULSD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VMULSD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VMULSD takes 3 or 4 operands")
    }
    // VMULSD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMULSD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x59)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMULSD m64, xmm, xmm{k}{z}
    // EVEX memory form; disp8 is compressed with a scale of 8 bytes.
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x59)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VMULSD {er}, xmm, xmm, xmm{k}{z}
    // Embedded-rounding form: v[0] is the rounding selector (vcode),
    // folded into the hand-built EVEX prefix.
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xff ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VMULSD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xff ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the operand types: this is a caller error.
    if p.len == 0 {
        panic("invalid operands for VMULSD")
    }
    return p
}
 60804  
// VMULSS performs "Multiply Scalar Single-Precision Floating-Point Values".
//
// Mnemonic        : VMULSS
// Supported forms : (5 forms)
//
//    * VMULSS xmm, xmm, xmm                [AVX]
//    * VMULSS m32, xmm, xmm                [AVX]
//    * VMULSS m32, xmm, xmm{k}{z}          [AVX512F]
//    * VMULSS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VMULSS xmm, xmm, xmm{k}{z}          [AVX512F]
//
// The variadic vv operand is used only by the 4-operand {er}
// (embedded-rounding) form; every other form takes exactly 3 operands.
func (self *Program) VMULSS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VMULSS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VMULSS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VMULSS takes 3 or 4 operands")
    }
    // Each supported form below is tested independently; every form that
    // matches the supplied operands registers one candidate encoder.
    // VMULSS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMULSS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x59)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMULSS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x59)
            // Memory-operand forms emit ModRM/SIB/disp via mrsd; the final
            // argument is the EVEX compressed-displacement scale (4 = 32 bits).
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VMULSS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX encoding: 0x62 prefix, three payload bytes
            // (register-extension bits, inverted vvvv, mask/zeroing/rounding
            // bits), the 0x59 opcode, then a register-register ModRM byte.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7e ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VMULSS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No supported form matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VMULSS")
    }
    return p
}
 60884  
// VORPD performs "Bitwise Logical OR of Double-Precision Floating-Point Values".
//
// Mnemonic        : VORPD
// Supported forms : (10 forms)
//
//    * VORPD xmm, xmm, xmm                   [AVX]
//    * VORPD m128, xmm, xmm                  [AVX]
//    * VORPD ymm, ymm, ymm                   [AVX]
//    * VORPD m256, ymm, ymm                  [AVX]
//    * VORPD m512/m64bcst, zmm, zmm{k}{z}    [AVX512DQ]
//    * VORPD zmm, zmm, zmm{k}{z}             [AVX512DQ]
//    * VORPD m128/m64bcst, xmm, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VORPD xmm, xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VORPD m256/m64bcst, ymm, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VORPD ymm, ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
//
func (self *Program) VORPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VORPD", 3, Operands { v0, v1, v2 })
    // Each supported form below is tested independently; every form that
    // matches the supplied operands registers one candidate encoder.
    // VORPD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x56)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VORPD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x56)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VORPD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x56)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VORPD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x56)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VORPD m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory-operand EVEX forms are assembled by the evex helper; the
            // trailing mrsd argument is the compressed-displacement scale
            // (64/16/32 bytes for the zmm/xmm/ymm forms respectively).
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x56)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VORPD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX encoding: 0x62 prefix, three payload bytes,
            // the 0x56 opcode, then a register-register ModRM byte.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x56)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VORPD m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x56)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VORPD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x56)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VORPD m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x56)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VORPD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x56)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No supported form matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VORPD")
    }
    return p
}
 61017  
// VORPS performs "Bitwise Logical OR of Single-Precision Floating-Point Values".
//
// Mnemonic        : VORPS
// Supported forms : (10 forms)
//
//    * VORPS xmm, xmm, xmm                   [AVX]
//    * VORPS m128, xmm, xmm                  [AVX]
//    * VORPS ymm, ymm, ymm                   [AVX]
//    * VORPS m256, ymm, ymm                  [AVX]
//    * VORPS m512/m32bcst, zmm, zmm{k}{z}    [AVX512DQ]
//    * VORPS zmm, zmm, zmm{k}{z}             [AVX512DQ]
//    * VORPS m128/m32bcst, xmm, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VORPS xmm, xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VORPS m256/m32bcst, ymm, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VORPS ymm, ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
//
func (self *Program) VORPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VORPS", 3, Operands { v0, v1, v2 })
    // Each supported form below is tested independently; every form that
    // matches the supplied operands registers one candidate encoder.
    // VORPS xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x56)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VORPS m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x56)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VORPS ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x56)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VORPS m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x56)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VORPS m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory-operand EVEX forms are assembled by the evex helper; the
            // trailing mrsd argument is the compressed-displacement scale
            // (64/16/32 bytes for the zmm/xmm/ymm forms respectively).
            m.evex(0b01, 0x04, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x56)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VORPS zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX encoding: 0x62 prefix, three payload bytes,
            // the 0x56 opcode, then a register-register ModRM byte.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x56)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VORPS m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x56)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VORPS xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x56)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VORPS m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x56)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VORPS ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x56)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No supported form matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VORPS")
    }
    return p
}
 61150  
// VPABSB performs "Packed Absolute Value of Byte Integers".
//
// Mnemonic        : VPABSB
// Supported forms : (10 forms)
//
//    * VPABSB xmm, xmm           [AVX]
//    * VPABSB m128, xmm          [AVX]
//    * VPABSB ymm, ymm           [AVX2]
//    * VPABSB m256, ymm          [AVX2]
//    * VPABSB zmm, zmm{k}{z}     [AVX512BW]
//    * VPABSB m512, zmm{k}{z}    [AVX512BW]
//    * VPABSB xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPABSB ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPABSB m128, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPABSB m256, ymm{k}{z}    [AVX512BW,AVX512VL]
//
func (self *Program) VPABSB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPABSB", 2, Operands { v0, v1 })
    // Each supported form below is tested independently; every form that
    // matches the supplied operands registers one candidate encoder.
    // VPABSB xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled three-byte VEX encoding (0xC4 prefix), followed
            // by the 0x1c opcode and a register-register ModRM byte.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0x1c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPABSB m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x1c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPABSB ymm, ymm
    if isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)
            m.emit(0x1c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPABSB m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x1c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPABSB zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX encoding: 0x62 prefix, three payload bytes
            // (register extensions, mask/zeroing bits), opcode, ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x1c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPABSB m512, zmm{k}{z}
    if isM512(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // The trailing mrsd argument is the EVEX compressed-displacement
            // scale (64/16/32 bytes for the m512/m128/m256 forms).
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x1c)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VPABSB xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x1c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPABSB ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x1c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPABSB m128, xmm{k}{z}
    if isM128(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x1c)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VPABSB m256, ymm{k}{z}
    if isM256(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x1c)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // No supported form matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VPABSB")
    }
    return p
}
 61287  
// VPABSD performs "Packed Absolute Value of Doubleword Integers".
//
// Mnemonic        : VPABSD
// Supported forms : (10 forms)
//
//    * VPABSD xmm, xmm                   [AVX]
//    * VPABSD m128, xmm                  [AVX]
//    * VPABSD ymm, ymm                   [AVX2]
//    * VPABSD m256, ymm                  [AVX2]
//    * VPABSD m512/m32bcst, zmm{k}{z}    [AVX512F]
//    * VPABSD zmm, zmm{k}{z}             [AVX512F]
//    * VPABSD m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPABSD m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPABSD xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPABSD ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPABSD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPABSD", 2, Operands { v0, v1 })
    // Each supported form below is tested independently; every form that
    // matches the supplied operands registers one candidate encoder.
    // VPABSD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled three-byte VEX encoding (0xC4 prefix), followed
            // by the 0x1e opcode and a register-register ModRM byte.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0x1e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPABSD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x1e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPABSD ymm, ymm
    if isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)
            m.emit(0x1e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPABSD m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x1e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPABSD m512/m32bcst, zmm{k}{z}
    if isM512M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // The trailing mrsd argument is the EVEX compressed-displacement
            // scale (64/16/32 bytes for the m512/m128/m256 forms).
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x1e)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VPABSD zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX encoding: 0x62 prefix, three payload bytes
            // (register extensions, mask/zeroing bits), opcode, ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x1e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPABSD m128/m32bcst, xmm{k}{z}
    if isM128M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x1e)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VPABSD m256/m32bcst, ymm{k}{z}
    if isM256M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x1e)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VPABSD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x1e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPABSD ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x1e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No supported form matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VPABSD")
    }
    return p
}
 61424  
// VPABSQ performs "Packed Absolute Value of Quadword Integers".
//
// Mnemonic        : VPABSQ
// Supported forms : (6 forms)
//
//    * VPABSQ m512/m64bcst, zmm{k}{z}    [AVX512F]
//    * VPABSQ zmm, zmm{k}{z}             [AVX512F]
//    * VPABSQ m128/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPABSQ m256/m64bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPABSQ xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPABSQ ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Unlike VPABSB/VPABSW/VPABSD, there is no VEX (pre-AVX-512) form here:
// all forms are EVEX-encoded and require at least AVX512F.
func (self *Program) VPABSQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPABSQ", 2, Operands { v0, v1 })
    // Each supported form below is tested independently; every form that
    // matches the supplied operands registers one candidate encoder.
    // VPABSQ m512/m64bcst, zmm{k}{z}
    if isM512M64bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // The trailing mrsd argument is the EVEX compressed-displacement
            // scale (64/16/32 bytes for the m512/m128/m256 forms).
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x1f)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VPABSQ zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX encoding: 0x62 prefix, three payload bytes
            // (register extensions, mask/zeroing bits), opcode, ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x1f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPABSQ m128/m64bcst, xmm{k}{z}
    if isM128M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x1f)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VPABSQ m256/m64bcst, ymm{k}{z}
    if isM256M64bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x1f)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VPABSQ xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x1f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPABSQ ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x1f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No supported form matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VPABSQ")
    }
    return p
}
 61513  
// VPABSW performs "Packed Absolute Value of Word Integers".
//
// Mnemonic        : VPABSW
// Supported forms : (10 forms)
//
//    * VPABSW xmm, xmm           [AVX]
//    * VPABSW m128, xmm          [AVX]
//    * VPABSW ymm, ymm           [AVX2]
//    * VPABSW m256, ymm          [AVX2]
//    * VPABSW zmm, zmm{k}{z}     [AVX512BW]
//    * VPABSW m512, zmm{k}{z}    [AVX512BW]
//    * VPABSW xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPABSW ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPABSW m128, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPABSW m256, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Operands are passed source-first: v0 is the source (reg or memory), v1 the
// destination, matching the forms listed above. Every matching form registers
// one candidate encoder closure via p.add; ISA requirements are recorded with
// self.require.
func (self *Program) VPABSW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPABSW", 2, Operands { v0, v1 })
    // VPABSW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4) // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5)) // VEX byte 1: high register-extension bits XORed in (inverted encoding)
            m.emit(0x79) // VEX byte 2: prefix/length selector (128-bit form)
            m.emit(0x1d) // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (register-direct), reg=dst, rm=src
        })
    }
    // VPABSW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0) // 3-byte VEX prefix built by helper for the memory operand
            m.emit(0x1d) // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1) // ModRM/SIB/disp; scale 1 = no disp8 compression (VEX)
        })
    }
    // VPABSW ymm, ymm
    if isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4) // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5)) // VEX byte 1: high register-extension bits
            m.emit(0x7d) // VEX byte 2: prefix/length selector (256-bit form)
            m.emit(0x1d) // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VPABSW m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0) // 3-byte VEX prefix (256-bit selector 0x05)
            m.emit(0x1d) // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1) // ModRM/SIB/disp; no disp8 compression
        })
    }
    // VPABSW zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62) // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // EVEX P0: inverted register-extension bits
            m.emit(0x7d) // EVEX P1: prefix/vvvv field (no vvvv operand here)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48) // EVEX P2: z bit, opmask, length bits (0x48=512, 0x28=256, 0x08=128 across these forms)
            m.emit(0x1d) // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VPABSW m512, zmm{k}{z}
    if isM512(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0) // EVEX prefix via helper; 0b10 length selector = 512-bit
            m.emit(0x1d) // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 64) // ModRM/SIB/disp with disp8*64 compression (full 512-bit vector)
        })
    }
    // VPABSW xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62) // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // EVEX P0: inverted register-extension bits
            m.emit(0x7d) // EVEX P1
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08) // EVEX P2: 128-bit form
            m.emit(0x1d) // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VPABSW ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62) // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // EVEX P0: inverted register-extension bits
            m.emit(0x7d) // EVEX P1
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28) // EVEX P2: 256-bit form
            m.emit(0x1d) // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VPABSW m128, xmm{k}{z}
    if isM128(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0) // EVEX prefix; 0b00 length = 128-bit
            m.emit(0x1d) // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 16) // disp8*16 compression (128-bit vector)
        })
    }
    // VPABSW m256, ymm{k}{z}
    if isM256(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0) // EVEX prefix; 0b01 length = 256-bit
            m.emit(0x1d) // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 32) // disp8*32 compression (256-bit vector)
        })
    }
    // No form matched the supplied operands: refuse to build an instruction.
    if p.len == 0 {
        panic("invalid operands for VPABSW")
    }
    return p
}
 61650  
// VPACKSSDW performs "Pack Doublewords into Words with Signed Saturation".
//
// Mnemonic        : VPACKSSDW
// Supported forms : (10 forms)
//
//    * VPACKSSDW xmm, xmm, xmm                   [AVX]
//    * VPACKSSDW m128, xmm, xmm                  [AVX]
//    * VPACKSSDW ymm, ymm, ymm                   [AVX2]
//    * VPACKSSDW m256, ymm, ymm                  [AVX2]
//    * VPACKSSDW m512/m32bcst, zmm, zmm{k}{z}    [AVX512BW]
//    * VPACKSSDW zmm, zmm, zmm{k}{z}             [AVX512BW]
//    * VPACKSSDW m128/m32bcst, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPACKSSDW xmm, xmm, xmm{k}{z}             [AVX512BW,AVX512VL]
//    * VPACKSSDW m256/m32bcst, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//    * VPACKSSDW ymm, ymm, ymm{k}{z}             [AVX512BW,AVX512VL]
//
// Three-operand form: v0 is the rm source (reg/mem), v1 the second source
// carried in the prefix vvvv field, v2 the destination. Each matching form
// registers one candidate encoder via p.add.
func (self *Program) VPACKSSDW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPACKSSDW", 3, Operands { v0, v1, v2 })
    // VPACKSSDW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1])) // 2-byte VEX prefix (selector 1 = 128-bit form); vvvv carries v1
            m.emit(0x6b) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VPACKSSDW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1])) // VEX prefix for memory rm operand
            m.emit(0x6b) // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1) // ModRM/SIB/disp; no disp8 compression (VEX)
        })
    }
    // VPACKSSDW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1])) // 2-byte VEX prefix (selector 5 = 256-bit form)
            m.emit(0x6b) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VPACKSSDW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1])) // VEX prefix for 256-bit memory form
            m.emit(0x6b) // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1) // ModRM/SIB/disp; no disp8 compression
        })
    }
    // VPACKSSDW m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0])) // EVEX prefix; bcode = broadcast flag from the m32bcst operand
            m.emit(0x6b) // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64) // disp8*64 compression (512-bit vector)
        })
    }
    // VPACKSSDW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62) // EVEX prefix escape
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // EVEX P0: inverted register-extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3)) // EVEX P1: vvvv field carries v1 (inverted)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40) // EVEX P2: z bit, V' for v1, opmask, 512-bit length
            m.emit(0x6b) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VPACKSSDW m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0])) // EVEX prefix; 0b00 length = 128-bit, with broadcast flag
            m.emit(0x6b) // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 16) // disp8*16 compression (128-bit vector)
        })
    }
    // VPACKSSDW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62) // EVEX prefix escape
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // EVEX P0: inverted register-extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3)) // EVEX P1: vvvv field carries v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00) // EVEX P2: 128-bit length
            m.emit(0x6b) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VPACKSSDW m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0])) // EVEX prefix; 0b01 length = 256-bit, with broadcast flag
            m.emit(0x6b) // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 32) // disp8*32 compression (256-bit vector)
        })
    }
    // VPACKSSDW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62) // EVEX prefix escape
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // EVEX P0: inverted register-extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3)) // EVEX P1: vvvv field carries v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20) // EVEX P2: 256-bit length
            m.emit(0x6b) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // No form matched the supplied operands: refuse to build an instruction.
    if p.len == 0 {
        panic("invalid operands for VPACKSSDW")
    }
    return p
}
 61783  
// VPACKSSWB performs "Pack Words into Bytes with Signed Saturation".
//
// Mnemonic        : VPACKSSWB
// Supported forms : (10 forms)
//
//    * VPACKSSWB xmm, xmm, xmm           [AVX]
//    * VPACKSSWB m128, xmm, xmm          [AVX]
//    * VPACKSSWB ymm, ymm, ymm           [AVX2]
//    * VPACKSSWB m256, ymm, ymm          [AVX2]
//    * VPACKSSWB zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPACKSSWB m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPACKSSWB xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPACKSSWB m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPACKSSWB ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPACKSSWB m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Three-operand form: v0 is the rm source (reg/mem), v1 the second source
// carried in the prefix vvvv field, v2 the destination. Each matching form
// registers one candidate encoder via p.add.
func (self *Program) VPACKSSWB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPACKSSWB", 3, Operands { v0, v1, v2 })
    // VPACKSSWB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1])) // 2-byte VEX prefix (selector 1 = 128-bit form); vvvv carries v1
            m.emit(0x63) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VPACKSSWB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1])) // VEX prefix for memory rm operand
            m.emit(0x63) // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1) // ModRM/SIB/disp; no disp8 compression (VEX)
        })
    }
    // VPACKSSWB ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1])) // 2-byte VEX prefix (selector 5 = 256-bit form)
            m.emit(0x63) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VPACKSSWB m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1])) // VEX prefix for 256-bit memory form
            m.emit(0x63) // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1) // ModRM/SIB/disp; no disp8 compression
        })
    }
    // VPACKSSWB zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62) // EVEX prefix escape
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // EVEX P0: inverted register-extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3)) // EVEX P1: vvvv field carries v1 (inverted)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40) // EVEX P2: z bit, V' for v1, opmask, 512-bit length
            m.emit(0x63) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VPACKSSWB m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // EVEX prefix; 0b10 length = 512-bit, no broadcast (plain m512)
            m.emit(0x63) // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64) // disp8*64 compression (512-bit vector)
        })
    }
    // VPACKSSWB xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62) // EVEX prefix escape
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // EVEX P0: inverted register-extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3)) // EVEX P1: vvvv field carries v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00) // EVEX P2: 128-bit length
            m.emit(0x63) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VPACKSSWB m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // EVEX prefix; 0b00 length = 128-bit
            m.emit(0x63) // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 16) // disp8*16 compression (128-bit vector)
        })
    }
    // VPACKSSWB ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62) // EVEX prefix escape
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // EVEX P0: inverted register-extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3)) // EVEX P1: vvvv field carries v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20) // EVEX P2: 256-bit length
            m.emit(0x63) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VPACKSSWB m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // EVEX prefix; 0b01 length = 256-bit
            m.emit(0x63) // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 32) // disp8*32 compression (256-bit vector)
        })
    }
    // No form matched the supplied operands: refuse to build an instruction.
    if p.len == 0 {
        panic("invalid operands for VPACKSSWB")
    }
    return p
}
 61916  
// VPACKUSDW performs "Pack Doublewords into Words with Unsigned Saturation".
//
// Mnemonic        : VPACKUSDW
// Supported forms : (10 forms)
//
//    * VPACKUSDW xmm, xmm, xmm                   [AVX]
//    * VPACKUSDW m128, xmm, xmm                  [AVX]
//    * VPACKUSDW ymm, ymm, ymm                   [AVX2]
//    * VPACKUSDW m256, ymm, ymm                  [AVX2]
//    * VPACKUSDW m512/m32bcst, zmm, zmm{k}{z}    [AVX512BW]
//    * VPACKUSDW zmm, zmm, zmm{k}{z}             [AVX512BW]
//    * VPACKUSDW m128/m32bcst, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPACKUSDW xmm, xmm, xmm{k}{z}             [AVX512BW,AVX512VL]
//    * VPACKUSDW m256/m32bcst, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//    * VPACKUSDW ymm, ymm, ymm{k}{z}             [AVX512BW,AVX512VL]
//
// Three-operand form: v0 is the rm source (reg/mem), v1 the second source
// carried in the prefix vvvv field, v2 the destination. Unlike the PACKSS
// encoders in this file, the register-register AVX forms here emit a 3-byte
// VEX prefix by hand (0xC4 escape) rather than calling m.vex2.
func (self *Program) VPACKUSDW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPACKUSDW", 3, Operands { v0, v1, v2 })
    // VPACKUSDW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4) // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5)) // VEX byte 1: high register-extension bits XORed in (inverted encoding)
            m.emit(0x79 ^ (hlcode(v[1]) << 3)) // VEX byte 2: prefix/length plus inverted vvvv carrying v1 (128-bit form)
            m.emit(0x2b) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VPACKUSDW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 3-byte VEX prefix built by helper for the memory operand
            m.emit(0x2b) // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1) // ModRM/SIB/disp; no disp8 compression (VEX)
        })
    }
    // VPACKUSDW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4) // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5)) // VEX byte 1: high register-extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3)) // VEX byte 2: prefix/length plus inverted vvvv (256-bit form)
            m.emit(0x2b) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VPACKUSDW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 3-byte VEX prefix (256-bit selector 0x05)
            m.emit(0x2b) // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1) // ModRM/SIB/disp; no disp8 compression
        })
    }
    // VPACKUSDW m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0])) // EVEX prefix; bcode = broadcast flag from the m32bcst operand
            m.emit(0x2b) // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64) // disp8*64 compression (512-bit vector)
        })
    }
    // VPACKUSDW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62) // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // EVEX P0: inverted register-extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3)) // EVEX P1: vvvv field carries v1 (inverted)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40) // EVEX P2: z bit, V' for v1, opmask, 512-bit length
            m.emit(0x2b) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VPACKUSDW m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0])) // EVEX prefix; 0b00 length = 128-bit, with broadcast flag
            m.emit(0x2b) // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 16) // disp8*16 compression (128-bit vector)
        })
    }
    // VPACKUSDW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62) // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // EVEX P0: inverted register-extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3)) // EVEX P1: vvvv field carries v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00) // EVEX P2: 128-bit length
            m.emit(0x2b) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VPACKUSDW m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0])) // EVEX prefix; 0b01 length = 256-bit, with broadcast flag
            m.emit(0x2b) // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 32) // disp8*32 compression (256-bit vector)
        })
    }
    // VPACKUSDW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62) // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // EVEX P0: inverted register-extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3)) // EVEX P1: vvvv field carries v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20) // EVEX P2: 256-bit length
            m.emit(0x2b) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // No form matched the supplied operands: refuse to build an instruction.
    if p.len == 0 {
        panic("invalid operands for VPACKUSDW")
    }
    return p
}
 62053  
// VPACKUSWB performs "Pack Words into Bytes with Unsigned Saturation".
//
// Mnemonic        : VPACKUSWB
// Supported forms : (10 forms)
//
//    * VPACKUSWB xmm, xmm, xmm           [AVX]
//    * VPACKUSWB m128, xmm, xmm          [AVX]
//    * VPACKUSWB ymm, ymm, ymm           [AVX2]
//    * VPACKUSWB m256, ymm, ymm          [AVX2]
//    * VPACKUSWB zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPACKUSWB m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPACKUSWB xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPACKUSWB m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPACKUSWB ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPACKUSWB m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Three-operand form: v0 is the rm source (reg/mem), v1 the second source
// carried in the prefix vvvv field, v2 the destination. Each matching form
// registers one candidate encoder via p.add.
func (self *Program) VPACKUSWB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPACKUSWB", 3, Operands { v0, v1, v2 })
    // VPACKUSWB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1])) // 2-byte VEX prefix (selector 1 = 128-bit form); vvvv carries v1
            m.emit(0x67) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VPACKUSWB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1])) // VEX prefix for memory rm operand
            m.emit(0x67) // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1) // ModRM/SIB/disp; no disp8 compression (VEX)
        })
    }
    // VPACKUSWB ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1])) // 2-byte VEX prefix (selector 5 = 256-bit form)
            m.emit(0x67) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VPACKUSWB m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1])) // VEX prefix for 256-bit memory form
            m.emit(0x67) // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1) // ModRM/SIB/disp; no disp8 compression
        })
    }
    // VPACKUSWB zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62) // EVEX prefix escape
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // EVEX P0: inverted register-extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3)) // EVEX P1: vvvv field carries v1 (inverted)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40) // EVEX P2: z bit, V' for v1, opmask, 512-bit length
            m.emit(0x67) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VPACKUSWB m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // EVEX prefix; 0b10 length = 512-bit, no broadcast (plain m512)
            m.emit(0x67) // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64) // disp8*64 compression (512-bit vector)
        })
    }
    // VPACKUSWB xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62) // EVEX prefix escape
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // EVEX P0: inverted register-extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3)) // EVEX P1: vvvv field carries v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00) // EVEX P2: 128-bit length
            m.emit(0x67) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VPACKUSWB m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // EVEX prefix; 0b00 length = 128-bit
            m.emit(0x67) // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 16) // disp8*16 compression (128-bit vector)
        })
    }
    // VPACKUSWB ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62) // EVEX prefix escape
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // EVEX P0: inverted register-extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3)) // EVEX P1: vvvv field carries v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20) // EVEX P2: 256-bit length
            m.emit(0x67) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VPACKUSWB m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // EVEX prefix; 0b01 length = 256-bit
            m.emit(0x67) // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 32) // disp8*32 compression (256-bit vector)
        })
    }
    // No form matched the supplied operands: refuse to build an instruction.
    if p.len == 0 {
        panic("invalid operands for VPACKUSWB")
    }
    return p
}
 62186  
// VPADDB performs "Add Packed Byte Integers".
//
// Mnemonic        : VPADDB
// Supported forms : (10 forms)
//
//    * VPADDB xmm, xmm, xmm           [AVX]
//    * VPADDB m128, xmm, xmm          [AVX]
//    * VPADDB ymm, ymm, ymm           [AVX2]
//    * VPADDB m256, ymm, ymm          [AVX2]
//    * VPADDB zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPADDB m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPADDB xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPADDB m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPADDB ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPADDB m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Operand order is AT&T-style: v0 = source, v1 = second source, v2 = destination.
// Each operand-form check that matches queues a candidate encoder on p; if none
// matches, the operands are invalid and we panic at the bottom.
func (self *Program) VPADDB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPADDB", 3, Operands { v0, v1, v2 })
    // VPADDB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))     // 2-byte VEX prefix; 1 = 0x66 prefix with 128-bit length (compare 5 for the 256-bit forms below)
            m.emit(0xfc)                                   // opcode: PADDB
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11 (register-direct), reg=v2, rm=v0
        })
    }
    // VPADDB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xfc)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)             // ModRM + SIB + displacement; scale 1 = no disp8 compression (VEX)
        })
    }
    // VPADDB ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))     // 5 = 0x66 prefix with VEX.L=1 (256-bit)
            m.emit(0xfc)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDB m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xfc)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPADDB zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix emitted by hand for the register-register form.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: R/X/B/R' extension bits folded into base 0xf1 (map = 0F)
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // P1: base 0x7d => W=0, pp=01 (0x66); vvvv = ^v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // P2: z, V', aaa mask bits; 0x40 => L'L=10 (512-bit)
            m.emit(0xfc)                                                                    // opcode: PADDB
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: register-direct
        })
    }
    // VPADDB m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)  // EVEX helper: 0b10 = 512-bit; final 0 = no broadcast (byte element)
            m.emit(0xfc)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)            // disp8 compressed with N=64 (full 512-bit memory operand)
        })
    }
    // VPADDB xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // 0x00 => L'L=00 (128-bit)
            m.emit(0xfc)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDB m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xfc)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)            // disp8 compression N=16 for the 128-bit form
        })
    }
    // VPADDB ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // 0x20 => L'L=01 (256-bit)
            m.emit(0xfc)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDB m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xfc)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)            // disp8 compression N=32 for the 256-bit form
        })
    }
    // No operand form matched: the caller supplied an unsupported combination.
    if p.len == 0 {
        panic("invalid operands for VPADDB")
    }
    return p
}
 62319  
// VPADDD performs "Add Packed Doubleword Integers".
//
// Mnemonic        : VPADDD
// Supported forms : (10 forms)
//
//    * VPADDD xmm, xmm, xmm                   [AVX]
//    * VPADDD m128, xmm, xmm                  [AVX]
//    * VPADDD ymm, ymm, ymm                   [AVX2]
//    * VPADDD m256, ymm, ymm                  [AVX2]
//    * VPADDD m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPADDD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPADDD m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPADDD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPADDD m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPADDD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Operand order is AT&T-style: v0 = source, v1 = second source, v2 = destination.
// Unlike the byte/word variants, the dword EVEX memory forms support embedded
// broadcast (m32bcst), so bcode(v[0]) is threaded into the EVEX prefix.
func (self *Program) VPADDD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPADDD", 3, Operands { v0, v1, v2 })
    // VPADDD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))     // 2-byte VEX prefix, 0x66 prefix, 128-bit length
            m.emit(0xfe)                                   // opcode: PADDD
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11 (register-direct), reg=v2, rm=v0
        })
    }
    // VPADDD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xfe)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)             // ModRM + SIB + displacement; no disp8 compression under VEX
        })
    }
    // VPADDD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))     // 5 = 0x66 prefix with VEX.L=1 (256-bit)
            m.emit(0xfe)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xfe)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPADDD m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))  // 0b10 = 512-bit; bcode carries the {1to16} broadcast bit
            m.emit(0xfe)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)            // disp8 compressed with N=64
        })
    }
    // VPADDD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix emitted by hand for the register-register form.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: register extension bits over base 0xf1 (map = 0F)
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // P1: base 0x7d => W=0, pp=01 (0x66); vvvv = ^v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // P2: 0x40 => L'L=10 (512-bit)
            m.emit(0xfe)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDD m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xfe)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)            // disp8 compression N=16 (128-bit form)
        })
    }
    // VPADDD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // 0x00 => L'L=00 (128-bit)
            m.emit(0xfe)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDD m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xfe)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)            // disp8 compression N=32 (256-bit form)
        })
    }
    // VPADDD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // 0x20 => L'L=01 (256-bit)
            m.emit(0xfe)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand form matched: the caller supplied an unsupported combination.
    if p.len == 0 {
        panic("invalid operands for VPADDD")
    }
    return p
}
 62452  
// VPADDQ performs "Add Packed Quadword Integers".
//
// Mnemonic        : VPADDQ
// Supported forms : (10 forms)
//
//    * VPADDQ xmm, xmm, xmm                   [AVX]
//    * VPADDQ m128, xmm, xmm                  [AVX]
//    * VPADDQ ymm, ymm, ymm                   [AVX2]
//    * VPADDQ m256, ymm, ymm                  [AVX2]
//    * VPADDQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPADDQ zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPADDQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPADDQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPADDQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPADDQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Operand order is AT&T-style: v0 = source, v1 = second source, v2 = destination.
// The quadword variant sets EVEX.W=1: note 0x85 (vs 0x05) in the evex() calls
// and P1 base 0xfd (vs 0x7d) in the hand-emitted prefixes below.
func (self *Program) VPADDQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPADDQ", 3, Operands { v0, v1, v2 })
    // VPADDQ xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))     // 2-byte VEX prefix, 0x66 prefix, 128-bit length
            m.emit(0xd4)                                   // opcode: PADDQ
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11 (register-direct), reg=v2, rm=v0
        })
    }
    // VPADDQ m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xd4)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)             // ModRM + SIB + displacement; no disp8 compression under VEX
        })
    }
    // VPADDQ ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))     // 5 = 0x66 prefix with VEX.L=1 (256-bit)
            m.emit(0xd4)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDQ m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xd4)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPADDQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))  // 0x85 sets W=1; bcode carries the {1to8} broadcast bit
            m.emit(0xd4)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)            // disp8 compressed with N=64
        })
    }
    // VPADDQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix emitted by hand for the register-register form.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: register extension bits over base 0xf1 (map = 0F)
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                              // P1: base 0xfd => W=1, pp=01 (0x66); vvvv = ^v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // P2: 0x40 => L'L=10 (512-bit)
            m.emit(0xd4)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xd4)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)            // disp8 compression N=16 (128-bit form)
        })
    }
    // VPADDQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // 0x00 => L'L=00 (128-bit)
            m.emit(0xd4)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xd4)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)            // disp8 compression N=32 (256-bit form)
        })
    }
    // VPADDQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // 0x20 => L'L=01 (256-bit)
            m.emit(0xd4)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand form matched: the caller supplied an unsupported combination.
    if p.len == 0 {
        panic("invalid operands for VPADDQ")
    }
    return p
}
 62585  
// VPADDSB performs "Add Packed Signed Byte Integers with Signed Saturation".
//
// Mnemonic        : VPADDSB
// Supported forms : (10 forms)
//
//    * VPADDSB xmm, xmm, xmm           [AVX]
//    * VPADDSB m128, xmm, xmm          [AVX]
//    * VPADDSB ymm, ymm, ymm           [AVX2]
//    * VPADDSB m256, ymm, ymm          [AVX2]
//    * VPADDSB zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPADDSB m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPADDSB xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPADDSB m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPADDSB ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPADDSB m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Operand order is AT&T-style: v0 = source, v1 = second source, v2 = destination.
// Identical encoding layout to VPADDB, only the opcode differs (0xec vs 0xfc);
// byte-element forms have no embedded-broadcast variant.
func (self *Program) VPADDSB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPADDSB", 3, Operands { v0, v1, v2 })
    // VPADDSB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))     // 2-byte VEX prefix, 0x66 prefix, 128-bit length
            m.emit(0xec)                                   // opcode: PADDSB
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11 (register-direct), reg=v2, rm=v0
        })
    }
    // VPADDSB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xec)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)             // ModRM + SIB + displacement; no disp8 compression under VEX
        })
    }
    // VPADDSB ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))     // 5 = 0x66 prefix with VEX.L=1 (256-bit)
            m.emit(0xec)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDSB m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xec)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPADDSB zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix emitted by hand for the register-register form.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: register extension bits over base 0xf1 (map = 0F)
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // P1: base 0x7d => W=0, pp=01 (0x66); vvvv = ^v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // P2: 0x40 => L'L=10 (512-bit)
            m.emit(0xec)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDSB m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)  // final 0 = no broadcast (byte element)
            m.emit(0xec)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)            // disp8 compressed with N=64
        })
    }
    // VPADDSB xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // 0x00 => L'L=00 (128-bit)
            m.emit(0xec)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDSB m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xec)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)            // disp8 compression N=16 (128-bit form)
        })
    }
    // VPADDSB ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // 0x20 => L'L=01 (256-bit)
            m.emit(0xec)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDSB m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xec)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)            // disp8 compression N=32 (256-bit form)
        })
    }
    // No operand form matched: the caller supplied an unsupported combination.
    if p.len == 0 {
        panic("invalid operands for VPADDSB")
    }
    return p
}
 62718  
// VPADDSW performs "Add Packed Signed Word Integers with Signed Saturation".
//
// Mnemonic        : VPADDSW
// Supported forms : (10 forms)
//
//    * VPADDSW xmm, xmm, xmm           [AVX]
//    * VPADDSW m128, xmm, xmm          [AVX]
//    * VPADDSW ymm, ymm, ymm           [AVX2]
//    * VPADDSW m256, ymm, ymm          [AVX2]
//    * VPADDSW zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPADDSW m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPADDSW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPADDSW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPADDSW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPADDSW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Operand order is AT&T-style: v0 = source, v1 = second source, v2 = destination.
// Identical encoding layout to VPADDSB, only the opcode differs (0xed vs 0xec);
// word-element forms have no embedded-broadcast variant.
func (self *Program) VPADDSW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPADDSW", 3, Operands { v0, v1, v2 })
    // VPADDSW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))     // 2-byte VEX prefix, 0x66 prefix, 128-bit length
            m.emit(0xed)                                   // opcode: PADDSW
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11 (register-direct), reg=v2, rm=v0
        })
    }
    // VPADDSW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xed)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)             // ModRM + SIB + displacement; no disp8 compression under VEX
        })
    }
    // VPADDSW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))     // 5 = 0x66 prefix with VEX.L=1 (256-bit)
            m.emit(0xed)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDSW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xed)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPADDSW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix emitted by hand for the register-register form.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: register extension bits over base 0xf1 (map = 0F)
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // P1: base 0x7d => W=0, pp=01 (0x66); vvvv = ^v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // P2: 0x40 => L'L=10 (512-bit)
            m.emit(0xed)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDSW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)  // final 0 = no broadcast (word element)
            m.emit(0xed)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)            // disp8 compressed with N=64
        })
    }
    // VPADDSW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // 0x00 => L'L=00 (128-bit)
            m.emit(0xed)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDSW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xed)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)            // disp8 compression N=16 (128-bit form)
        })
    }
    // VPADDSW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // 0x20 => L'L=01 (256-bit)
            m.emit(0xed)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDSW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xed)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)            // disp8 compression N=32 (256-bit form)
        })
    }
    // No operand form matched: the caller supplied an unsupported combination.
    if p.len == 0 {
        panic("invalid operands for VPADDSW")
    }
    return p
}
 62851  
// VPADDUSB performs "Add Packed Unsigned Byte Integers with Unsigned Saturation".
//
// Mnemonic        : VPADDUSB
// Supported forms : (10 forms)
//
//    * VPADDUSB xmm, xmm, xmm           [AVX]
//    * VPADDUSB m128, xmm, xmm          [AVX]
//    * VPADDUSB ymm, ymm, ymm           [AVX2]
//    * VPADDUSB m256, ymm, ymm          [AVX2]
//    * VPADDUSB zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPADDUSB m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPADDUSB xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPADDUSB m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPADDUSB ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPADDUSB m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Operands are passed source-first, destination last, exactly as in the
// forms above: v0 is the register/memory source, v1 the second source
// register, and v2 the destination (maskable with {k}{z} in EVEX forms).
// Panics if the operand combination matches none of the supported forms.
func (self *Program) VPADDUSB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPADDUSB", 3, Operands { v0, v1, v2 })
    // Each matching operand-type combination below registers one candidate
    // encoder via p.add; the actual bytes are emitted later at assembly time.
    // VPADDUSB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))     // VEX prefix (flag 1 here vs 5 in the ymm forms — presumably the 256-bit L bit; see vex2)
            m.emit(0xdc)                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))  // ModRM: register-direct, reg=v2 (dest), rm=v0 (src)
        })
    }
    // VPADDUSB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xdc)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)             // ModRM+SIB+disp for the memory operand (scale 1: no disp8 compression in VEX forms)
        })
    }
    // VPADDUSB ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xdc)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDUSB m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xdc)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPADDUSB zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // EVEX P0: inverted high-register extension bits (R/B/R')
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // EVEX P1: inverted vvvv field encodes second source v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // EVEX P2: z bit, V', mask reg; 0x40 = 512-bit length (0x20=256, 0x00=128)
            m.emit(0xdc)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDUSB m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)  // EVEX prefix helper; disp8 below is compressed by the 64-byte vector width
            m.emit(0xdc)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPADDUSB xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xdc)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDUSB m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xdc)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPADDUSB ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xdc)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDUSB m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xdc)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPADDUSB")  // no supported form matched the supplied operand types
    }
    return p
}
 62984  
// VPADDUSW performs "Add Packed Unsigned Word Integers with Unsigned Saturation".
//
// Mnemonic        : VPADDUSW
// Supported forms : (10 forms)
//
//    * VPADDUSW xmm, xmm, xmm           [AVX]
//    * VPADDUSW m128, xmm, xmm          [AVX]
//    * VPADDUSW ymm, ymm, ymm           [AVX2]
//    * VPADDUSW m256, ymm, ymm          [AVX2]
//    * VPADDUSW zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPADDUSW m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPADDUSW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPADDUSW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPADDUSW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPADDUSW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Operands are passed source-first, destination last, exactly as in the
// forms above: v0 is the register/memory source, v1 the second source
// register, and v2 the destination (maskable with {k}{z} in EVEX forms).
// Panics if the operand combination matches none of the supported forms.
func (self *Program) VPADDUSW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPADDUSW", 3, Operands { v0, v1, v2 })
    // Each matching operand-type combination below registers one candidate
    // encoder via p.add; the actual bytes are emitted later at assembly time.
    // VPADDUSW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))     // VEX prefix (flag 1 here vs 5 in the ymm forms — presumably the 256-bit L bit; see vex2)
            m.emit(0xdd)                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))  // ModRM: register-direct, reg=v2 (dest), rm=v0 (src)
        })
    }
    // VPADDUSW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xdd)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)             // ModRM+SIB+disp for the memory operand (scale 1: no disp8 compression in VEX forms)
        })
    }
    // VPADDUSW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xdd)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDUSW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xdd)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPADDUSW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // EVEX P0: inverted high-register extension bits (R/B/R')
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // EVEX P1: inverted vvvv field encodes second source v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // EVEX P2: z bit, V', mask reg; 0x40 = 512-bit length (0x20=256, 0x00=128)
            m.emit(0xdd)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDUSW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)  // EVEX prefix helper; disp8 below is compressed by the 64-byte vector width
            m.emit(0xdd)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPADDUSW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xdd)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDUSW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xdd)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPADDUSW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xdd)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDUSW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xdd)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPADDUSW")  // no supported form matched the supplied operand types
    }
    return p
}
 63117  
// VPADDW performs "Add Packed Word Integers".
//
// Mnemonic        : VPADDW
// Supported forms : (10 forms)
//
//    * VPADDW xmm, xmm, xmm           [AVX]
//    * VPADDW m128, xmm, xmm          [AVX]
//    * VPADDW ymm, ymm, ymm           [AVX2]
//    * VPADDW m256, ymm, ymm          [AVX2]
//    * VPADDW zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPADDW m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPADDW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPADDW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPADDW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPADDW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Operands are passed source-first, destination last, exactly as in the
// forms above: v0 is the register/memory source, v1 the second source
// register, and v2 the destination (maskable with {k}{z} in EVEX forms).
// Panics if the operand combination matches none of the supported forms.
func (self *Program) VPADDW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPADDW", 3, Operands { v0, v1, v2 })
    // Each matching operand-type combination below registers one candidate
    // encoder via p.add; the actual bytes are emitted later at assembly time.
    // VPADDW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))     // VEX prefix (flag 1 here vs 5 in the ymm forms — presumably the 256-bit L bit; see vex2)
            m.emit(0xfd)                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))  // ModRM: register-direct, reg=v2 (dest), rm=v0 (src)
        })
    }
    // VPADDW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xfd)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)             // ModRM+SIB+disp for the memory operand (scale 1: no disp8 compression in VEX forms)
        })
    }
    // VPADDW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xfd)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xfd)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPADDW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // EVEX P0: inverted high-register extension bits (R/B/R')
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // EVEX P1: inverted vvvv field encodes second source v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // EVEX P2: z bit, V', mask reg; 0x40 = 512-bit length (0x20=256, 0x00=128)
            m.emit(0xfd)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)  // EVEX prefix helper; disp8 below is compressed by the 64-byte vector width
            m.emit(0xfd)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPADDW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xfd)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xfd)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPADDW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xfd)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xfd)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPADDW")  // no supported form matched the supplied operand types
    }
    return p
}
 63250  
// VPALIGNR performs "Packed Align Right".
//
// Mnemonic        : VPALIGNR
// Supported forms : (10 forms)
//
//    * VPALIGNR imm8, xmm, xmm, xmm           [AVX]
//    * VPALIGNR imm8, m128, xmm, xmm          [AVX]
//    * VPALIGNR imm8, ymm, ymm, ymm           [AVX2]
//    * VPALIGNR imm8, m256, ymm, ymm          [AVX2]
//    * VPALIGNR imm8, zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPALIGNR imm8, m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPALIGNR imm8, xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPALIGNR imm8, m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPALIGNR imm8, ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPALIGNR imm8, m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Operands match the forms above: v0 is the 8-bit immediate shift count,
// v1 the register/memory source, v2 the second source register, and v3
// the destination (maskable with {k}{z} in EVEX forms). Panics if the
// operand combination matches none of the supported forms.
func (self *Program) VPALIGNR(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPALIGNR", 4, Operands { v0, v1, v2, v3 })
    // Each matching operand-type combination below registers one candidate
    // encoder via p.add; the actual bytes are emitted later at assembly time.
    // VPALIGNR imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX escape
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))  // VEX byte 1: inverted R/B high-register bits
            m.emit(0x79 ^ (hlcode(v[2]) << 3))                      // VEX byte 2: inverted vvvv encodes second source v2
            m.emit(0x0f)                                            // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))           // ModRM: register-direct, reg=v3 (dest), rm=v1 (src)
            m.imm1(toImmAny(v[0]))                                  // trailing 8-bit immediate
        })
    }
    // VPALIGNR imm8, m128, xmm, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))  // 3-byte VEX prefix helper for the memory form
            m.emit(0x0f)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)                               // ModRM+SIB+disp (scale 1: no disp8 compression in VEX forms)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPALIGNR imm8, ymm, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPALIGNR imm8, m256, ymm, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x0f)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPALIGNR imm8, zmm, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX escape byte
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))  // EVEX P0: inverted high-register extension bits (R/B/R')
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                                              // EVEX P1: inverted vvvv field encodes second source v2
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)   // EVEX P2: z bit, V', mask reg; 0x40 = 512-bit length (0x20=256, 0x00=128)
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPALIGNR imm8, m512, zmm, zmm{k}{z}
    if isImm8(v0) && isM512(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)  // EVEX prefix helper; disp8 below is compressed by the 64-byte vector width
            m.emit(0x0f)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPALIGNR imm8, xmm, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPALIGNR imm8, m128, xmm, xmm{k}{z}
    if isImm8(v0) && isM128(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x0f)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPALIGNR imm8, ymm, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPALIGNR imm8, m256, ymm, ymm{k}{z}
    if isImm8(v0) && isM256(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x0f)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPALIGNR")  // no supported form matched the supplied operand types
    }
    return p
}
 63397  
// VPAND performs "Packed Bitwise Logical AND".
//
// Mnemonic        : VPAND
// Supported forms : (4 forms)
//
//    * VPAND xmm, xmm, xmm     [AVX]
//    * VPAND m128, xmm, xmm    [AVX]
//    * VPAND ymm, ymm, ymm     [AVX2]
//    * VPAND m256, ymm, ymm    [AVX2]
//
// Operands are passed source-first, destination last, exactly as in the
// forms above: v0 is the register/memory source, v1 the second source
// register, and v2 the destination. Only VEX (non-EVEX) encodings exist
// for this mnemonic; see VPANDD/VPANDQ for the AVX-512 variants. Panics
// if the operand combination matches none of the supported forms.
func (self *Program) VPAND(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPAND", 3, Operands { v0, v1, v2 })
    // Each matching operand-type combination below registers one candidate
    // encoder via p.add; the actual bytes are emitted later at assembly time.
    // VPAND xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))     // VEX prefix (flag 1 here vs 5 in the ymm forms — presumably the 256-bit L bit; see vex2)
            m.emit(0xdb)                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))  // ModRM: register-direct, reg=v2 (dest), rm=v0 (src)
        })
    }
    // VPAND m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xdb)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)             // ModRM+SIB+disp for the memory operand
        })
    }
    // VPAND ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xdb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPAND m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xdb)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPAND")  // no supported form matched the supplied operand types
    }
    return p
}
 63455  
// VPANDD performs "Bitwise Logical AND of Packed Doubleword Integers".
//
// Mnemonic        : VPANDD
// Supported forms : (6 forms)
//
//    * VPANDD m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPANDD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPANDD m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPANDD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPANDD m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPANDD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Operands are passed source-first, destination last, exactly as in the
// forms above: v0 is the register/memory source (the memory forms accept
// an embedded 32-bit broadcast), v1 the second source register, and v2
// the destination (maskable with {k}{z}). All forms are EVEX-only.
// Panics if the operand combination matches none of the supported forms.
func (self *Program) VPANDD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPANDD", 3, Operands { v0, v1, v2 })
    // Each matching operand-type combination below registers one candidate
    // encoder via p.add; the actual bytes are emitted later at assembly time.
    // VPANDD m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))  // EVEX prefix helper; bcode(v[0]) sets the broadcast bit when v0 is a {1toN} operand
            m.emit(0xdb)                                                                                           // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)                                                                    // ModRM+SIB+disp; disp8 compressed by the 64-byte vector width
        })
    }
    // VPANDD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // EVEX P0: inverted high-register extension bits (R/B/R')
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // EVEX P1: inverted vvvv field encodes second source v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // EVEX P2: z bit, V', mask reg; 0x40 = 512-bit length (0x20=256, 0x00=128)
            m.emit(0xdb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: register-direct, reg=v2 (dest), rm=v0 (src)
        })
    }
    // VPANDD m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xdb)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPANDD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xdb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPANDD m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xdb)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPANDD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xdb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPANDD")  // no supported form matched the supplied operand types
    }
    return p
}
 63544  
// VPANDN performs "Packed Bitwise Logical AND NOT".
//
// Mnemonic        : VPANDN
// Supported forms : (4 forms)
//
//    * VPANDN xmm, xmm, xmm     [AVX]
//    * VPANDN m128, xmm, xmm    [AVX]
//    * VPANDN ymm, ymm, ymm     [AVX2]
//    * VPANDN m256, ymm, ymm    [AVX2]
//
func (self *Program) VPANDN(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPANDN", 3, Operands { v0, v1, v2 })
    // Each operand form that matches registers one candidate encoder via
    // p.add; if no form matches, p.len stays 0 and we panic at the bottom.
    // VPANDN xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Two-byte VEX prefix (first arg 1 for the 128-bit forms, 5 for
            // the 256-bit forms below), opcode 0xDF, then a register-direct
            // ModRM byte: mod=11, reg=dst (v[2]), rm=src (v[0]).
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xdf)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPANDN m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: mrsd emits ModRM/SIB/displacement for addr(v[0]);
            // the trailing 1 is the displacement scale used by VEX forms.
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xdf)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPANDN ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xdf)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPANDN m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xdf)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPANDN")
    }
    return p
}
 63602  
// VPANDND performs "Bitwise Logical AND NOT of Packed Doubleword Integers".
//
// Mnemonic        : VPANDND
// Supported forms : (6 forms)
//
//    * VPANDND m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPANDND zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPANDND m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPANDND xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPANDND m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPANDND ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPANDND(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPANDND", 3, Operands { v0, v1, v2 })
    // Every matching form registers one candidate encoder; no match panics
    // at the bottom. Memory forms call m.evex/m.mrsd; register forms emit
    // the 4-byte EVEX prefix by hand (0x62 escape + three payload bytes).
    // VPANDND m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // mrsd's trailing 64 matches the 512-bit operand width
            // (EVEX disp8*N displacement scaling); bcode(v[0]) carries
            // the m32bcst broadcast bit.
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xdf)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPANDND zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // The final |0x40 selects the 512-bit vector length (sibling
            // forms use 0x20 for 256-bit and 0x00 for 128-bit); last byte
            // is the register-direct ModRM (0xC0 | dst<<3 | src).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xdf)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPANDND m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xdf)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPANDND xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xdf)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPANDND m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xdf)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPANDND ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xdf)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPANDND")
    }
    return p
}
 63691  
// VPANDNQ performs "Bitwise Logical AND NOT of Packed Quadword Integers".
//
// Mnemonic        : VPANDNQ
// Supported forms : (6 forms)
//
//    * VPANDNQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPANDNQ zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPANDNQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPANDNQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPANDNQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPANDNQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPANDNQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPANDNQ", 3, Operands { v0, v1, v2 })
    // Same shape as VPANDND, but the quadword variant: evex's second arg is
    // 0x85 (vs 0x05) and the hand-emitted prefix uses 0xfd (vs 0x7d) —
    // presumably the EVEX.W bit selecting 64-bit element width.
    // VPANDNQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Trailing 64 = operand width for disp8*N scaling; bcode(v[0])
            // carries the m64bcst broadcast bit.
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xdf)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPANDNQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-emitted EVEX prefix; |0x40 selects the 512-bit length
            // (0x20/0x00 in the ymm/xmm forms), last byte is ModRM.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xdf)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPANDNQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xdf)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPANDNQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xdf)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPANDNQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xdf)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPANDNQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xdf)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPANDNQ")
    }
    return p
}
 63780  
// VPANDQ performs "Bitwise Logical AND of Packed Quadword Integers".
//
// Mnemonic        : VPANDQ
// Supported forms : (6 forms)
//
//    * VPANDQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPANDQ zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPANDQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPANDQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPANDQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPANDQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPANDQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPANDQ", 3, Operands { v0, v1, v2 })
    // Identical structure to VPANDNQ but with opcode 0xDB (AND) instead of
    // 0xDF (ANDN). Each matched form adds one candidate encoder.
    // VPANDQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: EVEX prefix via m.evex, ModRM/SIB/disp via
            // m.mrsd with a 64-byte scale for the 512-bit operand.
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xdb)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPANDQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-emitted EVEX prefix; |0x40 = 512-bit
            // length (0x20/0x00 in the ymm/xmm forms below).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xdb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPANDQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xdb)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPANDQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xdb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPANDQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xdb)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPANDQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xdb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPANDQ")
    }
    return p
}
 63869  
// VPAVGB performs "Average Packed Byte Integers".
//
// Mnemonic        : VPAVGB
// Supported forms : (10 forms)
//
//    * VPAVGB xmm, xmm, xmm           [AVX]
//    * VPAVGB m128, xmm, xmm          [AVX]
//    * VPAVGB ymm, ymm, ymm           [AVX2]
//    * VPAVGB m256, ymm, ymm          [AVX2]
//    * VPAVGB zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPAVGB m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPAVGB xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPAVGB m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPAVGB ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPAVGB m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
func (self *Program) VPAVGB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPAVGB", 3, Operands { v0, v1, v2 })
    // VEX forms first (opcode 0xE0), then EVEX forms for masked/zeroing
    // destinations. The EVEX memory forms pass a literal 0 broadcast bit
    // since the byte variant has no embedded-broadcast form.
    // VPAVGB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Two-byte VEX prefix (1 = 128-bit forms, 5 = 256-bit forms),
            // opcode, register-direct ModRM (0xC0 | dst<<3 | src).
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xe0)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPAVGB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xe0)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPAVGB ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xe0)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPAVGB m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xe0)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPAVGB zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-emitted EVEX prefix; |0x40 = 512-bit vector length
            // (0x20/0x00 in the ymm/xmm EVEX forms below).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xe0)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPAVGB m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Trailing 64/32/16 in mrsd matches the memory operand width
            // (EVEX disp8*N displacement scaling).
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe0)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPAVGB xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xe0)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPAVGB m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe0)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPAVGB ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xe0)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPAVGB m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe0)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPAVGB")
    }
    return p
}
 64002  
// VPAVGW performs "Average Packed Word Integers".
//
// Mnemonic        : VPAVGW
// Supported forms : (10 forms)
//
//    * VPAVGW xmm, xmm, xmm           [AVX]
//    * VPAVGW m128, xmm, xmm          [AVX]
//    * VPAVGW ymm, ymm, ymm           [AVX2]
//    * VPAVGW m256, ymm, ymm          [AVX2]
//    * VPAVGW zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPAVGW m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPAVGW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPAVGW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPAVGW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPAVGW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
func (self *Program) VPAVGW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPAVGW", 3, Operands { v0, v1, v2 })
    // Mirrors VPAVGB with opcode 0xE3 instead of 0xE0: VEX forms first,
    // then EVEX forms for masked/zeroing destinations; the word variant
    // also has no embedded-broadcast form (literal 0 broadcast bit).
    // VPAVGW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Two-byte VEX prefix (1 = 128-bit forms, 5 = 256-bit forms),
            // opcode, register-direct ModRM (0xC0 | dst<<3 | src).
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xe3)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPAVGW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xe3)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPAVGW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xe3)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPAVGW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xe3)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPAVGW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-emitted EVEX prefix; |0x40 = 512-bit vector length
            // (0x20/0x00 in the ymm/xmm EVEX forms below).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xe3)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPAVGW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Trailing 64/32/16 in mrsd matches the memory operand width
            // (EVEX disp8*N displacement scaling).
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe3)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPAVGW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xe3)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPAVGW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe3)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPAVGW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xe3)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPAVGW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe3)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPAVGW")
    }
    return p
}
 64135  
// VPBLENDD performs "Blend Packed Doublewords".
//
// Mnemonic        : VPBLENDD
// Supported forms : (4 forms)
//
//    * VPBLENDD imm8, xmm, xmm, xmm     [AVX2]
//    * VPBLENDD imm8, m128, xmm, xmm    [AVX2]
//    * VPBLENDD imm8, ymm, ymm, ymm     [AVX2]
//    * VPBLENDD imm8, m256, ymm, ymm    [AVX2]
//
func (self *Program) VPBLENDD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPBLENDD", 4, Operands { v0, v1, v2, v3 })
    // Four-operand instruction: v0 is the imm8 blend mask, v1 the source
    // (reg or mem), v2 the second source, v3 the destination. All forms use
    // a three-byte VEX prefix, opcode 0x02, and a trailing imm8.
    // VPBLENDD imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Three-byte VEX prefix emitted by hand (0xC4 escape, then two
            // payload bytes folding in the operands' high/low register
            // bits), opcode, register-direct ModRM, then the imm8 mask.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x02)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPBLENDD imm8, m128, xmm, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form uses the vex3 helper instead of hand-emitting,
            // and mrsd for the ModRM/SIB/displacement.
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x02)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPBLENDD imm8, ymm, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit form: third prefix byte base 0x7d vs 0x79 in the
            // 128-bit form — presumably the VEX.L length bit.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit(0x02)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPBLENDD imm8, m256, ymm, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x02)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPBLENDD")
    }
    return p
}
 64201  
// VPBLENDMB performs "Blend Byte Vectors Using an OpMask Control".
//
// Mnemonic        : VPBLENDMB
// Supported forms : (6 forms)
//
//    * VPBLENDMB zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPBLENDMB m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPBLENDMB xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPBLENDMB m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPBLENDMB ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPBLENDMB m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Operand order follows AT&T convention (source first, destination last).
// Each matching form appends one candidate encoder closure to the
// instruction; if no form matches, the function panics.
func (self *Program) VPBLENDMB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPBLENDMB", 3, Operands { v0, v1, v2 })
    // VPBLENDMB zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-built 4-byte EVEX prefix (0x62 escape),
            // then opcode 0x66 and a register-register ModRM byte.
            // The 0x40 in the fourth prefix byte selects 512-bit vector length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x66)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPBLENDMB m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: EVEX prefix via helper, opcode, then
            // ModRM/SIB/displacement with disp8 compression scale 64.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x66)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPBLENDMB xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit register form (vector length bits 0x00).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x66)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPBLENDMB m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit memory form, disp8 scale 16.
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x66)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPBLENDMB ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form (vector length bits 0x20).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x66)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPBLENDMB m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form, disp8 scale 32.
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x66)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No operand form matched: reject the combination.
    if p.len == 0 {
        panic("invalid operands for VPBLENDMB")
    }
    return p
}
 64290  
// VPBLENDMD performs "Blend Doubleword Vectors Using an OpMask Control".
//
// Mnemonic        : VPBLENDMD
// Supported forms : (6 forms)
//
//    * VPBLENDMD m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPBLENDMD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPBLENDMD m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPBLENDMD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPBLENDMD m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPBLENDMD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Operand order follows AT&T convention (source first, destination last).
// Unlike the byte/word variants, memory operands support 32-bit embedded
// broadcast (bcode(v[0]) feeds the EVEX broadcast bit).
func (self *Program) VPBLENDMD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPBLENDMD", 3, Operands { v0, v1, v2 })
    // VPBLENDMD m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 512-bit memory form: EVEX prefix, opcode 0x64, then
            // ModRM/SIB/displacement with disp8 compression scale 64.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x64)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPBLENDMD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-built 4-byte EVEX prefix (0x62 escape),
            // opcode 0x64, register-register ModRM. 0x40 = 512-bit length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x64)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPBLENDMD m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit memory form, disp8 scale 16.
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x64)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPBLENDMD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit register form (vector length bits 0x00).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x64)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPBLENDMD m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form, disp8 scale 32.
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x64)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPBLENDMD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form (vector length bits 0x20).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x64)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand form matched: reject the combination.
    if p.len == 0 {
        panic("invalid operands for VPBLENDMD")
    }
    return p
}
 64379  
// VPBLENDMQ performs "Blend Quadword Vectors Using an OpMask Control".
//
// Mnemonic        : VPBLENDMQ
// Supported forms : (6 forms)
//
//    * VPBLENDMQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPBLENDMQ zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPBLENDMQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPBLENDMQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPBLENDMQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPBLENDMQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Operand order follows AT&T convention (source first, destination last).
// The quadword variant differs from VPBLENDMD by the EVEX.W bit: 0x85
// instead of 0x05 in m.evex calls, and prefix byte 0xfd instead of 0x7d
// in the hand-built register forms. Memory operands support 64-bit
// embedded broadcast.
func (self *Program) VPBLENDMQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPBLENDMQ", 3, Operands { v0, v1, v2 })
    // VPBLENDMQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 512-bit memory form, disp8 compression scale 64.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x64)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPBLENDMQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-built 4-byte EVEX prefix (0x62 escape),
            // opcode 0x64, register-register ModRM. 0x40 = 512-bit length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x64)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPBLENDMQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit memory form, disp8 scale 16.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x64)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPBLENDMQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit register form (vector length bits 0x00).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x64)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPBLENDMQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form, disp8 scale 32.
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x64)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPBLENDMQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form (vector length bits 0x20).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x64)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand form matched: reject the combination.
    if p.len == 0 {
        panic("invalid operands for VPBLENDMQ")
    }
    return p
}
 64468  
// VPBLENDMW performs "Blend Word Vectors Using an OpMask Control".
//
// Mnemonic        : VPBLENDMW
// Supported forms : (6 forms)
//
//    * VPBLENDMW zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPBLENDMW m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPBLENDMW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPBLENDMW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPBLENDMW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPBLENDMW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Operand order follows AT&T convention (source first, destination last).
// The word variant shares opcode 0x66 with VPBLENDMB but differs by the
// EVEX.W bit (0x85 / 0xfd instead of 0x05 / 0x7d); no embedded broadcast
// is available for byte/word element sizes.
func (self *Program) VPBLENDMW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPBLENDMW", 3, Operands { v0, v1, v2 })
    // VPBLENDMW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-built 4-byte EVEX prefix (0x62 escape),
            // opcode 0x66, register-register ModRM. 0x40 = 512-bit length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x66)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPBLENDMW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 512-bit memory form, disp8 compression scale 64.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x66)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPBLENDMW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit register form (vector length bits 0x00).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x66)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPBLENDMW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit memory form, disp8 scale 16.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x66)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPBLENDMW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form (vector length bits 0x20).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x66)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPBLENDMW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form, disp8 scale 32.
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x66)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No operand form matched: reject the combination.
    if p.len == 0 {
        panic("invalid operands for VPBLENDMW")
    }
    return p
}
 64557  
// VPBLENDVB performs "Variable Blend Packed Bytes".
//
// Mnemonic        : VPBLENDVB
// Supported forms : (4 forms)
//
//    * VPBLENDVB xmm, xmm, xmm, xmm     [AVX]
//    * VPBLENDVB xmm, m128, xmm, xmm    [AVX]
//    * VPBLENDVB ymm, ymm, ymm, ymm     [AVX2]
//    * VPBLENDVB ymm, m256, ymm, ymm    [AVX2]
//
// Operand order follows AT&T convention: v0 is the mask register, v1/v2
// the sources, v3 the destination. This is a VEX-encoded instruction;
// the mask register is carried in the trailing is4 immediate byte
// (hlcode(v[0]) << 4).
func (self *Program) VPBLENDVB(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPBLENDVB", 4, Operands { v0, v1, v2, v3 })
    // VPBLENDVB xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-built 3-byte VEX prefix (0xc4 escape),
            // opcode 0x4c, ModRM, then the is4 byte selecting the mask.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x4c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VPBLENDVB xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: VEX prefix via helper, opcode, ModRM/SIB/disp,
            // then the is4 mask byte.
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x4c)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VPBLENDVB ymm, ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form (VEX.L set via 0x7d prefix byte).
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit(0x4c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VPBLENDVB ymm, m256, ymm, ymm
    if isYMM(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form.
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x4c)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No operand form matched: reject the combination.
    if p.len == 0 {
        panic("invalid operands for VPBLENDVB")
    }
    return p
}
 64623  
// VPBLENDW performs "Blend Packed Words".
//
// Mnemonic        : VPBLENDW
// Supported forms : (4 forms)
//
//    * VPBLENDW imm8, xmm, xmm, xmm     [AVX]
//    * VPBLENDW imm8, m128, xmm, xmm    [AVX]
//    * VPBLENDW imm8, ymm, ymm, ymm     [AVX2]
//    * VPBLENDW imm8, m256, ymm, ymm    [AVX2]
//
// Operand order follows AT&T convention: v0 is the 8-bit blend-control
// immediate, v1/v2 the sources, v3 the destination. VEX-encoded with
// opcode 0x0e and a trailing immediate byte.
func (self *Program) VPBLENDW(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPBLENDW", 4, Operands { v0, v1, v2, v3 })
    // VPBLENDW imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-built 3-byte VEX prefix (0xc4 escape),
            // opcode 0x0e, ModRM, then the imm8 control byte.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x0e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPBLENDW imm8, m128, xmm, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: VEX prefix via helper, opcode, ModRM/SIB/disp,
            // then the imm8 control byte.
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x0e)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPBLENDW imm8, ymm, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form (VEX.L set via 0x7d prefix byte).
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit(0x0e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPBLENDW imm8, m256, ymm, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form.
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x0e)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No operand form matched: reject the combination.
    if p.len == 0 {
        panic("invalid operands for VPBLENDW")
    }
    return p
}
 64689  
// VPBROADCASTB performs "Broadcast Byte Integer".
//
// Mnemonic        : VPBROADCASTB
// Supported forms : (13 forms)
//
//    * VPBROADCASTB xmm, xmm          [AVX2]
//    * VPBROADCASTB m8, xmm           [AVX2]
//    * VPBROADCASTB xmm, ymm          [AVX2]
//    * VPBROADCASTB m8, ymm           [AVX2]
//    * VPBROADCASTB r32, zmm{k}{z}    [AVX512BW]
//    * VPBROADCASTB xmm, zmm{k}{z}    [AVX512BW]
//    * VPBROADCASTB m8, zmm{k}{z}     [AVX512BW]
//    * VPBROADCASTB r32, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPBROADCASTB r32, ymm{k}{z}    [AVX512BW,AVX512VL]
//    * VPBROADCASTB xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPBROADCASTB xmm, ymm{k}{z}    [AVX512BW,AVX512VL]
//    * VPBROADCASTB m8, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPBROADCASTB m8, ymm{k}{z}     [AVX512BW,AVX512VL]
//
// Operand order follows AT&T convention (source first, destination last).
// AVX2 forms use VEX encoding with opcode 0x78; AVX-512 forms use EVEX,
// with opcode 0x78 for xmm/m8 sources and 0x7a for the r32-source forms.
func (self *Program) VPBROADCASTB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPBROADCASTB", 2, Operands { v0, v1 })
    // VPBROADCASTB xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX register form: 3-byte VEX prefix (0xc4 escape),
            // opcode 0x78, register-register ModRM.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTB m8, xmm
    if isM8(v0) && isXMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX memory form: prefix via helper, opcode, ModRM/SIB/disp.
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x78)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPBROADCASTB xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit VEX register form (VEX.L set via 0x7d prefix byte).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTB m8, ymm
    if isM8(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit VEX memory form.
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x78)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPBROADCASTB r32, zmm{k}{z}
    if isReg32(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form broadcasting from a GPR: opcode 0x7a,
            // 0x48 selects 512-bit vector length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTB xmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form from an XMM source: opcode 0x78, 512-bit.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTB m8, zmm{k}{z}
    if isM8(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form: single-byte element, disp8 scale 1.
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x78)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPBROADCASTB r32, xmm{k}{z}
    if isReg32(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX GPR-source form, 128-bit (length bits 0x08).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTB r32, ymm{k}{z}
    if isReg32(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX GPR-source form, 256-bit (length bits 0x28).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTB xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX XMM-source form, 128-bit destination.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTB xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX XMM-source form, 256-bit destination.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTB m8, xmm{k}{z}
    if isM8(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form, 128-bit destination, disp8 scale 1.
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x78)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPBROADCASTB m8, ymm{k}{z}
    if isM8(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form, 256-bit destination, disp8 scale 1.
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x78)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No operand form matched: reject the combination.
    if p.len == 0 {
        panic("invalid operands for VPBROADCASTB")
    }
    return p
}
 64868  
// VPBROADCASTD performs "Broadcast Doubleword Integer".
//
// Mnemonic        : VPBROADCASTD
// Supported forms : (13 forms)
//
//    * VPBROADCASTD xmm, xmm          [AVX2]
//    * VPBROADCASTD m32, xmm          [AVX2]
//    * VPBROADCASTD xmm, ymm          [AVX2]
//    * VPBROADCASTD m32, ymm          [AVX2]
//    * VPBROADCASTD r32, zmm{k}{z}    [AVX512F]
//    * VPBROADCASTD xmm, zmm{k}{z}    [AVX512F]
//    * VPBROADCASTD m32, zmm{k}{z}    [AVX512F]
//    * VPBROADCASTD r32, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPBROADCASTD r32, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPBROADCASTD xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPBROADCASTD xmm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPBROADCASTD m32, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPBROADCASTD m32, ymm{k}{z}    [AVX512F,AVX512VL]
//
func (self *Program) VPBROADCASTD(v0 interface{}, v1 interface{}) *Instruction {
    // Every operand pattern below that matches (v0, v1) registers one candidate
    // encoder via p.add. Patterns are not mutually exclusive (a plain XMM source
    // also satisfies the EVEX checks), so several encodings may be registered;
    // the final one is selected later, when the instruction is assembled.
    p := self.alloc("VPBROADCASTD", 2, Operands { v0, v1 })
    // VPBROADCASTD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                           // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5)) // map select with inverted R/B high-register bits
            m.emit(0x79)
            m.emit(0x58)                                           // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))          // ModRM: mod=11 (register direct), reg=dst, rm=src
        })
    }
    // VPBROADCASTD m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x58)
            m.mrsd(lcode(v[1]), addr(v[0]), 1) // VEX memory forms use scale 1 (no disp8*N compression)
        })
    }
    // VPBROADCASTD xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTD m32, ymm
    if isM32(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x58)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPBROADCASTD r32, zmm{k}{z}
    if isReg32(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                           // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // inverted R/X/B/R' register-extension bits
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                        // z-bit, opmask, and vector-length bits
            m.emit(0x7c)                                                           // opcode (GPR-source broadcast)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTD xmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTD m32, zmm{k}{z}
    if isM32(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x58)
            m.mrsd(lcode(v[1]), addr(v[0]), 4) // scale 4 = 32-bit element size (EVEX disp8*N compression)
        })
    }
    // VPBROADCASTD r32, xmm{k}{z}
    if isReg32(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x7c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTD r32, ymm{k}{z}
    if isReg32(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x7c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTD xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTD m32, xmm{k}{z}
    if isM32(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x58)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VPBROADCASTD m32, ymm{k}{z}
    if isM32(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x58)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // No form matched the supplied operands: this is a caller error.
    if p.len == 0 {
        panic("invalid operands for VPBROADCASTD")
    }
    return p
}
 65047  
// VPBROADCASTMB2Q performs "Broadcast Low Byte of Mask Register to Packed Quadword Values".
//
// Mnemonic        : VPBROADCASTMB2Q
// Supported forms : (3 forms)
//
//    * VPBROADCASTMB2Q k, xmm    [AVX512CD,AVX512VL]
//    * VPBROADCASTMB2Q k, ymm    [AVX512CD,AVX512VL]
//    * VPBROADCASTMB2Q k, zmm    [AVX512CD]
//
func (self *Program) VPBROADCASTMB2Q(v0 interface{}, v1 interface{}) *Instruction {
    // All forms are EVEX-only (prefix 0x62, opcode 0x2a) and take a mask
    // register as source; they differ only in the destination vector length
    // encoded in the fourth prefix byte (0x08/0x28/0x48 for XMM/YMM/ZMM).
    p := self.alloc("VPBROADCASTMB2Q", 2, Operands { v0, v1 })
    // VPBROADCASTMB2Q k, xmm
    if isK(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                           // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // inverted register-extension bits
            m.emit(0xfe)
            m.emit(0x08)                                                           // 128-bit vector length
            m.emit(0x2a)                                                           // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                          // ModRM: register direct
        })
    }
    // VPBROADCASTMB2Q k, ymm
    if isK(v0) && isEVEXYMM(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit(0x28)
            m.emit(0x2a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTMB2Q k, zmm
    if isK(v0) && isZMM(v1) {
        self.require(ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit(0x48)
            m.emit(0x2a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands: this is a caller error.
    if p.len == 0 {
        panic("invalid operands for VPBROADCASTMB2Q")
    }
    return p
}
 65103  
// VPBROADCASTMW2D performs "Broadcast Low Word of Mask Register to Packed Doubleword Values".
//
// Mnemonic        : VPBROADCASTMW2D
// Supported forms : (3 forms)
//
//    * VPBROADCASTMW2D k, xmm    [AVX512CD,AVX512VL]
//    * VPBROADCASTMW2D k, ymm    [AVX512CD,AVX512VL]
//    * VPBROADCASTMW2D k, zmm    [AVX512CD]
//
func (self *Program) VPBROADCASTMW2D(v0 interface{}, v1 interface{}) *Instruction {
    // All forms are EVEX-only (prefix 0x62, opcode 0x3a) with a mask-register
    // source; only the destination vector length differs between them
    // (0x08/0x28/0x48 in the fourth prefix byte for XMM/YMM/ZMM).
    p := self.alloc("VPBROADCASTMW2D", 2, Operands { v0, v1 })
    // VPBROADCASTMW2D k, xmm
    if isK(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                           // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // inverted register-extension bits
            m.emit(0x7e)
            m.emit(0x08)                                                           // 128-bit vector length
            m.emit(0x3a)                                                           // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                          // ModRM: register direct
        })
    }
    // VPBROADCASTMW2D k, ymm
    if isK(v0) && isEVEXYMM(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit(0x28)
            m.emit(0x3a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTMW2D k, zmm
    if isK(v0) && isZMM(v1) {
        self.require(ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit(0x48)
            m.emit(0x3a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands: this is a caller error.
    if p.len == 0 {
        panic("invalid operands for VPBROADCASTMW2D")
    }
    return p
}
 65159  
// VPBROADCASTQ performs "Broadcast Quadword Integer".
//
// Mnemonic        : VPBROADCASTQ
// Supported forms : (13 forms)
//
//    * VPBROADCASTQ xmm, xmm          [AVX2]
//    * VPBROADCASTQ m64, xmm          [AVX2]
//    * VPBROADCASTQ xmm, ymm          [AVX2]
//    * VPBROADCASTQ m64, ymm          [AVX2]
//    * VPBROADCASTQ r64, zmm{k}{z}    [AVX512F]
//    * VPBROADCASTQ xmm, zmm{k}{z}    [AVX512F]
//    * VPBROADCASTQ m64, zmm{k}{z}    [AVX512F]
//    * VPBROADCASTQ r64, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPBROADCASTQ r64, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPBROADCASTQ xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPBROADCASTQ xmm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPBROADCASTQ m64, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPBROADCASTQ m64, ymm{k}{z}    [AVX512F,AVX512VL]
//
func (self *Program) VPBROADCASTQ(v0 interface{}, v1 interface{}) *Instruction {
    // Every operand pattern below that matches (v0, v1) registers one candidate
    // encoder via p.add. Patterns are not mutually exclusive (a plain XMM source
    // also satisfies the EVEX checks), so several encodings may be registered;
    // the final one is selected later, when the instruction is assembled.
    // Unlike VPBROADCASTD, the EVEX forms here carry the W=1 (64-bit) flag
    // (0xfd prefix byte / 0x85 evex argument) and a disp8 scale of 8.
    p := self.alloc("VPBROADCASTQ", 2, Operands { v0, v1 })
    // VPBROADCASTQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                           // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5)) // map select with inverted R/B high-register bits
            m.emit(0x79)
            m.emit(0x59)                                           // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))          // ModRM: mod=11 (register direct), reg=dst, rm=src
        })
    }
    // VPBROADCASTQ m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x59)
            m.mrsd(lcode(v[1]), addr(v[0]), 1) // VEX memory forms use scale 1 (no disp8*N compression)
        })
    }
    // VPBROADCASTQ xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTQ m64, ymm
    if isM64(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x59)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPBROADCASTQ r64, zmm{k}{z}
    if isReg64(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                           // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // inverted R/X/B/R' register-extension bits
            m.emit(0xfd)                                                           // W=1: 64-bit GPR source
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                        // z-bit, opmask, and vector-length bits
            m.emit(0x7c)                                                           // opcode (GPR-source broadcast)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTQ xmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTQ m64, zmm{k}{z}
    if isM64(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x59)
            m.mrsd(lcode(v[1]), addr(v[0]), 8) // scale 8 = 64-bit element size (EVEX disp8*N compression)
        })
    }
    // VPBROADCASTQ r64, xmm{k}{z}
    if isReg64(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x7c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTQ r64, ymm{k}{z}
    if isReg64(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x7c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTQ xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTQ xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTQ m64, xmm{k}{z}
    if isM64(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x59)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VPBROADCASTQ m64, ymm{k}{z}
    if isM64(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x59)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // No form matched the supplied operands: this is a caller error.
    if p.len == 0 {
        panic("invalid operands for VPBROADCASTQ")
    }
    return p
}
 65338  
// VPBROADCASTW performs "Broadcast Word Integer".
//
// Mnemonic        : VPBROADCASTW
// Supported forms : (13 forms)
//
//    * VPBROADCASTW xmm, xmm          [AVX2]
//    * VPBROADCASTW m16, xmm          [AVX2]
//    * VPBROADCASTW xmm, ymm          [AVX2]
//    * VPBROADCASTW m16, ymm          [AVX2]
//    * VPBROADCASTW r32, zmm{k}{z}    [AVX512BW]
//    * VPBROADCASTW xmm, zmm{k}{z}    [AVX512BW]
//    * VPBROADCASTW m16, zmm{k}{z}    [AVX512BW]
//    * VPBROADCASTW r32, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPBROADCASTW r32, ymm{k}{z}    [AVX512BW,AVX512VL]
//    * VPBROADCASTW xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPBROADCASTW xmm, ymm{k}{z}    [AVX512BW,AVX512VL]
//    * VPBROADCASTW m16, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPBROADCASTW m16, ymm{k}{z}    [AVX512BW,AVX512VL]
//
func (self *Program) VPBROADCASTW(v0 interface{}, v1 interface{}) *Instruction {
    // Every operand pattern below that matches (v0, v1) registers one candidate
    // encoder via p.add. Patterns are not mutually exclusive (a plain XMM source
    // also satisfies the EVEX checks), so several encodings may be registered;
    // the final one is selected later, when the instruction is assembled.
    // The word variant requires AVX512BW (not AVX512F) for its EVEX forms and
    // uses opcode 0x79 (vector/memory source) or 0x7b (GPR source).
    p := self.alloc("VPBROADCASTW", 2, Operands { v0, v1 })
    // VPBROADCASTW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                           // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5)) // map select with inverted R/B high-register bits
            m.emit(0x79)
            m.emit(0x79)                                           // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))          // ModRM: mod=11 (register direct), reg=dst, rm=src
        })
    }
    // VPBROADCASTW m16, xmm
    if isM16(v0) && isXMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 1) // VEX memory forms use scale 1 (no disp8*N compression)
        })
    }
    // VPBROADCASTW xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTW m16, ymm
    if isM16(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPBROADCASTW r32, zmm{k}{z}
    if isReg32(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                           // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // inverted R/X/B/R' register-extension bits
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                        // z-bit, opmask, and vector-length bits
            m.emit(0x7b)                                                           // opcode (GPR-source broadcast)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTW xmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTW m16, zmm{k}{z}
    if isM16(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 2) // scale 2 = 16-bit element size (EVEX disp8*N compression)
        })
    }
    // VPBROADCASTW r32, xmm{k}{z}
    if isReg32(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTW r32, ymm{k}{z}
    if isReg32(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTW xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTW xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTW m16, xmm{k}{z}
    if isM16(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 2)
        })
    }
    // VPBROADCASTW m16, ymm{k}{z}
    if isM16(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 2)
        })
    }
    // No form matched the supplied operands: this is a caller error.
    if p.len == 0 {
        panic("invalid operands for VPBROADCASTW")
    }
    return p
}
 65517  
// VPCLMULQDQ performs "Carry-Less Quadword Multiplication".
//
// Mnemonic        : VPCLMULQDQ
// Supported forms : (2 forms)
//
//    * VPCLMULQDQ imm8, xmm, xmm, xmm     [AVX,PCLMULQDQ]
//    * VPCLMULQDQ imm8, m128, xmm, xmm    [AVX,PCLMULQDQ]
//
func (self *Program) VPCLMULQDQ(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Operand order follows this package's convention (imm8 first, destination
    // last); the immediate selects which quadword halves of the two sources are
    // multiplied, and is emitted as the final byte of the instruction.
    p := self.alloc("VPCLMULQDQ", 4, Operands { v0, v1, v2, v3 })
    // VPCLMULQDQ imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX | ISA_PCLMULQDQ)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                           // 3-byte VEX prefix
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5)) // map select with inverted R/B high-register bits
            m.emit(0x79 ^ (hlcode(v[2]) << 3))                     // vvvv encodes the second source register (inverted)
            m.emit(0x44)                                           // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))          // ModRM: mod=11 (register direct), reg=dst, rm=src1
            m.imm1(toImmAny(v[0]))                                 // trailing imm8 selector
        })
    }
    // VPCLMULQDQ imm8, m128, xmm, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX | ISA_PCLMULQDQ)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x44)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operands: this is a caller error.
    if p.len == 0 {
        panic("invalid operands for VPCLMULQDQ")
    }
    return p
}
 65557  
 65558  // VPCMOV performs "Packed Conditional Move".
 65559  //
 65560  // Mnemonic        : VPCMOV
 65561  // Supported forms : (6 forms)
 65562  //
 65563  //    * VPCMOV xmm, xmm, xmm, xmm     [XOP]
 65564  //    * VPCMOV m128, xmm, xmm, xmm    [XOP]
 65565  //    * VPCMOV xmm, m128, xmm, xmm    [XOP]
 65566  //    * VPCMOV ymm, ymm, ymm, ymm     [XOP]
 65567  //    * VPCMOV m256, ymm, ymm, ymm    [XOP]
 65568  //    * VPCMOV ymm, m256, ymm, ymm    [XOP]
 65569  //
 65570  func (self *Program) VPCMOV(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
 65571      p := self.alloc("VPCMOV", 4, Operands { v0, v1, v2, v3 })
 65572      // VPCMOV xmm, xmm, xmm, xmm
 65573      if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
 65574          self.require(ISA_XOP)
 65575          p.domain = DomainAMDSpecific
 65576          p.add(0, func(m *_Encoding, v []interface{}) {
 65577              m.emit(0x8f)
 65578              m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
 65579              m.emit(0x78 ^ (hlcode(v[2]) << 3))
 65580              m.emit(0xa2)
 65581              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
 65582              m.emit(hlcode(v[0]) << 4)
 65583          })
 65584          p.add(0, func(m *_Encoding, v []interface{}) {
 65585              m.emit(0x8f)
 65586              m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
 65587              m.emit(0xf8 ^ (hlcode(v[2]) << 3))
 65588              m.emit(0xa2)
 65589              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
 65590              m.emit(hlcode(v[1]) << 4)
 65591          })
 65592      }
 65593      // VPCMOV m128, xmm, xmm, xmm
 65594      if isM128(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
 65595          self.require(ISA_XOP)
 65596          p.domain = DomainAMDSpecific
 65597          p.add(0, func(m *_Encoding, v []interface{}) {
 65598              m.vex3(0x8f, 0b1000, 0x80, hcode(v[3]), addr(v[0]), hlcode(v[2]))
 65599              m.emit(0xa2)
 65600              m.mrsd(lcode(v[3]), addr(v[0]), 1)
 65601              m.emit(hlcode(v[1]) << 4)
 65602          })
 65603      }
 65604      // VPCMOV xmm, m128, xmm, xmm
 65605      if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
 65606          self.require(ISA_XOP)
 65607          p.domain = DomainAMDSpecific
 65608          p.add(0, func(m *_Encoding, v []interface{}) {
 65609              m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))
 65610              m.emit(0xa2)
 65611              m.mrsd(lcode(v[3]), addr(v[1]), 1)
 65612              m.emit(hlcode(v[0]) << 4)
 65613          })
 65614      }
 65615      // VPCMOV ymm, ymm, ymm, ymm
 65616      if isYMM(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
 65617          self.require(ISA_XOP)
 65618          p.domain = DomainAMDSpecific
 65619          p.add(0, func(m *_Encoding, v []interface{}) {
 65620              m.emit(0x8f)
 65621              m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
 65622              m.emit(0x7c ^ (hlcode(v[2]) << 3))
 65623              m.emit(0xa2)
 65624              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
 65625              m.emit(hlcode(v[0]) << 4)
 65626          })
 65627          p.add(0, func(m *_Encoding, v []interface{}) {
 65628              m.emit(0x8f)
 65629              m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
 65630              m.emit(0xfc ^ (hlcode(v[2]) << 3))
 65631              m.emit(0xa2)
 65632              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
 65633              m.emit(hlcode(v[1]) << 4)
 65634          })
 65635      }
 65636      // VPCMOV m256, ymm, ymm, ymm
 65637      if isM256(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
 65638          self.require(ISA_XOP)
 65639          p.domain = DomainAMDSpecific
 65640          p.add(0, func(m *_Encoding, v []interface{}) {
 65641              m.vex3(0x8f, 0b1000, 0x84, hcode(v[3]), addr(v[0]), hlcode(v[2]))
 65642              m.emit(0xa2)
 65643              m.mrsd(lcode(v[3]), addr(v[0]), 1)
 65644              m.emit(hlcode(v[1]) << 4)
 65645          })
 65646      }
 65647      // VPCMOV ymm, m256, ymm, ymm
 65648      if isYMM(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
 65649          self.require(ISA_XOP)
 65650          p.domain = DomainAMDSpecific
 65651          p.add(0, func(m *_Encoding, v []interface{}) {
 65652              m.vex3(0x8f, 0b1000, 0x04, hcode(v[3]), addr(v[1]), hlcode(v[2]))
 65653              m.emit(0xa2)
 65654              m.mrsd(lcode(v[3]), addr(v[1]), 1)
 65655              m.emit(hlcode(v[0]) << 4)
 65656          })
 65657      }
 65658      if p.len == 0 {
 65659          panic("invalid operands for VPCMOV")
 65660      }
 65661      return p
 65662  }
 65663  
 65664  // VPCMPB performs "Compare Packed Signed Byte Values".
 65665  //
 65666  // Mnemonic        : VPCMPB
 65667  // Supported forms : (6 forms)
 65668  //
 65669  //    * VPCMPB imm8, zmm, zmm, k{k}     [AVX512BW]
 65670  //    * VPCMPB imm8, m512, zmm, k{k}    [AVX512BW]
 65671  //    * VPCMPB imm8, xmm, xmm, k{k}     [AVX512BW,AVX512VL]
 65672  //    * VPCMPB imm8, m128, xmm, k{k}    [AVX512BW,AVX512VL]
 65673  //    * VPCMPB imm8, ymm, ymm, k{k}     [AVX512BW,AVX512VL]
 65674  //    * VPCMPB imm8, m256, ymm, k{k}    [AVX512BW,AVX512VL]
 65675  //
 65676  func (self *Program) VPCMPB(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
 65677      p := self.alloc("VPCMPB", 4, Operands { v0, v1, v2, v3 })
 65678      // VPCMPB imm8, zmm, zmm, k{k}
 65679      if isImm8(v0) && isZMM(v1) && isZMM(v2) && isKk(v3) {
 65680          self.require(ISA_AVX512BW)
 65681          p.domain = DomainAVX
 65682          p.add(0, func(m *_Encoding, v []interface{}) {
 65683              m.emit(0x62)
 65684              m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
 65685              m.emit(0x7d ^ (hlcode(v[2]) << 3))
 65686              m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
 65687              m.emit(0x3f)
 65688              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
 65689              m.imm1(toImmAny(v[0]))
 65690          })
 65691      }
 65692      // VPCMPB imm8, m512, zmm, k{k}
 65693      if isImm8(v0) && isM512(v1) && isZMM(v2) && isKk(v3) {
 65694          self.require(ISA_AVX512BW)
 65695          p.domain = DomainAVX
 65696          p.add(0, func(m *_Encoding, v []interface{}) {
 65697              m.evex(0b11, 0x05, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, 0)
 65698              m.emit(0x3f)
 65699              m.mrsd(lcode(v[3]), addr(v[1]), 64)
 65700              m.imm1(toImmAny(v[0]))
 65701          })
 65702      }
 65703      // VPCMPB imm8, xmm, xmm, k{k}
 65704      if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isKk(v3) {
 65705          self.require(ISA_AVX512VL | ISA_AVX512BW)
 65706          p.domain = DomainAVX
 65707          p.add(0, func(m *_Encoding, v []interface{}) {
 65708              m.emit(0x62)
 65709              m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
 65710              m.emit(0x7d ^ (hlcode(v[2]) << 3))
 65711              m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
 65712              m.emit(0x3f)
 65713              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
 65714              m.imm1(toImmAny(v[0]))
 65715          })
 65716      }
 65717      // VPCMPB imm8, m128, xmm, k{k}
 65718      if isImm8(v0) && isM128(v1) && isEVEXXMM(v2) && isKk(v3) {
 65719          self.require(ISA_AVX512VL | ISA_AVX512BW)
 65720          p.domain = DomainAVX
 65721          p.add(0, func(m *_Encoding, v []interface{}) {
 65722              m.evex(0b11, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, 0)
 65723              m.emit(0x3f)
 65724              m.mrsd(lcode(v[3]), addr(v[1]), 16)
 65725              m.imm1(toImmAny(v[0]))
 65726          })
 65727      }
 65728      // VPCMPB imm8, ymm, ymm, k{k}
 65729      if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isKk(v3) {
 65730          self.require(ISA_AVX512VL | ISA_AVX512BW)
 65731          p.domain = DomainAVX
 65732          p.add(0, func(m *_Encoding, v []interface{}) {
 65733              m.emit(0x62)
 65734              m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
 65735              m.emit(0x7d ^ (hlcode(v[2]) << 3))
 65736              m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
 65737              m.emit(0x3f)
 65738              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
 65739              m.imm1(toImmAny(v[0]))
 65740          })
 65741      }
 65742      // VPCMPB imm8, m256, ymm, k{k}
 65743      if isImm8(v0) && isM256(v1) && isEVEXYMM(v2) && isKk(v3) {
 65744          self.require(ISA_AVX512VL | ISA_AVX512BW)
 65745          p.domain = DomainAVX
 65746          p.add(0, func(m *_Encoding, v []interface{}) {
 65747              m.evex(0b11, 0x05, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, 0)
 65748              m.emit(0x3f)
 65749              m.mrsd(lcode(v[3]), addr(v[1]), 32)
 65750              m.imm1(toImmAny(v[0]))
 65751          })
 65752      }
 65753      if p.len == 0 {
 65754          panic("invalid operands for VPCMPB")
 65755      }
 65756      return p
 65757  }
 65758  
 65759  // VPCMPD performs "Compare Packed Signed Doubleword Values".
 65760  //
 65761  // Mnemonic        : VPCMPD
 65762  // Supported forms : (6 forms)
 65763  //
 65764  //    * VPCMPD imm8, m512/m32bcst, zmm, k{k}    [AVX512F]
 65765  //    * VPCMPD imm8, zmm, zmm, k{k}             [AVX512F]
 65766  //    * VPCMPD imm8, m128/m32bcst, xmm, k{k}    [AVX512F,AVX512VL]
 65767  //    * VPCMPD imm8, xmm, xmm, k{k}             [AVX512F,AVX512VL]
 65768  //    * VPCMPD imm8, m256/m32bcst, ymm, k{k}    [AVX512F,AVX512VL]
 65769  //    * VPCMPD imm8, ymm, ymm, k{k}             [AVX512F,AVX512VL]
 65770  //
 65771  func (self *Program) VPCMPD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
 65772      p := self.alloc("VPCMPD", 4, Operands { v0, v1, v2, v3 })
 65773      // VPCMPD imm8, m512/m32bcst, zmm, k{k}
 65774      if isImm8(v0) && isM512M32bcst(v1) && isZMM(v2) && isKk(v3) {
 65775          self.require(ISA_AVX512F)
 65776          p.domain = DomainAVX
 65777          p.add(0, func(m *_Encoding, v []interface{}) {
 65778              m.evex(0b11, 0x05, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, bcode(v[1]))
 65779              m.emit(0x1f)
 65780              m.mrsd(lcode(v[3]), addr(v[1]), 64)
 65781              m.imm1(toImmAny(v[0]))
 65782          })
 65783      }
 65784      // VPCMPD imm8, zmm, zmm, k{k}
 65785      if isImm8(v0) && isZMM(v1) && isZMM(v2) && isKk(v3) {
 65786          self.require(ISA_AVX512F)
 65787          p.domain = DomainAVX
 65788          p.add(0, func(m *_Encoding, v []interface{}) {
 65789              m.emit(0x62)
 65790              m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
 65791              m.emit(0x7d ^ (hlcode(v[2]) << 3))
 65792              m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
 65793              m.emit(0x1f)
 65794              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
 65795              m.imm1(toImmAny(v[0]))
 65796          })
 65797      }
 65798      // VPCMPD imm8, m128/m32bcst, xmm, k{k}
 65799      if isImm8(v0) && isM128M32bcst(v1) && isEVEXXMM(v2) && isKk(v3) {
 65800          self.require(ISA_AVX512VL | ISA_AVX512F)
 65801          p.domain = DomainAVX
 65802          p.add(0, func(m *_Encoding, v []interface{}) {
 65803              m.evex(0b11, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, bcode(v[1]))
 65804              m.emit(0x1f)
 65805              m.mrsd(lcode(v[3]), addr(v[1]), 16)
 65806              m.imm1(toImmAny(v[0]))
 65807          })
 65808      }
 65809      // VPCMPD imm8, xmm, xmm, k{k}
 65810      if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isKk(v3) {
 65811          self.require(ISA_AVX512VL | ISA_AVX512F)
 65812          p.domain = DomainAVX
 65813          p.add(0, func(m *_Encoding, v []interface{}) {
 65814              m.emit(0x62)
 65815              m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
 65816              m.emit(0x7d ^ (hlcode(v[2]) << 3))
 65817              m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
 65818              m.emit(0x1f)
 65819              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
 65820              m.imm1(toImmAny(v[0]))
 65821          })
 65822      }
 65823      // VPCMPD imm8, m256/m32bcst, ymm, k{k}
 65824      if isImm8(v0) && isM256M32bcst(v1) && isEVEXYMM(v2) && isKk(v3) {
 65825          self.require(ISA_AVX512VL | ISA_AVX512F)
 65826          p.domain = DomainAVX
 65827          p.add(0, func(m *_Encoding, v []interface{}) {
 65828              m.evex(0b11, 0x05, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, bcode(v[1]))
 65829              m.emit(0x1f)
 65830              m.mrsd(lcode(v[3]), addr(v[1]), 32)
 65831              m.imm1(toImmAny(v[0]))
 65832          })
 65833      }
 65834      // VPCMPD imm8, ymm, ymm, k{k}
 65835      if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isKk(v3) {
 65836          self.require(ISA_AVX512VL | ISA_AVX512F)
 65837          p.domain = DomainAVX
 65838          p.add(0, func(m *_Encoding, v []interface{}) {
 65839              m.emit(0x62)
 65840              m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
 65841              m.emit(0x7d ^ (hlcode(v[2]) << 3))
 65842              m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
 65843              m.emit(0x1f)
 65844              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
 65845              m.imm1(toImmAny(v[0]))
 65846          })
 65847      }
 65848      if p.len == 0 {
 65849          panic("invalid operands for VPCMPD")
 65850      }
 65851      return p
 65852  }
 65853  
 65854  // VPCMPEQB performs "Compare Packed Byte Data for Equality".
 65855  //
 65856  // Mnemonic        : VPCMPEQB
 65857  // Supported forms : (10 forms)
 65858  //
 65859  //    * VPCMPEQB xmm, xmm, xmm      [AVX]
 65860  //    * VPCMPEQB m128, xmm, xmm     [AVX]
 65861  //    * VPCMPEQB ymm, ymm, ymm      [AVX2]
 65862  //    * VPCMPEQB m256, ymm, ymm     [AVX2]
 65863  //    * VPCMPEQB zmm, zmm, k{k}     [AVX512BW]
 65864  //    * VPCMPEQB m512, zmm, k{k}    [AVX512BW]
 65865  //    * VPCMPEQB xmm, xmm, k{k}     [AVX512BW,AVX512VL]
 65866  //    * VPCMPEQB m128, xmm, k{k}    [AVX512BW,AVX512VL]
 65867  //    * VPCMPEQB ymm, ymm, k{k}     [AVX512BW,AVX512VL]
 65868  //    * VPCMPEQB m256, ymm, k{k}    [AVX512BW,AVX512VL]
 65869  //
 65870  func (self *Program) VPCMPEQB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
 65871      p := self.alloc("VPCMPEQB", 3, Operands { v0, v1, v2 })
 65872      // VPCMPEQB xmm, xmm, xmm
 65873      if isXMM(v0) && isXMM(v1) && isXMM(v2) {
 65874          self.require(ISA_AVX)
 65875          p.domain = DomainAVX
 65876          p.add(0, func(m *_Encoding, v []interface{}) {
 65877              m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
 65878              m.emit(0x74)
 65879              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 65880          })
 65881      }
 65882      // VPCMPEQB m128, xmm, xmm
 65883      if isM128(v0) && isXMM(v1) && isXMM(v2) {
 65884          self.require(ISA_AVX)
 65885          p.domain = DomainAVX
 65886          p.add(0, func(m *_Encoding, v []interface{}) {
 65887              m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 65888              m.emit(0x74)
 65889              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 65890          })
 65891      }
 65892      // VPCMPEQB ymm, ymm, ymm
 65893      if isYMM(v0) && isYMM(v1) && isYMM(v2) {
 65894          self.require(ISA_AVX2)
 65895          p.domain = DomainAVX
 65896          p.add(0, func(m *_Encoding, v []interface{}) {
 65897              m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
 65898              m.emit(0x74)
 65899              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 65900          })
 65901      }
 65902      // VPCMPEQB m256, ymm, ymm
 65903      if isM256(v0) && isYMM(v1) && isYMM(v2) {
 65904          self.require(ISA_AVX2)
 65905          p.domain = DomainAVX
 65906          p.add(0, func(m *_Encoding, v []interface{}) {
 65907              m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 65908              m.emit(0x74)
 65909              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 65910          })
 65911      }
 65912      // VPCMPEQB zmm, zmm, k{k}
 65913      if isZMM(v0) && isZMM(v1) && isKk(v2) {
 65914          self.require(ISA_AVX512BW)
 65915          p.domain = DomainAVX
 65916          p.add(0, func(m *_Encoding, v []interface{}) {
 65917              m.emit(0x62)
 65918              m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 65919              m.emit(0x7d ^ (hlcode(v[1]) << 3))
 65920              m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
 65921              m.emit(0x74)
 65922              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 65923          })
 65924      }
 65925      // VPCMPEQB m512, zmm, k{k}
 65926      if isM512(v0) && isZMM(v1) && isKk(v2) {
 65927          self.require(ISA_AVX512BW)
 65928          p.domain = DomainAVX
 65929          p.add(0, func(m *_Encoding, v []interface{}) {
 65930              m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
 65931              m.emit(0x74)
 65932              m.mrsd(lcode(v[2]), addr(v[0]), 64)
 65933          })
 65934      }
 65935      // VPCMPEQB xmm, xmm, k{k}
 65936      if isEVEXXMM(v0) && isEVEXXMM(v1) && isKk(v2) {
 65937          self.require(ISA_AVX512VL | ISA_AVX512BW)
 65938          p.domain = DomainAVX
 65939          p.add(0, func(m *_Encoding, v []interface{}) {
 65940              m.emit(0x62)
 65941              m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 65942              m.emit(0x7d ^ (hlcode(v[1]) << 3))
 65943              m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
 65944              m.emit(0x74)
 65945              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 65946          })
 65947      }
 65948      // VPCMPEQB m128, xmm, k{k}
 65949      if isM128(v0) && isEVEXXMM(v1) && isKk(v2) {
 65950          self.require(ISA_AVX512VL | ISA_AVX512BW)
 65951          p.domain = DomainAVX
 65952          p.add(0, func(m *_Encoding, v []interface{}) {
 65953              m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
 65954              m.emit(0x74)
 65955              m.mrsd(lcode(v[2]), addr(v[0]), 16)
 65956          })
 65957      }
 65958      // VPCMPEQB ymm, ymm, k{k}
 65959      if isEVEXYMM(v0) && isEVEXYMM(v1) && isKk(v2) {
 65960          self.require(ISA_AVX512VL | ISA_AVX512BW)
 65961          p.domain = DomainAVX
 65962          p.add(0, func(m *_Encoding, v []interface{}) {
 65963              m.emit(0x62)
 65964              m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 65965              m.emit(0x7d ^ (hlcode(v[1]) << 3))
 65966              m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
 65967              m.emit(0x74)
 65968              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 65969          })
 65970      }
 65971      // VPCMPEQB m256, ymm, k{k}
 65972      if isM256(v0) && isEVEXYMM(v1) && isKk(v2) {
 65973          self.require(ISA_AVX512VL | ISA_AVX512BW)
 65974          p.domain = DomainAVX
 65975          p.add(0, func(m *_Encoding, v []interface{}) {
 65976              m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
 65977              m.emit(0x74)
 65978              m.mrsd(lcode(v[2]), addr(v[0]), 32)
 65979          })
 65980      }
 65981      if p.len == 0 {
 65982          panic("invalid operands for VPCMPEQB")
 65983      }
 65984      return p
 65985  }
 65986  
 65987  // VPCMPEQD performs "Compare Packed Doubleword Data for Equality".
 65988  //
 65989  // Mnemonic        : VPCMPEQD
 65990  // Supported forms : (10 forms)
 65991  //
 65992  //    * VPCMPEQD xmm, xmm, xmm              [AVX]
 65993  //    * VPCMPEQD m128, xmm, xmm             [AVX]
 65994  //    * VPCMPEQD ymm, ymm, ymm              [AVX2]
 65995  //    * VPCMPEQD m256, ymm, ymm             [AVX2]
 65996  //    * VPCMPEQD m512/m32bcst, zmm, k{k}    [AVX512F]
 65997  //    * VPCMPEQD zmm, zmm, k{k}             [AVX512F]
 65998  //    * VPCMPEQD m128/m32bcst, xmm, k{k}    [AVX512F,AVX512VL]
 65999  //    * VPCMPEQD xmm, xmm, k{k}             [AVX512F,AVX512VL]
 66000  //    * VPCMPEQD m256/m32bcst, ymm, k{k}    [AVX512F,AVX512VL]
 66001  //    * VPCMPEQD ymm, ymm, k{k}             [AVX512F,AVX512VL]
 66002  //
 66003  func (self *Program) VPCMPEQD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
 66004      p := self.alloc("VPCMPEQD", 3, Operands { v0, v1, v2 })
 66005      // VPCMPEQD xmm, xmm, xmm
 66006      if isXMM(v0) && isXMM(v1) && isXMM(v2) {
 66007          self.require(ISA_AVX)
 66008          p.domain = DomainAVX
 66009          p.add(0, func(m *_Encoding, v []interface{}) {
 66010              m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
 66011              m.emit(0x76)
 66012              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 66013          })
 66014      }
 66015      // VPCMPEQD m128, xmm, xmm
 66016      if isM128(v0) && isXMM(v1) && isXMM(v2) {
 66017          self.require(ISA_AVX)
 66018          p.domain = DomainAVX
 66019          p.add(0, func(m *_Encoding, v []interface{}) {
 66020              m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 66021              m.emit(0x76)
 66022              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 66023          })
 66024      }
 66025      // VPCMPEQD ymm, ymm, ymm
 66026      if isYMM(v0) && isYMM(v1) && isYMM(v2) {
 66027          self.require(ISA_AVX2)
 66028          p.domain = DomainAVX
 66029          p.add(0, func(m *_Encoding, v []interface{}) {
 66030              m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
 66031              m.emit(0x76)
 66032              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 66033          })
 66034      }
 66035      // VPCMPEQD m256, ymm, ymm
 66036      if isM256(v0) && isYMM(v1) && isYMM(v2) {
 66037          self.require(ISA_AVX2)
 66038          p.domain = DomainAVX
 66039          p.add(0, func(m *_Encoding, v []interface{}) {
 66040              m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 66041              m.emit(0x76)
 66042              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 66043          })
 66044      }
 66045      // VPCMPEQD m512/m32bcst, zmm, k{k}
 66046      if isM512M32bcst(v0) && isZMM(v1) && isKk(v2) {
 66047          self.require(ISA_AVX512F)
 66048          p.domain = DomainAVX
 66049          p.add(0, func(m *_Encoding, v []interface{}) {
 66050              m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
 66051              m.emit(0x76)
 66052              m.mrsd(lcode(v[2]), addr(v[0]), 64)
 66053          })
 66054      }
 66055      // VPCMPEQD zmm, zmm, k{k}
 66056      if isZMM(v0) && isZMM(v1) && isKk(v2) {
 66057          self.require(ISA_AVX512F)
 66058          p.domain = DomainAVX
 66059          p.add(0, func(m *_Encoding, v []interface{}) {
 66060              m.emit(0x62)
 66061              m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 66062              m.emit(0x7d ^ (hlcode(v[1]) << 3))
 66063              m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
 66064              m.emit(0x76)
 66065              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 66066          })
 66067      }
 66068      // VPCMPEQD m128/m32bcst, xmm, k{k}
 66069      if isM128M32bcst(v0) && isEVEXXMM(v1) && isKk(v2) {
 66070          self.require(ISA_AVX512VL | ISA_AVX512F)
 66071          p.domain = DomainAVX
 66072          p.add(0, func(m *_Encoding, v []interface{}) {
 66073              m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
 66074              m.emit(0x76)
 66075              m.mrsd(lcode(v[2]), addr(v[0]), 16)
 66076          })
 66077      }
 66078      // VPCMPEQD xmm, xmm, k{k}
 66079      if isEVEXXMM(v0) && isEVEXXMM(v1) && isKk(v2) {
 66080          self.require(ISA_AVX512VL | ISA_AVX512F)
 66081          p.domain = DomainAVX
 66082          p.add(0, func(m *_Encoding, v []interface{}) {
 66083              m.emit(0x62)
 66084              m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 66085              m.emit(0x7d ^ (hlcode(v[1]) << 3))
 66086              m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
 66087              m.emit(0x76)
 66088              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 66089          })
 66090      }
 66091      // VPCMPEQD m256/m32bcst, ymm, k{k}
 66092      if isM256M32bcst(v0) && isEVEXYMM(v1) && isKk(v2) {
 66093          self.require(ISA_AVX512VL | ISA_AVX512F)
 66094          p.domain = DomainAVX
 66095          p.add(0, func(m *_Encoding, v []interface{}) {
 66096              m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
 66097              m.emit(0x76)
 66098              m.mrsd(lcode(v[2]), addr(v[0]), 32)
 66099          })
 66100      }
 66101      // VPCMPEQD ymm, ymm, k{k}
 66102      if isEVEXYMM(v0) && isEVEXYMM(v1) && isKk(v2) {
 66103          self.require(ISA_AVX512VL | ISA_AVX512F)
 66104          p.domain = DomainAVX
 66105          p.add(0, func(m *_Encoding, v []interface{}) {
 66106              m.emit(0x62)
 66107              m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 66108              m.emit(0x7d ^ (hlcode(v[1]) << 3))
 66109              m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
 66110              m.emit(0x76)
 66111              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 66112          })
 66113      }
 66114      if p.len == 0 {
 66115          panic("invalid operands for VPCMPEQD")
 66116      }
 66117      return p
 66118  }
 66119  
 66120  // VPCMPEQQ performs "Compare Packed Quadword Data for Equality".
 66121  //
 66122  // Mnemonic        : VPCMPEQQ
 66123  // Supported forms : (10 forms)
 66124  //
 66125  //    * VPCMPEQQ xmm, xmm, xmm              [AVX]
 66126  //    * VPCMPEQQ m128, xmm, xmm             [AVX]
 66127  //    * VPCMPEQQ ymm, ymm, ymm              [AVX2]
 66128  //    * VPCMPEQQ m256, ymm, ymm             [AVX2]
 66129  //    * VPCMPEQQ m512/m64bcst, zmm, k{k}    [AVX512F]
 66130  //    * VPCMPEQQ zmm, zmm, k{k}             [AVX512F]
 66131  //    * VPCMPEQQ m128/m64bcst, xmm, k{k}    [AVX512F,AVX512VL]
 66132  //    * VPCMPEQQ xmm, xmm, k{k}             [AVX512F,AVX512VL]
 66133  //    * VPCMPEQQ m256/m64bcst, ymm, k{k}    [AVX512F,AVX512VL]
 66134  //    * VPCMPEQQ ymm, ymm, k{k}             [AVX512F,AVX512VL]
 66135  //
 66136  func (self *Program) VPCMPEQQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
 66137      p := self.alloc("VPCMPEQQ", 3, Operands { v0, v1, v2 })
 66138      // VPCMPEQQ xmm, xmm, xmm
 66139      if isXMM(v0) && isXMM(v1) && isXMM(v2) {
 66140          self.require(ISA_AVX)
 66141          p.domain = DomainAVX
 66142          p.add(0, func(m *_Encoding, v []interface{}) {
 66143              m.emit(0xc4)
 66144              m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
 66145              m.emit(0x79 ^ (hlcode(v[1]) << 3))
 66146              m.emit(0x29)
 66147              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 66148          })
 66149      }
 66150      // VPCMPEQQ m128, xmm, xmm
 66151      if isM128(v0) && isXMM(v1) && isXMM(v2) {
 66152          self.require(ISA_AVX)
 66153          p.domain = DomainAVX
 66154          p.add(0, func(m *_Encoding, v []interface{}) {
 66155              m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 66156              m.emit(0x29)
 66157              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 66158          })
 66159      }
 66160      // VPCMPEQQ ymm, ymm, ymm
 66161      if isYMM(v0) && isYMM(v1) && isYMM(v2) {
 66162          self.require(ISA_AVX2)
 66163          p.domain = DomainAVX
 66164          p.add(0, func(m *_Encoding, v []interface{}) {
 66165              m.emit(0xc4)
 66166              m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
 66167              m.emit(0x7d ^ (hlcode(v[1]) << 3))
 66168              m.emit(0x29)
 66169              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 66170          })
 66171      }
 66172      // VPCMPEQQ m256, ymm, ymm
 66173      if isM256(v0) && isYMM(v1) && isYMM(v2) {
 66174          self.require(ISA_AVX2)
 66175          p.domain = DomainAVX
 66176          p.add(0, func(m *_Encoding, v []interface{}) {
 66177              m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 66178              m.emit(0x29)
 66179              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 66180          })
 66181      }
 66182      // VPCMPEQQ m512/m64bcst, zmm, k{k}
 66183      if isM512M64bcst(v0) && isZMM(v1) && isKk(v2) {
 66184          self.require(ISA_AVX512F)
 66185          p.domain = DomainAVX
 66186          p.add(0, func(m *_Encoding, v []interface{}) {
 66187              m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
 66188              m.emit(0x29)
 66189              m.mrsd(lcode(v[2]), addr(v[0]), 64)
 66190          })
 66191      }
 66192      // VPCMPEQQ zmm, zmm, k{k}
 66193      if isZMM(v0) && isZMM(v1) && isKk(v2) {
 66194          self.require(ISA_AVX512F)
 66195          p.domain = DomainAVX
 66196          p.add(0, func(m *_Encoding, v []interface{}) {
 66197              m.emit(0x62)
 66198              m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 66199              m.emit(0xfd ^ (hlcode(v[1]) << 3))
 66200              m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
 66201              m.emit(0x29)
 66202              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 66203          })
 66204      }
 66205      // VPCMPEQQ m128/m64bcst, xmm, k{k}
 66206      if isM128M64bcst(v0) && isEVEXXMM(v1) && isKk(v2) {
 66207          self.require(ISA_AVX512VL | ISA_AVX512F)
 66208          p.domain = DomainAVX
 66209          p.add(0, func(m *_Encoding, v []interface{}) {
 66210              m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
 66211              m.emit(0x29)
 66212              m.mrsd(lcode(v[2]), addr(v[0]), 16)
 66213          })
 66214      }
 66215      // VPCMPEQQ xmm, xmm, k{k}
 66216      if isEVEXXMM(v0) && isEVEXXMM(v1) && isKk(v2) {
 66217          self.require(ISA_AVX512VL | ISA_AVX512F)
 66218          p.domain = DomainAVX
 66219          p.add(0, func(m *_Encoding, v []interface{}) {
 66220              m.emit(0x62)
 66221              m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 66222              m.emit(0xfd ^ (hlcode(v[1]) << 3))
 66223              m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
 66224              m.emit(0x29)
 66225              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 66226          })
 66227      }
 66228      // VPCMPEQQ m256/m64bcst, ymm, k{k}
 66229      if isM256M64bcst(v0) && isEVEXYMM(v1) && isKk(v2) {
 66230          self.require(ISA_AVX512VL | ISA_AVX512F)
 66231          p.domain = DomainAVX
 66232          p.add(0, func(m *_Encoding, v []interface{}) {
 66233              m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
 66234              m.emit(0x29)
 66235              m.mrsd(lcode(v[2]), addr(v[0]), 32)
 66236          })
 66237      }
 66238      // VPCMPEQQ ymm, ymm, k{k}
 66239      if isEVEXYMM(v0) && isEVEXYMM(v1) && isKk(v2) {
 66240          self.require(ISA_AVX512VL | ISA_AVX512F)
 66241          p.domain = DomainAVX
 66242          p.add(0, func(m *_Encoding, v []interface{}) {
 66243              m.emit(0x62)
 66244              m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 66245              m.emit(0xfd ^ (hlcode(v[1]) << 3))
 66246              m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
 66247              m.emit(0x29)
 66248              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 66249          })
 66250      }
 66251      if p.len == 0 {
 66252          panic("invalid operands for VPCMPEQQ")
 66253      }
 66254      return p
 66255  }
 66256  
// VPCMPEQW performs "Compare Packed Word Data for Equality".
//
// Mnemonic        : VPCMPEQW
// Supported forms : (10 forms)
//
//    * VPCMPEQW xmm, xmm, xmm      [AVX]
//    * VPCMPEQW m128, xmm, xmm     [AVX]
//    * VPCMPEQW ymm, ymm, ymm      [AVX2]
//    * VPCMPEQW m256, ymm, ymm     [AVX2]
//    * VPCMPEQW zmm, zmm, k{k}     [AVX512BW]
//    * VPCMPEQW m512, zmm, k{k}    [AVX512BW]
//    * VPCMPEQW xmm, xmm, k{k}     [AVX512BW,AVX512VL]
//    * VPCMPEQW m128, xmm, k{k}    [AVX512BW,AVX512VL]
//    * VPCMPEQW ymm, ymm, k{k}     [AVX512BW,AVX512VL]
//    * VPCMPEQW m256, ymm, k{k}    [AVX512BW,AVX512VL]
//
// Operand order is AT&T style: v0 = source, v1 = second source, v2 = destination.
// Each matching form registers one encoder closure; an unmatched combination panics.
func (self *Program) VPCMPEQW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPCMPEQW", 3, Operands { v0, v1, v2 })
    // VPCMPEQW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))    // 2-byte VEX prefix, 128-bit form
            m.emit(0x75)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11 (register-direct), reg=v2, rm=v0
        })
    }
    // VPCMPEQW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 2-byte VEX prefix, 128-bit form
            m.emit(0x75)                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM/SIB/displacement for the memory operand
        })
    }
    // VPCMPEQW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))    // 2-byte VEX prefix, 256-bit form
            m.emit(0x75)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: register-direct
        })
    }
    // VPCMPEQW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 2-byte VEX prefix, 256-bit form
            m.emit(0x75)                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM/SIB/displacement for the memory operand
        })
    }
    // VPCMPEQW zmm, zmm, k{k}
    if isZMM(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix built byte-by-byte for the register-register form.
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted reg-extension bits + opcode map
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: inverted vvvv (v1) + pp
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)                       // P2: opmask aaa; 0x40 selects 512-bit length
            m.emit(0x75)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: register-direct
        })
    }
    // VPCMPEQW m512, zmm, k{k}
    if isM512(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0) // EVEX prefix, 512-bit memory form
            m.emit(0x75)                                                                       // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)                                                // memory operand; disp8 compressed by 64
        })
    }
    // VPCMPEQW xmm, xmm, k{k}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted reg-extension bits + opcode map
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: inverted vvvv (v1) + pp
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)                       // P2: opmask aaa; 0x00 selects 128-bit length
            m.emit(0x75)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: register-direct
        })
    }
    // VPCMPEQW m128, xmm, k{k}
    if isM128(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0) // EVEX prefix, 128-bit memory form
            m.emit(0x75)                                                                       // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 16)                                                // memory operand; disp8 compressed by 16
        })
    }
    // VPCMPEQW ymm, ymm, k{k}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted reg-extension bits + opcode map
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: inverted vvvv (v1) + pp
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)                       // P2: opmask aaa; 0x20 selects 256-bit length
            m.emit(0x75)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: register-direct
        })
    }
    // VPCMPEQW m256, ymm, k{k}
    if isM256(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0) // EVEX prefix, 256-bit memory form
            m.emit(0x75)                                                                       // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 32)                                                // memory operand; disp8 compressed by 32
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPCMPEQW")
    }
    return p
}
 66389  
// VPCMPESTRI performs "Packed Compare Explicit Length Strings, Return Index".
//
// Mnemonic        : VPCMPESTRI
// Supported forms : (2 forms)
//
//    * VPCMPESTRI imm8, xmm, xmm     [AVX]
//    * VPCMPESTRI imm8, m128, xmm    [AVX]
//
// v0 is the imm8 control byte, v1 the source operand, v2 the destination register.
func (self *Program) VPCMPESTRI(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPCMPESTRI", 3, Operands { v0, v1, v2 })
    // VPCMPESTRI imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix built manually.
            m.emit(0xc4)                                          // VEX escape byte
            m.emit(0xe3 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5)) // VEX byte 1: inverted R/X extension bits + map select
            m.emit(0x79)                                          // VEX byte 2: vvvv unused (all ones) + pp
            m.emit(0x61)                                          // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))         // ModRM: register-direct, reg=v2, rm=v1
            m.imm1(toImmAny(v[0]))                                // trailing imm8 control byte
        })
    }
    // VPCMPESTRI imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[2]), addr(v[1]), 0) // 3-byte VEX prefix for the memory form
            m.emit(0x61)                                         // opcode
            m.mrsd(lcode(v[2]), addr(v[1]), 1)                   // ModRM/SIB/displacement for the memory operand
            m.imm1(toImmAny(v[0]))                               // trailing imm8 control byte
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPCMPESTRI")
    }
    return p
}
 66429  
// VPCMPESTRM performs "Packed Compare Explicit Length Strings, Return Mask".
//
// Mnemonic        : VPCMPESTRM
// Supported forms : (2 forms)
//
//    * VPCMPESTRM imm8, xmm, xmm     [AVX]
//    * VPCMPESTRM imm8, m128, xmm    [AVX]
//
// v0 is the imm8 control byte, v1 the source operand, v2 the destination register.
// Identical encoding to VPCMPESTRI except for the opcode (0x60 vs 0x61).
func (self *Program) VPCMPESTRM(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPCMPESTRM", 3, Operands { v0, v1, v2 })
    // VPCMPESTRM imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix built manually.
            m.emit(0xc4)                                          // VEX escape byte
            m.emit(0xe3 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5)) // VEX byte 1: inverted R/X extension bits + map select
            m.emit(0x79)                                          // VEX byte 2: vvvv unused (all ones) + pp
            m.emit(0x60)                                          // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))         // ModRM: register-direct, reg=v2, rm=v1
            m.imm1(toImmAny(v[0]))                                // trailing imm8 control byte
        })
    }
    // VPCMPESTRM imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[2]), addr(v[1]), 0) // 3-byte VEX prefix for the memory form
            m.emit(0x60)                                         // opcode
            m.mrsd(lcode(v[2]), addr(v[1]), 1)                   // ModRM/SIB/displacement for the memory operand
            m.imm1(toImmAny(v[0]))                               // trailing imm8 control byte
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPCMPESTRM")
    }
    return p
}
 66469  
// VPCMPGTB performs "Compare Packed Signed Byte Integers for Greater Than".
//
// Mnemonic        : VPCMPGTB
// Supported forms : (10 forms)
//
//    * VPCMPGTB xmm, xmm, xmm      [AVX]
//    * VPCMPGTB m128, xmm, xmm     [AVX]
//    * VPCMPGTB ymm, ymm, ymm      [AVX2]
//    * VPCMPGTB m256, ymm, ymm     [AVX2]
//    * VPCMPGTB zmm, zmm, k{k}     [AVX512BW]
//    * VPCMPGTB m512, zmm, k{k}    [AVX512BW]
//    * VPCMPGTB xmm, xmm, k{k}     [AVX512BW,AVX512VL]
//    * VPCMPGTB m128, xmm, k{k}    [AVX512BW,AVX512VL]
//    * VPCMPGTB ymm, ymm, k{k}     [AVX512BW,AVX512VL]
//    * VPCMPGTB m256, ymm, k{k}    [AVX512BW,AVX512VL]
//
// Operand order is AT&T style: v0 = source, v1 = second source, v2 = destination.
// Same encoding scheme as VPCMPEQW but with opcode 0x64.
func (self *Program) VPCMPGTB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPCMPGTB", 3, Operands { v0, v1, v2 })
    // VPCMPGTB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))    // 2-byte VEX prefix, 128-bit form
            m.emit(0x64)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11 (register-direct), reg=v2, rm=v0
        })
    }
    // VPCMPGTB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 2-byte VEX prefix, 128-bit form
            m.emit(0x64)                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM/SIB/displacement for the memory operand
        })
    }
    // VPCMPGTB ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))    // 2-byte VEX prefix, 256-bit form
            m.emit(0x64)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: register-direct
        })
    }
    // VPCMPGTB m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 2-byte VEX prefix, 256-bit form
            m.emit(0x64)                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM/SIB/displacement for the memory operand
        })
    }
    // VPCMPGTB zmm, zmm, k{k}
    if isZMM(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix built byte-by-byte for the register-register form.
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted reg-extension bits + opcode map
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: inverted vvvv (v1) + pp
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)                       // P2: opmask aaa; 0x40 selects 512-bit length
            m.emit(0x64)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: register-direct
        })
    }
    // VPCMPGTB m512, zmm, k{k}
    if isM512(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0) // EVEX prefix, 512-bit memory form
            m.emit(0x64)                                                                       // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)                                                // memory operand; disp8 compressed by 64
        })
    }
    // VPCMPGTB xmm, xmm, k{k}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted reg-extension bits + opcode map
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: inverted vvvv (v1) + pp
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)                       // P2: opmask aaa; 0x00 selects 128-bit length
            m.emit(0x64)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: register-direct
        })
    }
    // VPCMPGTB m128, xmm, k{k}
    if isM128(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0) // EVEX prefix, 128-bit memory form
            m.emit(0x64)                                                                       // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 16)                                                // memory operand; disp8 compressed by 16
        })
    }
    // VPCMPGTB ymm, ymm, k{k}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted reg-extension bits + opcode map
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: inverted vvvv (v1) + pp
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)                       // P2: opmask aaa; 0x20 selects 256-bit length
            m.emit(0x64)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: register-direct
        })
    }
    // VPCMPGTB m256, ymm, k{k}
    if isM256(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0) // EVEX prefix, 256-bit memory form
            m.emit(0x64)                                                                       // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 32)                                                // memory operand; disp8 compressed by 32
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPCMPGTB")
    }
    return p
}
 66602  
// VPCMPGTD performs "Compare Packed Signed Doubleword Integers for Greater Than".
//
// Mnemonic        : VPCMPGTD
// Supported forms : (10 forms)
//
//    * VPCMPGTD xmm, xmm, xmm              [AVX]
//    * VPCMPGTD m128, xmm, xmm             [AVX]
//    * VPCMPGTD ymm, ymm, ymm              [AVX2]
//    * VPCMPGTD m256, ymm, ymm             [AVX2]
//    * VPCMPGTD m512/m32bcst, zmm, k{k}    [AVX512F]
//    * VPCMPGTD zmm, zmm, k{k}             [AVX512F]
//    * VPCMPGTD m128/m32bcst, xmm, k{k}    [AVX512F,AVX512VL]
//    * VPCMPGTD xmm, xmm, k{k}             [AVX512F,AVX512VL]
//    * VPCMPGTD m256/m32bcst, ymm, k{k}    [AVX512F,AVX512VL]
//    * VPCMPGTD ymm, ymm, k{k}             [AVX512F,AVX512VL]
//
// Operand order is AT&T style: v0 = source, v1 = second source, v2 = destination.
// Unlike the byte/word variants, the EVEX memory forms accept a 32-bit broadcast
// operand; the broadcast flag is forwarded via bcode(v[0]).
func (self *Program) VPCMPGTD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPCMPGTD", 3, Operands { v0, v1, v2 })
    // VPCMPGTD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))    // 2-byte VEX prefix, 128-bit form
            m.emit(0x66)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11 (register-direct), reg=v2, rm=v0
        })
    }
    // VPCMPGTD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 2-byte VEX prefix, 128-bit form
            m.emit(0x66)                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM/SIB/displacement for the memory operand
        })
    }
    // VPCMPGTD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))    // 2-byte VEX prefix, 256-bit form
            m.emit(0x66)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: register-direct
        })
    }
    // VPCMPGTD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 2-byte VEX prefix, 256-bit form
            m.emit(0x66)                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM/SIB/displacement for the memory operand
        })
    }
    // VPCMPGTD m512/m32bcst, zmm, k{k}
    if isM512M32bcst(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0])) // EVEX prefix, 512-bit; bcode sets the broadcast bit
            m.emit(0x66)                                                                                 // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)                                                          // memory operand; disp8 compressed by 64
        })
    }
    // VPCMPGTD zmm, zmm, k{k}
    if isZMM(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix built byte-by-byte for the register-register form.
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted reg-extension bits + opcode map
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: inverted vvvv (v1) + pp
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)                       // P2: opmask aaa; 0x40 selects 512-bit length
            m.emit(0x66)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: register-direct
        })
    }
    // VPCMPGTD m128/m32bcst, xmm, k{k}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0])) // EVEX prefix, 128-bit; bcode sets the broadcast bit
            m.emit(0x66)                                                                                 // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 16)                                                          // memory operand; disp8 compressed by 16
        })
    }
    // VPCMPGTD xmm, xmm, k{k}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted reg-extension bits + opcode map
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: inverted vvvv (v1) + pp
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)                       // P2: opmask aaa; 0x00 selects 128-bit length
            m.emit(0x66)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: register-direct
        })
    }
    // VPCMPGTD m256/m32bcst, ymm, k{k}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0])) // EVEX prefix, 256-bit; bcode sets the broadcast bit
            m.emit(0x66)                                                                                 // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 32)                                                          // memory operand; disp8 compressed by 32
        })
    }
    // VPCMPGTD ymm, ymm, k{k}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted reg-extension bits + opcode map
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: inverted vvvv (v1) + pp
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)                       // P2: opmask aaa; 0x20 selects 256-bit length
            m.emit(0x66)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: register-direct
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPCMPGTD")
    }
    return p
}
 66735  
// VPCMPGTQ performs "Compare Packed Data for Greater Than".
//
// Mnemonic        : VPCMPGTQ
// Supported forms : (10 forms)
//
//    * VPCMPGTQ xmm, xmm, xmm              [AVX]
//    * VPCMPGTQ m128, xmm, xmm             [AVX]
//    * VPCMPGTQ ymm, ymm, ymm              [AVX2]
//    * VPCMPGTQ m256, ymm, ymm             [AVX2]
//    * VPCMPGTQ m512/m64bcst, zmm, k{k}    [AVX512F]
//    * VPCMPGTQ zmm, zmm, k{k}             [AVX512F]
//    * VPCMPGTQ m128/m64bcst, xmm, k{k}    [AVX512F,AVX512VL]
//    * VPCMPGTQ xmm, xmm, k{k}             [AVX512F,AVX512VL]
//    * VPCMPGTQ m256/m64bcst, ymm, k{k}    [AVX512F,AVX512VL]
//    * VPCMPGTQ ymm, ymm, k{k}             [AVX512F,AVX512VL]
//
// Operand order is AT&T style: v0 = source, v1 = second source, v2 = destination.
// The quadword variant lives in a different opcode map than B/W/D (hence the
// 3-byte VEX prefix for the AVX forms) and needs the EVEX W bit set; the EVEX
// memory forms support 64-bit broadcast, forwarded via bcode(v[0]).
func (self *Program) VPCMPGTQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPCMPGTQ", 3, Operands { v0, v1, v2 })
    // VPCMPGTQ xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix built manually.
            m.emit(0xc4)                                          // VEX escape byte
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5)) // VEX byte 1: inverted R/B extension bits + map select
            m.emit(0x79 ^ (hlcode(v[1]) << 3))                    // VEX byte 2: inverted vvvv (v1) + pp
            m.emit(0x37)                                          // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))         // ModRM: register-direct, reg=v2, rm=v0
        })
    }
    // VPCMPGTQ m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 3-byte VEX prefix, 128-bit memory form
            m.emit(0x37)                                                    // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                              // ModRM/SIB/displacement for the memory operand
        })
    }
    // VPCMPGTQ ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix built manually (256-bit form).
            m.emit(0xc4)                                          // VEX escape byte
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5)) // VEX byte 1: inverted R/B extension bits + map select
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                    // VEX byte 2: inverted vvvv (v1) + L=1 + pp
            m.emit(0x37)                                          // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))         // ModRM: register-direct
        })
    }
    // VPCMPGTQ m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 3-byte VEX prefix, 256-bit memory form
            m.emit(0x37)                                                    // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                              // ModRM/SIB/displacement for the memory operand
        })
    }
    // VPCMPGTQ m512/m64bcst, zmm, k{k}
    if isM512M64bcst(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0])) // EVEX prefix, 512-bit, W=1; bcode sets the broadcast bit
            m.emit(0x37)                                                                                 // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)                                                          // memory operand; disp8 compressed by 64
        })
    }
    // VPCMPGTQ zmm, zmm, k{k}
    if isZMM(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix built byte-by-byte for the register-register form.
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted reg-extension bits + opcode map
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                             // P1: W=1, inverted vvvv (v1) + pp
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)                       // P2: opmask aaa; 0x40 selects 512-bit length
            m.emit(0x37)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: register-direct
        })
    }
    // VPCMPGTQ m128/m64bcst, xmm, k{k}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0])) // EVEX prefix, 128-bit, W=1; bcode sets the broadcast bit
            m.emit(0x37)                                                                                 // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 16)                                                          // memory operand; disp8 compressed by 16
        })
    }
    // VPCMPGTQ xmm, xmm, k{k}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted reg-extension bits + opcode map
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                             // P1: W=1, inverted vvvv (v1) + pp
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)                       // P2: opmask aaa; 0x00 selects 128-bit length
            m.emit(0x37)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: register-direct
        })
    }
    // VPCMPGTQ m256/m64bcst, ymm, k{k}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0])) // EVEX prefix, 256-bit, W=1; bcode sets the broadcast bit
            m.emit(0x37)                                                                                 // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 32)                                                          // memory operand; disp8 compressed by 32
        })
    }
    // VPCMPGTQ ymm, ymm, k{k}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted reg-extension bits + opcode map
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                             // P1: W=1, inverted vvvv (v1) + pp
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)                       // P2: opmask aaa; 0x20 selects 256-bit length
            m.emit(0x37)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: register-direct
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPCMPGTQ")
    }
    return p
}
 66872  
// VPCMPGTW performs "Compare Packed Signed Word Integers for Greater Than".
//
// Mnemonic        : VPCMPGTW
// Supported forms : (10 forms)
//
//    * VPCMPGTW xmm, xmm, xmm      [AVX]
//    * VPCMPGTW m128, xmm, xmm     [AVX]
//    * VPCMPGTW ymm, ymm, ymm      [AVX2]
//    * VPCMPGTW m256, ymm, ymm     [AVX2]
//    * VPCMPGTW zmm, zmm, k{k}     [AVX512BW]
//    * VPCMPGTW m512, zmm, k{k}    [AVX512BW]
//    * VPCMPGTW xmm, xmm, k{k}     [AVX512BW,AVX512VL]
//    * VPCMPGTW m128, xmm, k{k}    [AVX512BW,AVX512VL]
//    * VPCMPGTW ymm, ymm, k{k}     [AVX512BW,AVX512VL]
//    * VPCMPGTW m256, ymm, k{k}    [AVX512BW,AVX512VL]
//
// Operand order is AT&T style: v0 = source, v1 = second source, v2 = destination.
// Same encoding scheme as VPCMPGTB but with opcode 0x65.
func (self *Program) VPCMPGTW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPCMPGTW", 3, Operands { v0, v1, v2 })
    // VPCMPGTW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))    // 2-byte VEX prefix, 128-bit form
            m.emit(0x65)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11 (register-direct), reg=v2, rm=v0
        })
    }
    // VPCMPGTW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 2-byte VEX prefix, 128-bit form
            m.emit(0x65)                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM/SIB/displacement for the memory operand
        })
    }
    // VPCMPGTW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))    // 2-byte VEX prefix, 256-bit form
            m.emit(0x65)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: register-direct
        })
    }
    // VPCMPGTW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 2-byte VEX prefix, 256-bit form
            m.emit(0x65)                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM/SIB/displacement for the memory operand
        })
    }
    // VPCMPGTW zmm, zmm, k{k}
    if isZMM(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix built byte-by-byte for the register-register form.
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted reg-extension bits + opcode map
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: inverted vvvv (v1) + pp
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)                       // P2: opmask aaa; 0x40 selects 512-bit length
            m.emit(0x65)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: register-direct
        })
    }
    // VPCMPGTW m512, zmm, k{k}
    if isM512(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0) // EVEX prefix, 512-bit memory form
            m.emit(0x65)                                                                       // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)                                                // memory operand; disp8 compressed by 64
        })
    }
    // VPCMPGTW xmm, xmm, k{k}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted reg-extension bits + opcode map
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: inverted vvvv (v1) + pp
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)                       // P2: opmask aaa; 0x00 selects 128-bit length
            m.emit(0x65)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: register-direct
        })
    }
    // VPCMPGTW m128, xmm, k{k}
    if isM128(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0) // EVEX prefix, 128-bit memory form
            m.emit(0x65)                                                                       // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 16)                                                // memory operand; disp8 compressed by 16
        })
    }
    // VPCMPGTW ymm, ymm, k{k}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted reg-extension bits + opcode map
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: inverted vvvv (v1) + pp
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)                       // P2: opmask aaa; 0x20 selects 256-bit length
            m.emit(0x65)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: register-direct
        })
    }
    // VPCMPGTW m256, ymm, k{k}
    if isM256(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0) // EVEX prefix, 256-bit memory form
            m.emit(0x65)                                                                       // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 32)                                                // memory operand; disp8 compressed by 32
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPCMPGTW")
    }
    return p
}
 67005  
// VPCMPISTRI performs "Packed Compare Implicit Length Strings, Return Index".
//
// Mnemonic        : VPCMPISTRI
// Supported forms : (2 forms)
//
//    * VPCMPISTRI imm8, xmm, xmm     [AVX]
//    * VPCMPISTRI imm8, m128, xmm    [AVX]
//
func (self *Program) VPCMPISTRI(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Operands follow AT&T order: v0 = imm8 control byte, v1 = second
    // source (xmm or m128), v2 = first source (xmm). The index result
    // register is implicit in the ISA and is not an operand here.
    // Each matching form registers one encoder closure; actual byte
    // emission is deferred until assembly time.
    p := self.alloc("VPCMPISTRI", 3, Operands { v0, v1, v2 })
    // VPCMPISTRI imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: 3-byte VEX prefix emitted by hand (0xc4
            // escape; the XOR terms fold the high register bits into the
            // inverted R/X fields), then opcode 0x63, ModRM, and imm8.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79)
            m.emit(0x63)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPISTRI imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: vex3() builds the VEX prefix from the address
            // operand; mrsd() emits ModRM/SIB (scale 1 — VEX has no
            // compressed-displacement encoding).
            m.vex3(0xc4, 0b11, 0x01, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x63)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPCMPISTRI")
    }
    return p
}
 67045  
// VPCMPISTRM performs "Packed Compare Implicit Length Strings, Return Mask".
//
// Mnemonic        : VPCMPISTRM
// Supported forms : (2 forms)
//
//    * VPCMPISTRM imm8, xmm, xmm     [AVX]
//    * VPCMPISTRM imm8, m128, xmm    [AVX]
//
func (self *Program) VPCMPISTRM(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Operands follow AT&T order: v0 = imm8 control byte, v1 = second
    // source (xmm or m128), v2 = first source (xmm). The mask result
    // register is implicit in the ISA and is not an operand here.
    // Identical structure to VPCMPISTRI; only the opcode differs (0x62).
    p := self.alloc("VPCMPISTRM", 3, Operands { v0, v1, v2 })
    // VPCMPISTRM imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-built 3-byte VEX prefix, opcode 0x62,
            // ModRM, then the imm8 control byte.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79)
            m.emit(0x62)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPISTRM imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: vex3() builds the VEX prefix; mrsd() emits
            // ModRM/SIB (scale 1 — no disp8 compression under VEX).
            m.vex3(0xc4, 0b11, 0x01, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x62)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPCMPISTRM")
    }
    return p
}
 67085  
// VPCMPQ performs "Compare Packed Signed Quadword Values".
//
// Mnemonic        : VPCMPQ
// Supported forms : (6 forms)
//
//    * VPCMPQ imm8, m512/m64bcst, zmm, k{k}    [AVX512F]
//    * VPCMPQ imm8, zmm, zmm, k{k}             [AVX512F]
//    * VPCMPQ imm8, m128/m64bcst, xmm, k{k}    [AVX512F,AVX512VL]
//    * VPCMPQ imm8, xmm, xmm, k{k}             [AVX512F,AVX512VL]
//    * VPCMPQ imm8, m256/m64bcst, ymm, k{k}    [AVX512F,AVX512VL]
//    * VPCMPQ imm8, ymm, ymm, k{k}             [AVX512F,AVX512VL]
//
func (self *Program) VPCMPQ(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Operands follow AT&T order: v0 = imm8 comparison predicate,
    // v1 = second source (reg or mem/broadcast), v2 = first source
    // register, v3 = destination opmask k{k}. Each matching form below
    // registers one encoder closure; byte emission happens at assembly.
    p := self.alloc("VPCMPQ", 4, Operands { v0, v1, v2, v3 })
    // VPCMPQ imm8, m512/m64bcst, zmm, k{k}
    if isImm8(v0) && isM512M64bcst(v1) && isZMM(v2) && isKk(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: evex() builds the EVEX prefix (bcode() sets the
            // broadcast bit); mrsd() emits ModRM/SIB with the displacement
            // compressed by the 64-byte tuple size (512-bit operand).
            m.evex(0b11, 0x85, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, bcode(v[1]))
            m.emit(0x1f)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPQ imm8, zmm, zmm, k{k}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isKk(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: EVEX prefix emitted by hand — 0x62 escape,
            // then three payload bytes carrying the inverted register
            // extension bits, W/vvvv, and the opmask plus vector-length
            // bits (0x40 = 512-bit); then opcode 0x1f, ModRM, imm8.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x1f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPQ imm8, m128/m64bcst, xmm, k{k}
    if isImm8(v0) && isM128M64bcst(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, bcode(v[1]))
            m.emit(0x1f)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPQ imm8, xmm, xmm, k{k}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0x1f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPQ imm8, m256/m64bcst, ymm, k{k}
    if isImm8(v0) && isM256M64bcst(v1) && isEVEXYMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, bcode(v[1]))
            m.emit(0x1f)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPQ imm8, ymm, ymm, k{k}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x1f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPCMPQ")
    }
    return p
}
 67180  
// VPCMPUB performs "Compare Packed Unsigned Byte Values".
//
// Mnemonic        : VPCMPUB
// Supported forms : (6 forms)
//
//    * VPCMPUB imm8, zmm, zmm, k{k}     [AVX512BW]
//    * VPCMPUB imm8, m512, zmm, k{k}    [AVX512BW]
//    * VPCMPUB imm8, xmm, xmm, k{k}     [AVX512BW,AVX512VL]
//    * VPCMPUB imm8, m128, xmm, k{k}    [AVX512BW,AVX512VL]
//    * VPCMPUB imm8, ymm, ymm, k{k}     [AVX512BW,AVX512VL]
//    * VPCMPUB imm8, m256, ymm, k{k}    [AVX512BW,AVX512VL]
//
func (self *Program) VPCMPUB(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Operands follow AT&T order: v0 = imm8 comparison predicate,
    // v1 = second source (reg/mem), v2 = first source register,
    // v3 = destination opmask k{k}. Byte-element compare, so the memory
    // forms carry no broadcast variant (evex() is called with bcast = 0).
    p := self.alloc("VPCMPUB", 4, Operands { v0, v1, v2, v3 })
    // VPCMPUB imm8, zmm, zmm, k{k}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isKk(v3) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: EVEX prefix emitted by hand — 0x62 escape,
            // then three payload bytes with the inverted register
            // extension bits, vvvv, and opmask/vector-length bits
            // (0x40 = 512-bit); then opcode 0x3e, ModRM, imm8.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x3e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUB imm8, m512, zmm, k{k}
    if isImm8(v0) && isM512(v1) && isZMM(v2) && isKk(v3) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: evex() builds the EVEX prefix; mrsd() emits
            // ModRM/SIB with the displacement compressed by the 64-byte
            // tuple size (512-bit operand).
            m.evex(0b11, 0x05, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, 0)
            m.emit(0x3e)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUB imm8, xmm, xmm, k{k}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0x3e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUB imm8, m128, xmm, k{k}
    if isImm8(v0) && isM128(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, 0)
            m.emit(0x3e)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUB imm8, ymm, ymm, k{k}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x3e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUB imm8, m256, ymm, k{k}
    if isImm8(v0) && isM256(v1) && isEVEXYMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, 0)
            m.emit(0x3e)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPCMPUB")
    }
    return p
}
 67275  
// VPCMPUD performs "Compare Packed Unsigned Doubleword Values".
//
// Mnemonic        : VPCMPUD
// Supported forms : (6 forms)
//
//    * VPCMPUD imm8, m512/m32bcst, zmm, k{k}    [AVX512F]
//    * VPCMPUD imm8, zmm, zmm, k{k}             [AVX512F]
//    * VPCMPUD imm8, m128/m32bcst, xmm, k{k}    [AVX512F,AVX512VL]
//    * VPCMPUD imm8, xmm, xmm, k{k}             [AVX512F,AVX512VL]
//    * VPCMPUD imm8, m256/m32bcst, ymm, k{k}    [AVX512F,AVX512VL]
//    * VPCMPUD imm8, ymm, ymm, k{k}             [AVX512F,AVX512VL]
//
func (self *Program) VPCMPUD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Operands follow AT&T order: v0 = imm8 comparison predicate,
    // v1 = second source (reg or mem/m32bcst), v2 = first source
    // register, v3 = destination opmask k{k}. Doubleword elements, so
    // the memory forms accept a 32-bit broadcast (bcode()).
    p := self.alloc("VPCMPUD", 4, Operands { v0, v1, v2, v3 })
    // VPCMPUD imm8, m512/m32bcst, zmm, k{k}
    if isImm8(v0) && isM512M32bcst(v1) && isZMM(v2) && isKk(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: evex() builds the EVEX prefix (broadcast bit
            // from bcode()); mrsd() compresses the displacement by the
            // 64-byte tuple size (512-bit operand).
            m.evex(0b11, 0x05, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, bcode(v[1]))
            m.emit(0x1e)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUD imm8, zmm, zmm, k{k}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isKk(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-emitted EVEX prefix (0x62 escape plus
            // three payload bytes; 0x40 selects the 512-bit length),
            // opcode 0x1e, ModRM, then the imm8 predicate.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x1e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUD imm8, m128/m32bcst, xmm, k{k}
    if isImm8(v0) && isM128M32bcst(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, bcode(v[1]))
            m.emit(0x1e)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUD imm8, xmm, xmm, k{k}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0x1e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUD imm8, m256/m32bcst, ymm, k{k}
    if isImm8(v0) && isM256M32bcst(v1) && isEVEXYMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, bcode(v[1]))
            m.emit(0x1e)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUD imm8, ymm, ymm, k{k}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x1e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPCMPUD")
    }
    return p
}
 67370  
// VPCMPUQ performs "Compare Packed Unsigned Quadword Values".
//
// Mnemonic        : VPCMPUQ
// Supported forms : (6 forms)
//
//    * VPCMPUQ imm8, m512/m64bcst, zmm, k{k}    [AVX512F]
//    * VPCMPUQ imm8, zmm, zmm, k{k}             [AVX512F]
//    * VPCMPUQ imm8, m128/m64bcst, xmm, k{k}    [AVX512F,AVX512VL]
//    * VPCMPUQ imm8, xmm, xmm, k{k}             [AVX512F,AVX512VL]
//    * VPCMPUQ imm8, m256/m64bcst, ymm, k{k}    [AVX512F,AVX512VL]
//    * VPCMPUQ imm8, ymm, ymm, k{k}             [AVX512F,AVX512VL]
//
func (self *Program) VPCMPUQ(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Operands follow AT&T order: v0 = imm8 comparison predicate,
    // v1 = second source (reg or mem/m64bcst), v2 = first source
    // register, v3 = destination opmask k{k}. Same layout as VPCMPQ
    // (opcode 0x1e here vs 0x1f for the signed compare).
    p := self.alloc("VPCMPUQ", 4, Operands { v0, v1, v2, v3 })
    // VPCMPUQ imm8, m512/m64bcst, zmm, k{k}
    if isImm8(v0) && isM512M64bcst(v1) && isZMM(v2) && isKk(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: evex() builds the EVEX prefix (broadcast bit
            // from bcode()); displacement compressed by the 64-byte
            // tuple size (512-bit operand).
            m.evex(0b11, 0x85, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, bcode(v[1]))
            m.emit(0x1e)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUQ imm8, zmm, zmm, k{k}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isKk(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-emitted EVEX prefix (0x62 escape plus
            // three payload bytes; 0x40 selects the 512-bit length),
            // opcode 0x1e, ModRM, then the imm8 predicate.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x1e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUQ imm8, m128/m64bcst, xmm, k{k}
    if isImm8(v0) && isM128M64bcst(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, bcode(v[1]))
            m.emit(0x1e)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUQ imm8, xmm, xmm, k{k}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0x1e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUQ imm8, m256/m64bcst, ymm, k{k}
    if isImm8(v0) && isM256M64bcst(v1) && isEVEXYMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, bcode(v[1]))
            m.emit(0x1e)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUQ imm8, ymm, ymm, k{k}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x1e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPCMPUQ")
    }
    return p
}
 67465  
// VPCMPUW performs "Compare Packed Unsigned Word Values".
//
// Mnemonic        : VPCMPUW
// Supported forms : (6 forms)
//
//    * VPCMPUW imm8, zmm, zmm, k{k}     [AVX512BW]
//    * VPCMPUW imm8, m512, zmm, k{k}    [AVX512BW]
//    * VPCMPUW imm8, xmm, xmm, k{k}     [AVX512BW,AVX512VL]
//    * VPCMPUW imm8, m128, xmm, k{k}    [AVX512BW,AVX512VL]
//    * VPCMPUW imm8, ymm, ymm, k{k}     [AVX512BW,AVX512VL]
//    * VPCMPUW imm8, m256, ymm, k{k}    [AVX512BW,AVX512VL]
//
func (self *Program) VPCMPUW(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Operands follow AT&T order: v0 = imm8 comparison predicate,
    // v1 = second source (reg/mem), v2 = first source register,
    // v3 = destination opmask k{k}. Word elements — no broadcast
    // variant, so evex() is called with bcast = 0.
    p := self.alloc("VPCMPUW", 4, Operands { v0, v1, v2, v3 })
    // VPCMPUW imm8, zmm, zmm, k{k}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isKk(v3) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-emitted EVEX prefix (0x62 escape plus
            // three payload bytes; 0x40 selects the 512-bit length),
            // opcode 0x3e, ModRM, then the imm8 predicate.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x3e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUW imm8, m512, zmm, k{k}
    if isImm8(v0) && isM512(v1) && isZMM(v2) && isKk(v3) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: evex() builds the EVEX prefix; displacement
            // compressed by the 64-byte tuple size (512-bit operand).
            m.evex(0b11, 0x85, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, 0)
            m.emit(0x3e)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUW imm8, xmm, xmm, k{k}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0x3e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUW imm8, m128, xmm, k{k}
    if isImm8(v0) && isM128(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, 0)
            m.emit(0x3e)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUW imm8, ymm, ymm, k{k}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x3e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUW imm8, m256, ymm, k{k}
    if isImm8(v0) && isM256(v1) && isEVEXYMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, 0)
            m.emit(0x3e)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPCMPUW")
    }
    return p
}
 67560  
// VPCMPW performs "Compare Packed Signed Word Values".
//
// Mnemonic        : VPCMPW
// Supported forms : (6 forms)
//
//    * VPCMPW imm8, zmm, zmm, k{k}     [AVX512BW]
//    * VPCMPW imm8, m512, zmm, k{k}    [AVX512BW]
//    * VPCMPW imm8, xmm, xmm, k{k}     [AVX512BW,AVX512VL]
//    * VPCMPW imm8, m128, xmm, k{k}    [AVX512BW,AVX512VL]
//    * VPCMPW imm8, ymm, ymm, k{k}     [AVX512BW,AVX512VL]
//    * VPCMPW imm8, m256, ymm, k{k}    [AVX512BW,AVX512VL]
//
func (self *Program) VPCMPW(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Operands follow AT&T order: v0 = imm8 comparison predicate,
    // v1 = second source (reg/mem), v2 = first source register,
    // v3 = destination opmask k{k}. Same layout as VPCMPUW; only the
    // opcode differs (0x3f signed vs 0x3e unsigned).
    p := self.alloc("VPCMPW", 4, Operands { v0, v1, v2, v3 })
    // VPCMPW imm8, zmm, zmm, k{k}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isKk(v3) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-emitted EVEX prefix (0x62 escape plus
            // three payload bytes; 0x40 selects the 512-bit length),
            // opcode 0x3f, ModRM, then the imm8 predicate.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x3f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPW imm8, m512, zmm, k{k}
    if isImm8(v0) && isM512(v1) && isZMM(v2) && isKk(v3) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: evex() builds the EVEX prefix; displacement
            // compressed by the 64-byte tuple size (512-bit operand).
            m.evex(0b11, 0x85, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, 0)
            m.emit(0x3f)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPW imm8, xmm, xmm, k{k}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0x3f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPW imm8, m128, xmm, k{k}
    if isImm8(v0) && isM128(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, 0)
            m.emit(0x3f)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPW imm8, ymm, ymm, k{k}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x3f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPW imm8, m256, ymm, k{k}
    if isImm8(v0) && isM256(v1) && isEVEXYMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, 0)
            m.emit(0x3f)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPCMPW")
    }
    return p
}
 67655  
// VPCOMB performs "Compare Packed Signed Byte Integers".
//
// Mnemonic        : VPCOMB
// Supported forms : (2 forms)
//
//    * VPCOMB imm8, xmm, xmm, xmm     [XOP]
//    * VPCOMB imm8, m128, xmm, xmm    [XOP]
//
func (self *Program) VPCOMB(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // AMD XOP instruction. Operands follow AT&T order: v0 = imm8
    // condition code, v1 = second source (xmm or m128), v2 = first
    // source register, v3 = destination register.
    p := self.alloc("VPCOMB", 4, Operands { v0, v1, v2, v3 })
    // VPCOMB imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: 3-byte XOP prefix emitted by hand (0x8f
            // escape; the XOR terms fold in the high register bits),
            // opcode 0xcc, ModRM, then the imm8 condition code.
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[2]) << 3))
            m.emit(0xcc)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCOMB imm8, m128, xmm, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: vex3() builds the XOP prefix (0x8f escape,
            // map 0b1000); mrsd() emits ModRM/SIB at scale 1.
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xcc)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPCOMB")
    }
    return p
}
 67695  
// VPCOMD performs "Compare Packed Signed Doubleword Integers".
//
// Mnemonic        : VPCOMD
// Supported forms : (2 forms)
//
//    * VPCOMD imm8, xmm, xmm, xmm     [XOP]
//    * VPCOMD imm8, m128, xmm, xmm    [XOP]
//
// NOTE(review): generated by mkasm_amd64.py — fix the generator, not this
// function. Identical encoding scheme to VPCOMB, differing only in the
// opcode byte (0xce). imm8 (v0) selects the comparison predicate.
func (self *Program) VPCOMD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPCOMD", 4, Operands { v0, v1, v2, v3 })
    // VPCOMD imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP) // AMD XOP ISA extension
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                           // XOP escape byte
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5)) // XOP byte 1: map 08h with high register bits of v3/v1 folded in (inverted)
            m.emit(0x78 ^ (hlcode(v[2]) << 3))                     // XOP byte 2: vvvv encodes second source v2
            m.emit(0xce)                                           // VPCOMD opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))          // ModRM (register-direct): reg = dest v3, rm = first source v1
            m.imm1(toImmAny(v[0]))                                 // trailing imm8 predicate
        })
    }
    // VPCOMD imm8, m128, xmm, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2])) // 3-byte XOP prefix (map 8) built by helper for the memory form
            m.emit(0xce)                                                      // VPCOMD opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 1)                                // ModRM/SIB/displacement for the memory operand (disp scale 1)
            m.imm1(toImmAny(v[0]))                                            // trailing imm8 predicate
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPCOMD")
    }
    return p
}
 67735  
// VPCOMPRESSD performs "Store Sparse Packed Doubleword Integer Values into Dense Memory/Register".
//
// Mnemonic        : VPCOMPRESSD
// Supported forms : (6 forms)
//
//    * VPCOMPRESSD zmm, zmm{k}{z}     [AVX512F]
//    * VPCOMPRESSD zmm, m512{k}{z}    [AVX512F]
//    * VPCOMPRESSD xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPCOMPRESSD xmm, m128{k}{z}    [AVX512F,AVX512VL]
//    * VPCOMPRESSD ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VPCOMPRESSD ymm, m256{k}{z}    [AVX512F,AVX512VL]
//
// NOTE(review): generated by mkasm_amd64.py — fix the generator, not this
// function. v0 is the source vector and v1 the maskable destination, so the
// ModRM reg field carries v0 and the rm field v1 (reversed relative to most
// load-style forms in this file). Register forms hand-assemble the 4-byte
// EVEX prefix; memory forms delegate to the m.evex helper.
func (self *Program) VPCOMPRESSD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPCOMPRESSD", 2, Operands { v0, v1 })
    // VPCOMPRESSD zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4))) // EVEX P0: inverted register-extension bits of v0/v1
            m.emit(0x7d)                                                                   // EVEX P1: constant here (W=0, vvvv unused, pp=66)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                                // EVEX P2: z/merge bit, mask register, 0x48 selects 512-bit length
            m.emit(0x8b)                                                                   // VPCOMPRESSD opcode
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))                                  // ModRM (register-direct): reg = source v0, rm = dest v1
        })
    }
    // VPCOMPRESSD zmm, m512{k}{z}
    if isZMM(v0) && isM512kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0) // EVEX prefix via helper (0b10 = 512-bit length)
            m.emit(0x8b)                                                                       // VPCOMPRESSD opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 4)                                                 // ModRM/SIB/disp; scale 4 matches the dword element size (EVEX disp8*N)
        })
    }
    // VPCOMPRESSD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F) // 128-bit form additionally needs AVX-512VL
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4))) // EVEX P0: inverted register-extension bits of v0/v1
            m.emit(0x7d)                                                                   // EVEX P1: constant here (W=0, vvvv unused, pp=66)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)                                // EVEX P2: 0x08 selects 128-bit length
            m.emit(0x8b)                                                                   // VPCOMPRESSD opcode
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))                                  // ModRM (register-direct): reg = source v0, rm = dest v1
        })
    }
    // VPCOMPRESSD xmm, m128{k}{z}
    if isEVEXXMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0) // EVEX prefix via helper (0b00 = 128-bit length)
            m.emit(0x8b)                                                                       // VPCOMPRESSD opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 4)                                                 // disp8*N scale 4 (dword elements)
        })
    }
    // VPCOMPRESSD ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4))) // EVEX P0: inverted register-extension bits of v0/v1
            m.emit(0x7d)                                                                   // EVEX P1: constant here (W=0, vvvv unused, pp=66)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)                                // EVEX P2: 0x28 selects 256-bit length
            m.emit(0x8b)                                                                   // VPCOMPRESSD opcode
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))                                  // ModRM (register-direct): reg = source v0, rm = dest v1
        })
    }
    // VPCOMPRESSD ymm, m256{k}{z}
    if isEVEXYMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0) // EVEX prefix via helper (0b01 = 256-bit length)
            m.emit(0x8b)                                                                       // VPCOMPRESSD opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 4)                                                 // disp8*N scale 4 (dword elements)
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPCOMPRESSD")
    }
    return p
}
 67824  
// VPCOMPRESSQ performs "Store Sparse Packed Quadword Integer Values into Dense Memory/Register".
//
// Mnemonic        : VPCOMPRESSQ
// Supported forms : (6 forms)
//
//    * VPCOMPRESSQ zmm, zmm{k}{z}     [AVX512F]
//    * VPCOMPRESSQ zmm, m512{k}{z}    [AVX512F]
//    * VPCOMPRESSQ xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPCOMPRESSQ xmm, m128{k}{z}    [AVX512F,AVX512VL]
//    * VPCOMPRESSQ ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VPCOMPRESSQ ymm, m256{k}{z}    [AVX512F,AVX512VL]
//
// NOTE(review): generated by mkasm_amd64.py — fix the generator, not this
// function. Quadword twin of VPCOMPRESSD: same opcode (0x8b) but EVEX.W=1
// (P1 byte 0xfd vs 0x7d, evex opcode arg 0x85 vs 0x05) and disp scale 8.
// v0 is the source vector, v1 the maskable destination (reg = v0, rm = v1).
func (self *Program) VPCOMPRESSQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPCOMPRESSQ", 2, Operands { v0, v1 })
    // VPCOMPRESSQ zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4))) // EVEX P0: inverted register-extension bits of v0/v1
            m.emit(0xfd)                                                                   // EVEX P1: W=1 (quadword), vvvv unused, pp=66
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                                // EVEX P2: z/merge bit, mask register, 0x48 selects 512-bit length
            m.emit(0x8b)                                                                   // VPCOMPRESSQ opcode
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))                                  // ModRM (register-direct): reg = source v0, rm = dest v1
        })
    }
    // VPCOMPRESSQ zmm, m512{k}{z}
    if isZMM(v0) && isM512kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0) // EVEX prefix via helper (0x85 carries W=1; 0b10 = 512-bit)
            m.emit(0x8b)                                                                       // VPCOMPRESSQ opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 8)                                                 // ModRM/SIB/disp; scale 8 matches the qword element size (EVEX disp8*N)
        })
    }
    // VPCOMPRESSQ xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F) // 128-bit form additionally needs AVX-512VL
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4))) // EVEX P0: inverted register-extension bits of v0/v1
            m.emit(0xfd)                                                                   // EVEX P1: W=1 (quadword), vvvv unused, pp=66
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)                                // EVEX P2: 0x08 selects 128-bit length
            m.emit(0x8b)                                                                   // VPCOMPRESSQ opcode
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))                                  // ModRM (register-direct): reg = source v0, rm = dest v1
        })
    }
    // VPCOMPRESSQ xmm, m128{k}{z}
    if isEVEXXMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0) // EVEX prefix via helper (0b00 = 128-bit length)
            m.emit(0x8b)                                                                       // VPCOMPRESSQ opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 8)                                                 // disp8*N scale 8 (qword elements)
        })
    }
    // VPCOMPRESSQ ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4))) // EVEX P0: inverted register-extension bits of v0/v1
            m.emit(0xfd)                                                                   // EVEX P1: W=1 (quadword), vvvv unused, pp=66
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)                                // EVEX P2: 0x28 selects 256-bit length
            m.emit(0x8b)                                                                   // VPCOMPRESSQ opcode
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))                                  // ModRM (register-direct): reg = source v0, rm = dest v1
        })
    }
    // VPCOMPRESSQ ymm, m256{k}{z}
    if isEVEXYMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0) // EVEX prefix via helper (0b01 = 256-bit length)
            m.emit(0x8b)                                                                       // VPCOMPRESSQ opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 8)                                                 // disp8*N scale 8 (qword elements)
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPCOMPRESSQ")
    }
    return p
}
 67913  
// VPCOMQ performs "Compare Packed Signed Quadword Integers".
//
// Mnemonic        : VPCOMQ
// Supported forms : (2 forms)
//
//    * VPCOMQ imm8, xmm, xmm, xmm     [XOP]
//    * VPCOMQ imm8, m128, xmm, xmm    [XOP]
//
// NOTE(review): generated by mkasm_amd64.py — fix the generator, not this
// function. Identical encoding scheme to VPCOMB, differing only in the
// opcode byte (0xcf). imm8 (v0) selects the comparison predicate.
func (self *Program) VPCOMQ(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPCOMQ", 4, Operands { v0, v1, v2, v3 })
    // VPCOMQ imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP) // AMD XOP ISA extension
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                           // XOP escape byte
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5)) // XOP byte 1: map 08h with high register bits of v3/v1 folded in (inverted)
            m.emit(0x78 ^ (hlcode(v[2]) << 3))                     // XOP byte 2: vvvv encodes second source v2
            m.emit(0xcf)                                           // VPCOMQ opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))          // ModRM (register-direct): reg = dest v3, rm = first source v1
            m.imm1(toImmAny(v[0]))                                 // trailing imm8 predicate
        })
    }
    // VPCOMQ imm8, m128, xmm, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2])) // 3-byte XOP prefix (map 8) built by helper for the memory form
            m.emit(0xcf)                                                      // VPCOMQ opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 1)                                // ModRM/SIB/displacement for the memory operand (disp scale 1)
            m.imm1(toImmAny(v[0]))                                            // trailing imm8 predicate
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPCOMQ")
    }
    return p
}
 67953  
// VPCOMUB performs "Compare Packed Unsigned Byte Integers".
//
// Mnemonic        : VPCOMUB
// Supported forms : (2 forms)
//
//    * VPCOMUB imm8, xmm, xmm, xmm     [XOP]
//    * VPCOMUB imm8, m128, xmm, xmm    [XOP]
//
// NOTE(review): generated by mkasm_amd64.py — fix the generator, not this
// function. Unsigned-byte variant of VPCOMB; same encoding scheme with
// opcode byte 0xec. imm8 (v0) selects the comparison predicate.
func (self *Program) VPCOMUB(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPCOMUB", 4, Operands { v0, v1, v2, v3 })
    // VPCOMUB imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP) // AMD XOP ISA extension
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                           // XOP escape byte
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5)) // XOP byte 1: map 08h with high register bits of v3/v1 folded in (inverted)
            m.emit(0x78 ^ (hlcode(v[2]) << 3))                     // XOP byte 2: vvvv encodes second source v2
            m.emit(0xec)                                           // VPCOMUB opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))          // ModRM (register-direct): reg = dest v3, rm = first source v1
            m.imm1(toImmAny(v[0]))                                 // trailing imm8 predicate
        })
    }
    // VPCOMUB imm8, m128, xmm, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2])) // 3-byte XOP prefix (map 8) built by helper for the memory form
            m.emit(0xec)                                                      // VPCOMUB opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 1)                                // ModRM/SIB/displacement for the memory operand (disp scale 1)
            m.imm1(toImmAny(v[0]))                                            // trailing imm8 predicate
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPCOMUB")
    }
    return p
}
 67993  
// VPCOMUD performs "Compare Packed Unsigned Doubleword Integers".
//
// Mnemonic        : VPCOMUD
// Supported forms : (2 forms)
//
//    * VPCOMUD imm8, xmm, xmm, xmm     [XOP]
//    * VPCOMUD imm8, m128, xmm, xmm    [XOP]
//
// NOTE(review): generated by mkasm_amd64.py — fix the generator, not this
// function. Unsigned-dword variant of VPCOMD; same encoding scheme with
// opcode byte 0xee. imm8 (v0) selects the comparison predicate.
func (self *Program) VPCOMUD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPCOMUD", 4, Operands { v0, v1, v2, v3 })
    // VPCOMUD imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP) // AMD XOP ISA extension
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                           // XOP escape byte
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5)) // XOP byte 1: map 08h with high register bits of v3/v1 folded in (inverted)
            m.emit(0x78 ^ (hlcode(v[2]) << 3))                     // XOP byte 2: vvvv encodes second source v2
            m.emit(0xee)                                           // VPCOMUD opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))          // ModRM (register-direct): reg = dest v3, rm = first source v1
            m.imm1(toImmAny(v[0]))                                 // trailing imm8 predicate
        })
    }
    // VPCOMUD imm8, m128, xmm, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2])) // 3-byte XOP prefix (map 8) built by helper for the memory form
            m.emit(0xee)                                                      // VPCOMUD opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 1)                                // ModRM/SIB/displacement for the memory operand (disp scale 1)
            m.imm1(toImmAny(v[0]))                                            // trailing imm8 predicate
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPCOMUD")
    }
    return p
}
 68033  
// VPCOMUQ performs "Compare Packed Unsigned Quadword Integers".
//
// Mnemonic        : VPCOMUQ
// Supported forms : (2 forms)
//
//    * VPCOMUQ imm8, xmm, xmm, xmm     [XOP]
//    * VPCOMUQ imm8, m128, xmm, xmm    [XOP]
//
// NOTE(review): generated by mkasm_amd64.py — fix the generator, not this
// function. Unsigned-qword variant of VPCOMQ; same encoding scheme with
// opcode byte 0xef. imm8 (v0) selects the comparison predicate.
func (self *Program) VPCOMUQ(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPCOMUQ", 4, Operands { v0, v1, v2, v3 })
    // VPCOMUQ imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP) // AMD XOP ISA extension
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                           // XOP escape byte
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5)) // XOP byte 1: map 08h with high register bits of v3/v1 folded in (inverted)
            m.emit(0x78 ^ (hlcode(v[2]) << 3))                     // XOP byte 2: vvvv encodes second source v2
            m.emit(0xef)                                           // VPCOMUQ opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))          // ModRM (register-direct): reg = dest v3, rm = first source v1
            m.imm1(toImmAny(v[0]))                                 // trailing imm8 predicate
        })
    }
    // VPCOMUQ imm8, m128, xmm, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2])) // 3-byte XOP prefix (map 8) built by helper for the memory form
            m.emit(0xef)                                                      // VPCOMUQ opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 1)                                // ModRM/SIB/displacement for the memory operand (disp scale 1)
            m.imm1(toImmAny(v[0]))                                            // trailing imm8 predicate
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPCOMUQ")
    }
    return p
}
 68073  
// VPCOMUW performs "Compare Packed Unsigned Word Integers".
//
// Mnemonic        : VPCOMUW
// Supported forms : (2 forms)
//
//    * VPCOMUW imm8, xmm, xmm, xmm     [XOP]
//    * VPCOMUW imm8, m128, xmm, xmm    [XOP]
//
// NOTE(review): generated by mkasm_amd64.py — fix the generator, not this
// function. Unsigned-word variant of VPCOMW; same encoding scheme with
// opcode byte 0xed. imm8 (v0) selects the comparison predicate.
func (self *Program) VPCOMUW(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPCOMUW", 4, Operands { v0, v1, v2, v3 })
    // VPCOMUW imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP) // AMD XOP ISA extension
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                           // XOP escape byte
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5)) // XOP byte 1: map 08h with high register bits of v3/v1 folded in (inverted)
            m.emit(0x78 ^ (hlcode(v[2]) << 3))                     // XOP byte 2: vvvv encodes second source v2
            m.emit(0xed)                                           // VPCOMUW opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))          // ModRM (register-direct): reg = dest v3, rm = first source v1
            m.imm1(toImmAny(v[0]))                                 // trailing imm8 predicate
        })
    }
    // VPCOMUW imm8, m128, xmm, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2])) // 3-byte XOP prefix (map 8) built by helper for the memory form
            m.emit(0xed)                                                      // VPCOMUW opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 1)                                // ModRM/SIB/displacement for the memory operand (disp scale 1)
            m.imm1(toImmAny(v[0]))                                            // trailing imm8 predicate
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPCOMUW")
    }
    return p
}
 68113  
// VPCOMW performs "Compare Packed Signed Word Integers".
//
// Mnemonic        : VPCOMW
// Supported forms : (2 forms)
//
//    * VPCOMW imm8, xmm, xmm, xmm     [XOP]
//    * VPCOMW imm8, m128, xmm, xmm    [XOP]
//
// NOTE(review): generated by mkasm_amd64.py — fix the generator, not this
// function. Identical encoding scheme to VPCOMB, differing only in the
// opcode byte (0xcd). imm8 (v0) selects the comparison predicate.
func (self *Program) VPCOMW(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPCOMW", 4, Operands { v0, v1, v2, v3 })
    // VPCOMW imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP) // AMD XOP ISA extension
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                           // XOP escape byte
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5)) // XOP byte 1: map 08h with high register bits of v3/v1 folded in (inverted)
            m.emit(0x78 ^ (hlcode(v[2]) << 3))                     // XOP byte 2: vvvv encodes second source v2
            m.emit(0xcd)                                           // VPCOMW opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))          // ModRM (register-direct): reg = dest v3, rm = first source v1
            m.imm1(toImmAny(v[0]))                                 // trailing imm8 predicate
        })
    }
    // VPCOMW imm8, m128, xmm, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2])) // 3-byte XOP prefix (map 8) built by helper for the memory form
            m.emit(0xcd)                                                      // VPCOMW opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 1)                                // ModRM/SIB/displacement for the memory operand (disp scale 1)
            m.imm1(toImmAny(v[0]))                                            // trailing imm8 predicate
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPCOMW")
    }
    return p
}
 68153  
// VPCONFLICTD performs "Detect Conflicts Within a Vector of Packed Doubleword Values into Dense Memory/Register".
//
// Mnemonic        : VPCONFLICTD
// Supported forms : (6 forms)
//
//    * VPCONFLICTD m128/m32bcst, xmm{k}{z}    [AVX512CD,AVX512VL]
//    * VPCONFLICTD m256/m32bcst, ymm{k}{z}    [AVX512CD,AVX512VL]
//    * VPCONFLICTD m512/m32bcst, zmm{k}{z}    [AVX512CD]
//    * VPCONFLICTD xmm, xmm{k}{z}             [AVX512CD,AVX512VL]
//    * VPCONFLICTD ymm, ymm{k}{z}             [AVX512CD,AVX512VL]
//    * VPCONFLICTD zmm, zmm{k}{z}             [AVX512CD]
//
// NOTE(review): generated by mkasm_amd64.py — fix the generator, not this
// function. Unlike VPCOMPRESS*, v0 is the source (memory forms support an
// embedded dword broadcast via bcode) and v1 the maskable destination, with
// the ModRM reg field carrying v1.
func (self *Program) VPCONFLICTD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPCONFLICTD", 2, Operands { v0, v1 })
    // VPCONFLICTD m128/m32bcst, xmm{k}{z}
    if isM128M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD) // sub-512-bit forms additionally need AVX-512VL
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0])) // EVEX prefix via helper (0b00 = 128-bit; bcode = broadcast bit)
            m.emit(0xc4)                                                                                 // VPCONFLICTD opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 16)                                                          // ModRM/SIB/disp; scale 16 = full 128-bit vector (EVEX disp8*N)
        })
    }
    // VPCONFLICTD m256/m32bcst, ymm{k}{z}
    if isM256M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0])) // EVEX prefix via helper (0b01 = 256-bit)
            m.emit(0xc4)                                                                                 // VPCONFLICTD opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 32)                                                          // disp8*N scale 32 = full 256-bit vector
        })
    }
    // VPCONFLICTD m512/m32bcst, zmm{k}{z}
    if isM512M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0])) // EVEX prefix via helper (0b10 = 512-bit)
            m.emit(0xc4)                                                                                 // VPCONFLICTD opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 64)                                                          // disp8*N scale 64 = full 512-bit vector
        })
    }
    // VPCONFLICTD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // EVEX P0: inverted register-extension bits of v1/v0
            m.emit(0x7d)                                                                   // EVEX P1: constant here (W=0, vvvv unused, pp=66)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)                                // EVEX P2: z/merge bit, mask register, 0x08 selects 128-bit length
            m.emit(0xc4)                                                                   // VPCONFLICTD opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                                  // ModRM (register-direct): reg = dest v1, rm = source v0
        })
    }
    // VPCONFLICTD ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // EVEX P0: inverted register-extension bits of v1/v0
            m.emit(0x7d)                                                                   // EVEX P1: constant here (W=0, vvvv unused, pp=66)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)                                // EVEX P2: 0x28 selects 256-bit length
            m.emit(0xc4)                                                                   // VPCONFLICTD opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                                  // ModRM (register-direct): reg = dest v1, rm = source v0
        })
    }
    // VPCONFLICTD zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // EVEX P0: inverted register-extension bits of v1/v0
            m.emit(0x7d)                                                                   // EVEX P1: constant here (W=0, vvvv unused, pp=66)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                                // EVEX P2: 0x48 selects 512-bit length
            m.emit(0xc4)                                                                   // VPCONFLICTD opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                                  // ModRM (register-direct): reg = dest v1, rm = source v0
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPCONFLICTD")
    }
    return p
}
 68242  
// VPCONFLICTQ performs "Detect Conflicts Within a Vector of Packed Quadword Values into Dense Memory/Register".
//
// Mnemonic        : VPCONFLICTQ
// Supported forms : (6 forms)
//
//    * VPCONFLICTQ m128/m64bcst, xmm{k}{z}    [AVX512CD,AVX512VL]
//    * VPCONFLICTQ m256/m64bcst, ymm{k}{z}    [AVX512CD,AVX512VL]
//    * VPCONFLICTQ m512/m64bcst, zmm{k}{z}    [AVX512CD]
//    * VPCONFLICTQ xmm, xmm{k}{z}             [AVX512CD,AVX512VL]
//    * VPCONFLICTQ ymm, ymm{k}{z}             [AVX512CD,AVX512VL]
//    * VPCONFLICTQ zmm, zmm{k}{z}             [AVX512CD]
//
// NOTE(review): generated by mkasm_amd64.py — fix the generator, not this
// function. Quadword twin of VPCONFLICTD: same opcode (0xc4) but EVEX.W=1
// (P1 byte 0xfd vs 0x7d, evex opcode arg 0x85 vs 0x05) and a qword
// broadcast in the memory forms.
func (self *Program) VPCONFLICTQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPCONFLICTQ", 2, Operands { v0, v1 })
    // VPCONFLICTQ m128/m64bcst, xmm{k}{z}
    if isM128M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD) // sub-512-bit forms additionally need AVX-512VL
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0])) // EVEX prefix via helper (0x85 carries W=1; 0b00 = 128-bit; bcode = broadcast bit)
            m.emit(0xc4)                                                                                 // VPCONFLICTQ opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 16)                                                          // ModRM/SIB/disp; scale 16 = full 128-bit vector (EVEX disp8*N)
        })
    }
    // VPCONFLICTQ m256/m64bcst, ymm{k}{z}
    if isM256M64bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0])) // EVEX prefix via helper (0b01 = 256-bit)
            m.emit(0xc4)                                                                                 // VPCONFLICTQ opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 32)                                                          // disp8*N scale 32 = full 256-bit vector
        })
    }
    // VPCONFLICTQ m512/m64bcst, zmm{k}{z}
    if isM512M64bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0])) // EVEX prefix via helper (0b10 = 512-bit)
            m.emit(0xc4)                                                                                 // VPCONFLICTQ opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 64)                                                          // disp8*N scale 64 = full 512-bit vector
        })
    }
    // VPCONFLICTQ xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // EVEX P0: inverted register-extension bits of v1/v0
            m.emit(0xfd)                                                                   // EVEX P1: W=1 (quadword), vvvv unused, pp=66
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)                                // EVEX P2: z/merge bit, mask register, 0x08 selects 128-bit length
            m.emit(0xc4)                                                                   // VPCONFLICTQ opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                                  // ModRM (register-direct): reg = dest v1, rm = source v0
        })
    }
    // VPCONFLICTQ ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // EVEX P0: inverted register-extension bits of v1/v0
            m.emit(0xfd)                                                                   // EVEX P1: W=1 (quadword), vvvv unused, pp=66
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)                                // EVEX P2: 0x28 selects 256-bit length
            m.emit(0xc4)                                                                   // VPCONFLICTQ opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                                  // ModRM (register-direct): reg = dest v1, rm = source v0
        })
    }
    // VPCONFLICTQ zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // EVEX P0: inverted register-extension bits of v1/v0
            m.emit(0xfd)                                                                   // EVEX P1: W=1 (quadword), vvvv unused, pp=66
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                                // EVEX P2: 0x48 selects 512-bit length
            m.emit(0xc4)                                                                   // VPCONFLICTQ opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                                  // ModRM (register-direct): reg = dest v1, rm = source v0
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPCONFLICTQ")
    }
    return p
}
 68331  
// VPERM2F128 performs "Permute Floating-Point Values".
//
// Mnemonic        : VPERM2F128
// Supported forms : (2 forms)
//
//    * VPERM2F128 imm8, ymm, ymm, ymm     [AVX]
//    * VPERM2F128 imm8, m256, ymm, ymm    [AVX]
//
// NOTE(review): generated by mkasm_amd64.py — fix the generator, not this
// function. VEX-encoded; the imm8 operand (v0) selects the 128-bit lanes
// to combine, v3 is the destination register.
func (self *Program) VPERM2F128(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPERM2F128", 4, Operands { v0, v1, v2, v3 })
    // VPERM2F128 imm8, ymm, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                           // 3-byte VEX escape
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5)) // VEX byte 1: 0F3A map with high register bits of v3/v1 folded in (inverted)
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                     // VEX byte 2: vvvv = second source v2, L=1 (256-bit), pp=66
            m.emit(0x06)                                           // VPERM2F128 opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))          // ModRM (register-direct): reg = dest v3, rm = first source v1
            m.imm1(toImmAny(v[0]))                                 // trailing imm8 lane selector
        })
    }
    // VPERM2F128 imm8, m256, ymm, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2])) // 3-byte VEX prefix (map 0b11 = 0F3A) built by helper for the memory form
            m.emit(0x06)                                                    // VPERM2F128 opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 1)                              // ModRM/SIB/displacement for the memory operand (disp scale 1)
            m.imm1(toImmAny(v[0]))                                          // trailing imm8 lane selector
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPERM2F128")
    }
    return p
}
 68371  
// VPERM2I128 performs "Permute 128-Bit Integer Values".
//
// Mnemonic        : VPERM2I128
// Supported forms : (2 forms)
//
//    * VPERM2I128 imm8, ymm, ymm, ymm     [AVX2]
//    * VPERM2I128 imm8, m256, ymm, ymm    [AVX2]
//
// NOTE(review): generated by mkasm_amd64.py — fix the generator, not this
// function. Integer twin of VPERM2F128: same VEX encoding with opcode 0x46
// and an AVX2 (rather than AVX) ISA requirement.
func (self *Program) VPERM2I128(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPERM2I128", 4, Operands { v0, v1, v2, v3 })
    // VPERM2I128 imm8, ymm, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                           // 3-byte VEX escape
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5)) // VEX byte 1: 0F3A map with high register bits of v3/v1 folded in (inverted)
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                     // VEX byte 2: vvvv = second source v2, L=1 (256-bit), pp=66
            m.emit(0x46)                                           // VPERM2I128 opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))          // ModRM (register-direct): reg = dest v3, rm = first source v1
            m.imm1(toImmAny(v[0]))                                 // trailing imm8 lane selector
        })
    }
    // VPERM2I128 imm8, m256, ymm, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2])) // 3-byte VEX prefix (map 0b11 = 0F3A) built by helper for the memory form
            m.emit(0x46)                                                    // VPERM2I128 opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 1)                              // ModRM/SIB/displacement for the memory operand (disp scale 1)
            m.imm1(toImmAny(v[0]))                                          // trailing imm8 lane selector
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPERM2I128")
    }
    return p
}
 68411  
// VPERMB performs "Permute Byte Integers".
//
// Mnemonic        : VPERMB
// Supported forms : (6 forms)
//
//    * VPERMB xmm, xmm, xmm{k}{z}     [AVX512VBMI,AVX512VL]
//    * VPERMB m128, xmm, xmm{k}{z}    [AVX512VBMI,AVX512VL]
//    * VPERMB ymm, ymm, ymm{k}{z}     [AVX512VBMI,AVX512VL]
//    * VPERMB m256, ymm, ymm{k}{z}    [AVX512VBMI,AVX512VL]
//    * VPERMB zmm, zmm, zmm{k}{z}     [AVX512VBMI]
//    * VPERMB m512, zmm, zmm{k}{z}    [AVX512VBMI]
//
func (self *Program) VPERMB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMB", 3, Operands { v0, v1, v2 })
    // Each matching operand form appends one encoder (opcode 0x8d). Register
    // forms assemble the 4-byte EVEX prefix (0x62) inline; memory forms build
    // it via m.evex and emit ModRM/SIB scaled to the operand width.
    // VPERMB xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x8d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMB m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x8d)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPERMB ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x8d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMB m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x8d)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPERMB zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x8d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMB m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x8d)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // No encoder was registered: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VPERMB")
    }
    return p
}
 68500  
// VPERMD performs "Permute Doubleword Integers".
//
// Mnemonic        : VPERMD
// Supported forms : (6 forms)
//
//    * VPERMD ymm, ymm, ymm                   [AVX2]
//    * VPERMD m256, ymm, ymm                  [AVX2]
//    * VPERMD m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPERMD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPERMD m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPERMD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMD", 3, Operands { v0, v1, v2 })
    // Each matching operand form appends one encoder (opcode 0x36). The AVX2
    // forms use the VEX prefix (0xc4 inline, or m.vex3 for memory); the
    // AVX-512 forms use the EVEX prefix (0x62 inline, or m.evex for memory).
    // VPERMD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x36)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x36)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPERMD m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x36)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPERMD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x36)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMD m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x36)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPERMD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x36)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VPERMD")
    }
    return p
}
 68588  
// VPERMI2B performs "Full Permute of Bytes From Two Tables Overwriting the Index".
//
// Mnemonic        : VPERMI2B
// Supported forms : (6 forms)
//
//    * VPERMI2B xmm, xmm, xmm{k}{z}     [AVX512VBMI,AVX512VL]
//    * VPERMI2B m128, xmm, xmm{k}{z}    [AVX512VBMI,AVX512VL]
//    * VPERMI2B ymm, ymm, ymm{k}{z}     [AVX512VBMI,AVX512VL]
//    * VPERMI2B m256, ymm, ymm{k}{z}    [AVX512VBMI,AVX512VL]
//    * VPERMI2B zmm, zmm, zmm{k}{z}     [AVX512VBMI]
//    * VPERMI2B m512, zmm, zmm{k}{z}    [AVX512VBMI]
//
func (self *Program) VPERMI2B(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMI2B", 3, Operands { v0, v1, v2 })
    // Each matching operand form appends one encoder (opcode 0x75). Register
    // forms assemble the 4-byte EVEX prefix (0x62) inline; memory forms build
    // it via m.evex and emit ModRM/SIB scaled to the operand width.
    // VPERMI2B xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x75)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMI2B m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x75)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPERMI2B ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x75)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMI2B m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x75)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPERMI2B zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x75)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMI2B m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x75)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // No encoder was registered: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VPERMI2B")
    }
    return p
}
 68677  
// VPERMI2D performs "Full Permute of Doublewords From Two Tables Overwriting the Index".
//
// Mnemonic        : VPERMI2D
// Supported forms : (6 forms)
//
//    * VPERMI2D m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPERMI2D zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPERMI2D m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMI2D xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPERMI2D m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMI2D ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPERMI2D(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMI2D", 3, Operands { v0, v1, v2 })
    // Each matching operand form appends one encoder (opcode 0x76). Register
    // forms assemble the 4-byte EVEX prefix (0x62) inline; memory/broadcast
    // forms build it via m.evex (bcode carries the broadcast bit).
    // VPERMI2D m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x76)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPERMI2D zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x76)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMI2D m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x76)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPERMI2D xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x76)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMI2D m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x76)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPERMI2D ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x76)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VPERMI2D")
    }
    return p
}
 68766  
// VPERMI2PD performs "Full Permute of Double-Precision Floating-Point Values From Two Tables Overwriting the Index".
//
// Mnemonic        : VPERMI2PD
// Supported forms : (6 forms)
//
//    * VPERMI2PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPERMI2PD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPERMI2PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMI2PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPERMI2PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMI2PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPERMI2PD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMI2PD", 3, Operands { v0, v1, v2 })
    // Each matching operand form appends one encoder (opcode 0x77). Register
    // forms assemble the 4-byte EVEX prefix (0x62) inline (note 0xfd vs 0x7d
    // in the sibling 32-bit variants — presumably EVEX.W set for the 64-bit
    // element width; confirm against the SDM). Memory/broadcast forms build
    // the prefix via m.evex.
    // VPERMI2PD m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x77)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPERMI2PD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x77)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMI2PD m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x77)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPERMI2PD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x77)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMI2PD m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x77)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPERMI2PD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x77)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VPERMI2PD")
    }
    return p
}
 68855  
// VPERMI2PS performs "Full Permute of Single-Precision Floating-Point Values From Two Tables Overwriting the Index".
//
// Mnemonic        : VPERMI2PS
// Supported forms : (6 forms)
//
//    * VPERMI2PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPERMI2PS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPERMI2PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMI2PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPERMI2PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMI2PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPERMI2PS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMI2PS", 3, Operands { v0, v1, v2 })
    // Each matching operand form appends one encoder (opcode 0x77, same as
    // VPERMI2PD but with the 32-bit-element prefix variant). Register forms
    // assemble the 4-byte EVEX prefix (0x62) inline; memory/broadcast forms
    // build it via m.evex (bcode carries the broadcast bit).
    // VPERMI2PS m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x77)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPERMI2PS zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x77)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMI2PS m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x77)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPERMI2PS xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x77)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMI2PS m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x77)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPERMI2PS ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x77)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VPERMI2PS")
    }
    return p
}
 68944  
// VPERMI2Q performs "Full Permute of Quadwords From Two Tables Overwriting the Index".
//
// Mnemonic        : VPERMI2Q
// Supported forms : (6 forms)
//
//    * VPERMI2Q m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPERMI2Q zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPERMI2Q m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMI2Q xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPERMI2Q m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMI2Q ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPERMI2Q(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMI2Q", 3, Operands { v0, v1, v2 })
    // Each matching operand form appends one encoder (opcode 0x76, same as
    // VPERMI2D but with the 64-bit-element prefix variant: 0xfd / 0x85
    // instead of 0x7d / 0x05). Register forms assemble the 4-byte EVEX
    // prefix (0x62) inline; memory/broadcast forms build it via m.evex.
    // VPERMI2Q m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x76)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPERMI2Q zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x76)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMI2Q m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x76)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPERMI2Q xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x76)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMI2Q m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x76)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPERMI2Q ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x76)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VPERMI2Q")
    }
    return p
}
 69033  
// VPERMI2W performs "Full Permute of Words From Two Tables Overwriting the Index".
//
// Mnemonic        : VPERMI2W
// Supported forms : (6 forms)
//
//    * VPERMI2W zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPERMI2W m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPERMI2W xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPERMI2W m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPERMI2W ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPERMI2W m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Each `if` below matches exactly one operand form, records the required ISA
// extensions, and registers an encoder closure via p.add. If no form matched,
// p.len stays 0 and the function panics at the bottom.
func (self *Program) VPERMI2W(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMI2W", 3, Operands { v0, v1, v2 })
    // VPERMI2W zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-to-register form: hand-assembled 4-byte EVEX prefix
            // (leading 0x62), then the 0x75 opcode and a register-direct
            // ModRM byte (0xc0 | reg<<3 | rm).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x75)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMI2W m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory-operand form: m.evex builds the EVEX prefix, m.mrsd emits
            // ModRM/SIB/displacement. The trailing literal (64/32/16 below) is
            // presumably the EVEX disp8*N compression factor matching the
            // operand width in bytes — TODO confirm against the encoder.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x75)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPERMI2W xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x75)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMI2W m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x75)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPERMI2W ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x75)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMI2W m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x75)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No operand form matched: this is a caller error, surfaced eagerly.
    if p.len == 0 {
        panic("invalid operands for VPERMI2W")
    }
    return p
}
 69122  
// VPERMIL2PD performs "Permute Two-Source Double-Precision Floating-Point Vectors".
//
// Mnemonic        : VPERMIL2PD
// Supported forms : (6 forms)
//
//    * VPERMIL2PD imm4, xmm, xmm, xmm, xmm     [XOP]
//    * VPERMIL2PD imm4, m128, xmm, xmm, xmm    [XOP]
//    * VPERMIL2PD imm4, xmm, m128, xmm, xmm    [XOP]
//    * VPERMIL2PD imm4, ymm, ymm, ymm, ymm     [XOP]
//    * VPERMIL2PD imm4, m256, ymm, ymm, ymm    [XOP]
//    * VPERMIL2PD imm4, ymm, m256, ymm, ymm    [XOP]
//
// All forms belong to AMD's XOP extension (DomainAMDSpecific). Each `if`
// matches one operand form and registers an encoder via p.add; if nothing
// matched the function panics at the bottom.
func (self *Program) VPERMIL2PD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}, v4 interface{}) *Instruction {
    p := self.alloc("VPERMIL2PD", 5, Operands { v0, v1, v2, v3, v4 })
    // VPERMIL2PD imm4, xmm, xmm, xmm, xmm
    if isImm4(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) && isXMM(v4) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // Two alternative encodings are registered for the all-register form:
        // they differ in which source register is placed in the ModRM r/m slot
        // (byte 2 starts from 0x79 vs 0xf9 — presumably the VEX.W bit flipping
        // the operand order; TODO confirm). The last byte packs the fourth
        // register in is4 (bits 7:4) together with the 4-bit immediate.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[4]) << 7) ^ (hcode(v[2]) << 5))
            m.emit(0x79 ^ (hlcode(v[3]) << 3))
            m.emit(0x49)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
            m.emit((hlcode(v[1]) << 4) | imml(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[4]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0xf9 ^ (hlcode(v[3]) << 3))
            m.emit(0x49)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[1]))
            m.emit((hlcode(v[2]) << 4) | imml(v[0]))
        })
    }
    // VPERMIL2PD imm4, m128, xmm, xmm, xmm
    if isImm4(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) && isXMM(v4) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: m.vex3 builds the 3-byte VEX prefix, m.mrsd emits
            // ModRM/SIB/displacement (scale factor 1 — no disp8 compression).
            m.vex3(0xc4, 0b11, 0x81, hcode(v[4]), addr(v[1]), hlcode(v[3]))
            m.emit(0x49)
            m.mrsd(lcode(v[4]), addr(v[1]), 1)
            m.emit((hlcode(v[2]) << 4) | imml(v[0]))
        })
    }
    // VPERMIL2PD imm4, xmm, m128, xmm, xmm
    if isImm4(v0) && isXMM(v1) && isM128(v2) && isXMM(v3) && isXMM(v4) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[4]), addr(v[2]), hlcode(v[3]))
            m.emit(0x49)
            m.mrsd(lcode(v[4]), addr(v[2]), 1)
            m.emit((hlcode(v[1]) << 4) | imml(v[0]))
        })
    }
    // VPERMIL2PD imm4, ymm, ymm, ymm, ymm
    if isImm4(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) && isYMM(v4) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[4]) << 7) ^ (hcode(v[2]) << 5))
            m.emit(0x7d ^ (hlcode(v[3]) << 3))
            m.emit(0x49)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
            m.emit((hlcode(v[1]) << 4) | imml(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[4]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0xfd ^ (hlcode(v[3]) << 3))
            m.emit(0x49)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[1]))
            m.emit((hlcode(v[2]) << 4) | imml(v[0]))
        })
    }
    // VPERMIL2PD imm4, m256, ymm, ymm, ymm
    if isImm4(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) && isYMM(v4) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x85, hcode(v[4]), addr(v[1]), hlcode(v[3]))
            m.emit(0x49)
            m.mrsd(lcode(v[4]), addr(v[1]), 1)
            m.emit((hlcode(v[2]) << 4) | imml(v[0]))
        })
    }
    // VPERMIL2PD imm4, ymm, m256, ymm, ymm
    if isImm4(v0) && isYMM(v1) && isM256(v2) && isYMM(v3) && isYMM(v4) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[4]), addr(v[2]), hlcode(v[3]))
            m.emit(0x49)
            m.mrsd(lcode(v[4]), addr(v[2]), 1)
            m.emit((hlcode(v[1]) << 4) | imml(v[0]))
        })
    }
    // No operand form matched: caller error, surfaced eagerly.
    if p.len == 0 {
        panic("invalid operands for VPERMIL2PD")
    }
    return p
}
 69228  
// VPERMIL2PS performs "Permute Two-Source Single-Precision Floating-Point Vectors".
//
// Mnemonic        : VPERMIL2PS
// Supported forms : (6 forms)
//
//    * VPERMIL2PS imm4, xmm, xmm, xmm, xmm     [XOP]
//    * VPERMIL2PS imm4, m128, xmm, xmm, xmm    [XOP]
//    * VPERMIL2PS imm4, xmm, m128, xmm, xmm    [XOP]
//    * VPERMIL2PS imm4, ymm, ymm, ymm, ymm     [XOP]
//    * VPERMIL2PS imm4, m256, ymm, ymm, ymm    [XOP]
//    * VPERMIL2PS imm4, ymm, m256, ymm, ymm    [XOP]
//
// Single-precision sibling of VPERMIL2PD: identical encoding structure, but
// with opcode byte 0x48 instead of 0x49. All forms are AMD XOP
// (DomainAMDSpecific); unmatched operands panic at the bottom.
func (self *Program) VPERMIL2PS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}, v4 interface{}) *Instruction {
    p := self.alloc("VPERMIL2PS", 5, Operands { v0, v1, v2, v3, v4 })
    // VPERMIL2PS imm4, xmm, xmm, xmm, xmm
    if isImm4(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) && isXMM(v4) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // Two alternative encodings for the all-register form (byte 2 starts
        // from 0x79 vs 0xf9, swapping which source lands in ModRM r/m —
        // presumably via the VEX.W bit; TODO confirm). The trailing byte packs
        // the is4 register (bits 7:4) with the 4-bit immediate.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[4]) << 7) ^ (hcode(v[2]) << 5))
            m.emit(0x79 ^ (hlcode(v[3]) << 3))
            m.emit(0x48)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
            m.emit((hlcode(v[1]) << 4) | imml(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[4]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0xf9 ^ (hlcode(v[3]) << 3))
            m.emit(0x48)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[1]))
            m.emit((hlcode(v[2]) << 4) | imml(v[0]))
        })
    }
    // VPERMIL2PS imm4, m128, xmm, xmm, xmm
    if isImm4(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) && isXMM(v4) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // m.vex3 builds the 3-byte VEX prefix; m.mrsd emits
            // ModRM/SIB/displacement (scale 1 — no disp8 compression).
            m.vex3(0xc4, 0b11, 0x81, hcode(v[4]), addr(v[1]), hlcode(v[3]))
            m.emit(0x48)
            m.mrsd(lcode(v[4]), addr(v[1]), 1)
            m.emit((hlcode(v[2]) << 4) | imml(v[0]))
        })
    }
    // VPERMIL2PS imm4, xmm, m128, xmm, xmm
    if isImm4(v0) && isXMM(v1) && isM128(v2) && isXMM(v3) && isXMM(v4) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[4]), addr(v[2]), hlcode(v[3]))
            m.emit(0x48)
            m.mrsd(lcode(v[4]), addr(v[2]), 1)
            m.emit((hlcode(v[1]) << 4) | imml(v[0]))
        })
    }
    // VPERMIL2PS imm4, ymm, ymm, ymm, ymm
    if isImm4(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) && isYMM(v4) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[4]) << 7) ^ (hcode(v[2]) << 5))
            m.emit(0x7d ^ (hlcode(v[3]) << 3))
            m.emit(0x48)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
            m.emit((hlcode(v[1]) << 4) | imml(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[4]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0xfd ^ (hlcode(v[3]) << 3))
            m.emit(0x48)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[1]))
            m.emit((hlcode(v[2]) << 4) | imml(v[0]))
        })
    }
    // VPERMIL2PS imm4, m256, ymm, ymm, ymm
    if isImm4(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) && isYMM(v4) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x85, hcode(v[4]), addr(v[1]), hlcode(v[3]))
            m.emit(0x48)
            m.mrsd(lcode(v[4]), addr(v[1]), 1)
            m.emit((hlcode(v[2]) << 4) | imml(v[0]))
        })
    }
    // VPERMIL2PS imm4, ymm, m256, ymm, ymm
    if isImm4(v0) && isYMM(v1) && isM256(v2) && isYMM(v3) && isYMM(v4) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[4]), addr(v[2]), hlcode(v[3]))
            m.emit(0x48)
            m.mrsd(lcode(v[4]), addr(v[2]), 1)
            m.emit((hlcode(v[1]) << 4) | imml(v[0]))
        })
    }
    // No operand form matched: caller error, surfaced eagerly.
    if p.len == 0 {
        panic("invalid operands for VPERMIL2PS")
    }
    return p
}
 69334  
// VPERMILPD performs "Permute Double-Precision Floating-Point Values".
//
// Mnemonic        : VPERMILPD
// Supported forms : (20 forms)
//
//    * VPERMILPD imm8, xmm, xmm                   [AVX]
//    * VPERMILPD xmm, xmm, xmm                    [AVX]
//    * VPERMILPD m128, xmm, xmm                   [AVX]
//    * VPERMILPD imm8, m128, xmm                  [AVX]
//    * VPERMILPD imm8, ymm, ymm                   [AVX]
//    * VPERMILPD ymm, ymm, ymm                    [AVX]
//    * VPERMILPD m256, ymm, ymm                   [AVX]
//    * VPERMILPD imm8, m256, ymm                  [AVX]
//    * VPERMILPD imm8, m512/m64bcst, zmm{k}{z}    [AVX512F]
//    * VPERMILPD m512/m64bcst, zmm, zmm{k}{z}     [AVX512F]
//    * VPERMILPD imm8, zmm, zmm{k}{z}             [AVX512F]
//    * VPERMILPD zmm, zmm, zmm{k}{z}              [AVX512F]
//    * VPERMILPD imm8, m128/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMILPD imm8, m256/m64bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMILPD m128/m64bcst, xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPERMILPD imm8, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPERMILPD xmm, xmm, xmm{k}{z}              [AVX512F,AVX512VL]
//    * VPERMILPD m256/m64bcst, ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VPERMILPD imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//    * VPERMILPD ymm, ymm, ymm{k}{z}              [AVX512F,AVX512VL]
//
// Two opcode bytes appear throughout: 0x05 for the immediate-selector forms
// and 0x0d for the register/memory-selector forms. VEX-encoded forms are
// plain AVX; EVEX-encoded forms (emit(0x62)/m.evex) are the AVX-512 variants
// with masking {k} and zeroing {z}. Unmatched operands panic at the bottom.
func (self *Program) VPERMILPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMILPD", 3, Operands { v0, v1, v2 })
    // VPERMILPD imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix assembled by hand, opcode 0x05, register-direct
            // ModRM, then the 8-bit selector immediate.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79)
            m.emit(0x05)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x0d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMILPD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // m.vex3 builds the VEX prefix for a memory operand; m.mrsd emits
            // ModRM/SIB/displacement (scale 1 — no disp8 compression for VEX).
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x0d)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPERMILPD imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x05)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPD imm8, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d)
            m.emit(0x05)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x0d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMILPD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x0d)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPERMILPD imm8, m256, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x05)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPD imm8, m512/m64bcst, zmm{k}{z}
    if isImm8(v0) && isM512M64bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form: the trailing literal on m.mrsd (64/32/16) is
            // presumably the disp8*N compression factor matching the operand
            // width; bcode(v[1]) carries the broadcast bit — TODO confirm.
            m.evex(0b11, 0x85, 0b10, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x05)
            m.mrsd(lcode(v[2]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPD m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x0d)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPERMILPD imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 4-byte EVEX prefix (leading 0x62); kcode/zcode
            // fold the {k} mask register and {z} zeroing flag into byte 3.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)
            m.emit(0x05)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x0d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMILPD imm8, m128/m64bcst, xmm{k}{z}
    if isImm8(v0) && isM128M64bcst(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b00, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x05)
            m.mrsd(lcode(v[2]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPD imm8, m256/m64bcst, ymm{k}{z}
    if isImm8(v0) && isM256M64bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b01, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x05)
            m.mrsd(lcode(v[2]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPD m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x0d)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPERMILPD imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x08)
            m.emit(0x05)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x0d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMILPD m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x0d)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPERMILPD imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x28)
            m.emit(0x05)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x0d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand form matched: caller error, surfaced eagerly.
    if p.len == 0 {
        panic("invalid operands for VPERMILPD")
    }
    return p
}
 69604  
 69605  // VPERMILPS performs "Permute Single-Precision Floating-Point Values".
 69606  //
 69607  // Mnemonic        : VPERMILPS
 69608  // Supported forms : (20 forms)
 69609  //
 69610  //    * VPERMILPS imm8, xmm, xmm                   [AVX]
 69611  //    * VPERMILPS xmm, xmm, xmm                    [AVX]
 69612  //    * VPERMILPS m128, xmm, xmm                   [AVX]
 69613  //    * VPERMILPS imm8, m128, xmm                  [AVX]
 69614  //    * VPERMILPS imm8, ymm, ymm                   [AVX]
 69615  //    * VPERMILPS ymm, ymm, ymm                    [AVX]
 69616  //    * VPERMILPS m256, ymm, ymm                   [AVX]
 69617  //    * VPERMILPS imm8, m256, ymm                  [AVX]
 69618  //    * VPERMILPS imm8, m512/m32bcst, zmm{k}{z}    [AVX512F]
 69619  //    * VPERMILPS m512/m32bcst, zmm, zmm{k}{z}     [AVX512F]
 69620  //    * VPERMILPS imm8, zmm, zmm{k}{z}             [AVX512F]
 69621  //    * VPERMILPS zmm, zmm, zmm{k}{z}              [AVX512F]
 69622  //    * VPERMILPS imm8, m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
 69623  //    * VPERMILPS imm8, m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
 69624  //    * VPERMILPS m128/m32bcst, xmm, xmm{k}{z}     [AVX512F,AVX512VL]
 69625  //    * VPERMILPS imm8, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 69626  //    * VPERMILPS xmm, xmm, xmm{k}{z}              [AVX512F,AVX512VL]
 69627  //    * VPERMILPS m256/m32bcst, ymm, ymm{k}{z}     [AVX512F,AVX512VL]
 69628  //    * VPERMILPS imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 69629  //    * VPERMILPS ymm, ymm, ymm{k}{z}              [AVX512F,AVX512VL]
 69630  //
func (self *Program) VPERMILPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMILPS", 3, Operands { v0, v1, v2 })
    // Each operand pattern that matches below registers one candidate encoder
    // closure via p.add(); the literal bytes emitted inside the closures
    // (VEX/EVEX prefix bytes, opcode, ModRM) are produced by mkasm_amd64.py
    // and must stay byte-exact. If no pattern matches, the function panics.
    // VPERMILPS imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX prefix escape
            m.emit(0xe3 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))  // VEX byte 1: inverted R/B high-register bits + map select
            m.emit(0x79)
            m.emit(0x04)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))           // ModRM: register-direct, reg=dst, rm=src
            m.imm1(toImmAny(v[0]))                                  // trailing imm8 selector
        })
    }
    // VPERMILPS xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x0c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMILPS m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x0c)
            // ModRM/SIB/disp for the memory operand; trailing arg is the
            // disp8 scale (1 = no EVEX compressed displacement).
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPERMILPS imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x04)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPS imm8, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d)
            m.emit(0x04)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPS ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x0c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMILPS m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x0c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPERMILPS imm8, m256, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x04)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPS imm8, m512/m32bcst, zmm{k}{z}
    if isImm8(v0) && isM512M32bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded memory form; the disp8 scale (64) matches the full
            // 512-bit vector width for compressed displacement.
            m.evex(0b11, 0x05, 0b10, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x04)
            m.mrsd(lcode(v[2]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPS m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x0c)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPERMILPS imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                // EVEX prefix
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)                             // EVEX byte 3: z/k mask bits + L'L=512
            m.emit(0x04)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPS zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x0c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMILPS imm8, m128/m32bcst, xmm{k}{z}
    if isImm8(v0) && isM128M32bcst(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b00, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x04)
            m.mrsd(lcode(v[2]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPS imm8, m256/m32bcst, ymm{k}{z}
    if isImm8(v0) && isM256M32bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b01, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x04)
            m.mrsd(lcode(v[2]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPS m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x0c)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPERMILPS imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x08)
            m.emit(0x04)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPS xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x0c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMILPS m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x0c)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPERMILPS imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x28)
            m.emit(0x04)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPS ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x0c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand pattern matched: the caller passed an unsupported combination.
    if p.len == 0 {
        panic("invalid operands for VPERMILPS")
    }
    return p
}
 69874  
 69875  // VPERMPD performs "Permute Double-Precision Floating-Point Elements".
 69876  //
 69877  // Mnemonic        : VPERMPD
 69878  // Supported forms : (10 forms)
 69879  //
 69880  //    * VPERMPD imm8, ymm, ymm                   [AVX2]
 69881  //    * VPERMPD imm8, m256, ymm                  [AVX2]
 69882  //    * VPERMPD imm8, m512/m64bcst, zmm{k}{z}    [AVX512F]
 69883  //    * VPERMPD m512/m64bcst, zmm, zmm{k}{z}     [AVX512F]
 69884  //    * VPERMPD imm8, zmm, zmm{k}{z}             [AVX512F]
 69885  //    * VPERMPD zmm, zmm, zmm{k}{z}              [AVX512F]
 69886  //    * VPERMPD imm8, m256/m64bcst, ymm{k}{z}    [AVX512F,AVX512VL]
 69887  //    * VPERMPD m256/m64bcst, ymm, ymm{k}{z}     [AVX512F,AVX512VL]
 69888  //    * VPERMPD imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 69889  //    * VPERMPD ymm, ymm, ymm{k}{z}              [AVX512F,AVX512VL]
 69890  //
func (self *Program) VPERMPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMPD", 3, Operands { v0, v1, v2 })
    // Each operand pattern that matches below registers one candidate encoder
    // closure via p.add(); the literal bytes emitted inside the closures
    // (VEX/EVEX prefix bytes, opcode, ModRM) are produced by mkasm_amd64.py
    // and must stay byte-exact. If no pattern matches, the function panics.
    // VPERMPD imm8, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX prefix escape
            m.emit(0xe3 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))  // VEX byte 1: inverted R/B bits + map select
            m.emit(0xfd)
            m.emit(0x01)                                            // opcode (imm8-selected permute)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))           // ModRM: register-direct, reg=dst, rm=src
            m.imm1(toImmAny(v[0]))                                  // trailing imm8 selector
        })
    }
    // VPERMPD imm8, m256, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x85, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x01)
            // ModRM/SIB/disp for the memory operand; trailing arg is the
            // disp8 scale (1 = no EVEX compressed displacement).
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMPD imm8, m512/m64bcst, zmm{k}{z}
    if isImm8(v0) && isM512M64bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b10, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x01)
            m.mrsd(lcode(v[2]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMPD m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x16)                                            // opcode (vector-selected permute)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPERMPD imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                            // EVEX prefix
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)
            m.emit(0x01)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMPD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x16)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMPD imm8, m256/m64bcst, ymm{k}{z}
    if isImm8(v0) && isM256M64bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b01, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x01)
            m.mrsd(lcode(v[2]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMPD m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x16)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPERMPD imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x28)
            m.emit(0x01)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMPD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x16)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand pattern matched: the caller passed an unsupported combination.
    if p.len == 0 {
        panic("invalid operands for VPERMPD")
    }
    return p
}
 70018  
 70019  // VPERMPS performs "Permute Single-Precision Floating-Point Elements".
 70020  //
 70021  // Mnemonic        : VPERMPS
 70022  // Supported forms : (6 forms)
 70023  //
 70024  //    * VPERMPS ymm, ymm, ymm                   [AVX2]
 70025  //    * VPERMPS m256, ymm, ymm                  [AVX2]
 70026  //    * VPERMPS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
 70027  //    * VPERMPS zmm, zmm, zmm{k}{z}             [AVX512F]
 70028  //    * VPERMPS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 70029  //    * VPERMPS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 70030  //
func (self *Program) VPERMPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMPS", 3, Operands { v0, v1, v2 })
    // Each operand pattern that matches below registers one candidate encoder
    // closure via p.add(); the literal bytes emitted inside the closures
    // (VEX/EVEX prefix bytes, opcode, ModRM) are produced by mkasm_amd64.py
    // and must stay byte-exact. If no pattern matches, the function panics.
    // VPERMPS ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX prefix escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // VEX byte 1: inverted R/B bits + map select
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                      // VEX byte 2: vvvv (second source) + pp/L
            m.emit(0x16)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: register-direct, reg=dst, rm=src
        })
    }
    // VPERMPS m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x16)
            // ModRM/SIB/disp for the memory operand; trailing arg is the
            // disp8 scale (1 = no EVEX compressed displacement).
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPERMPS m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x16)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPERMPS zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                            // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x16)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMPS m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x16)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPERMPS ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x16)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand pattern matched: the caller passed an unsupported combination.
    if p.len == 0 {
        panic("invalid operands for VPERMPS")
    }
    return p
}
 70106  
 70107  // VPERMQ performs "Permute Quadword Integers".
 70108  //
 70109  // Mnemonic        : VPERMQ
 70110  // Supported forms : (10 forms)
 70111  //
 70112  //    * VPERMQ imm8, ymm, ymm                   [AVX2]
 70113  //    * VPERMQ imm8, m256, ymm                  [AVX2]
 70114  //    * VPERMQ imm8, m512/m64bcst, zmm{k}{z}    [AVX512F]
 70115  //    * VPERMQ m512/m64bcst, zmm, zmm{k}{z}     [AVX512F]
 70116  //    * VPERMQ imm8, zmm, zmm{k}{z}             [AVX512F]
 70117  //    * VPERMQ zmm, zmm, zmm{k}{z}              [AVX512F]
 70118  //    * VPERMQ imm8, m256/m64bcst, ymm{k}{z}    [AVX512F,AVX512VL]
 70119  //    * VPERMQ m256/m64bcst, ymm, ymm{k}{z}     [AVX512F,AVX512VL]
 70120  //    * VPERMQ imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 70121  //    * VPERMQ ymm, ymm, ymm{k}{z}              [AVX512F,AVX512VL]
 70122  //
func (self *Program) VPERMQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMQ", 3, Operands { v0, v1, v2 })
    // Each operand pattern that matches below registers one candidate encoder
    // closure via p.add(); the literal bytes emitted inside the closures
    // (VEX/EVEX prefix bytes, opcode, ModRM) are produced by mkasm_amd64.py
    // and must stay byte-exact. If no pattern matches, the function panics.
    // VPERMQ imm8, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX prefix escape
            m.emit(0xe3 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))  // VEX byte 1: inverted R/B bits + map select
            m.emit(0xfd)
            m.emit(0x00)                                            // opcode (imm8-selected permute)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))           // ModRM: register-direct, reg=dst, rm=src
            m.imm1(toImmAny(v[0]))                                  // trailing imm8 selector
        })
    }
    // VPERMQ imm8, m256, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x85, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x00)
            // ModRM/SIB/disp for the memory operand; trailing arg is the
            // disp8 scale (1 = no EVEX compressed displacement).
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMQ imm8, m512/m64bcst, zmm{k}{z}
    if isImm8(v0) && isM512M64bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b10, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x00)
            m.mrsd(lcode(v[2]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x36)                                            // opcode (vector-selected permute)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPERMQ imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                            // EVEX prefix
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)
            m.emit(0x00)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x36)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMQ imm8, m256/m64bcst, ymm{k}{z}
    if isImm8(v0) && isM256M64bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b01, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x00)
            m.mrsd(lcode(v[2]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x36)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPERMQ imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x28)
            m.emit(0x00)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x36)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand pattern matched: the caller passed an unsupported combination.
    if p.len == 0 {
        panic("invalid operands for VPERMQ")
    }
    return p
}
 70250  
 70251  // VPERMT2B performs "Full Permute of Bytes From Two Tables Overwriting a Table".
 70252  //
 70253  // Mnemonic        : VPERMT2B
 70254  // Supported forms : (6 forms)
 70255  //
 70256  //    * VPERMT2B xmm, xmm, xmm{k}{z}     [AVX512VBMI,AVX512VL]
 70257  //    * VPERMT2B m128, xmm, xmm{k}{z}    [AVX512VBMI,AVX512VL]
 70258  //    * VPERMT2B ymm, ymm, ymm{k}{z}     [AVX512VBMI,AVX512VL]
 70259  //    * VPERMT2B m256, ymm, ymm{k}{z}    [AVX512VBMI,AVX512VL]
 70260  //    * VPERMT2B zmm, zmm, zmm{k}{z}     [AVX512VBMI]
 70261  //    * VPERMT2B m512, zmm, zmm{k}{z}    [AVX512VBMI]
 70262  //
func (self *Program) VPERMT2B(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMT2B", 3, Operands { v0, v1, v2 })
    // Each operand pattern that matches below registers one candidate encoder
    // closure via p.add(); the literal bytes emitted inside the closures
    // (EVEX prefix bytes, opcode, ModRM) are produced by mkasm_amd64.py and
    // must stay byte-exact. All forms require AVX512VBMI; the XMM/YMM forms
    // additionally require AVX512VL. If no pattern matches, the function panics.
    // VPERMT2B xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                            // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x7d)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: register-direct, reg=dst, rm=src
        })
    }
    // VPERMT2B m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x7d)
            // ModRM/SIB/disp; trailing arg is the EVEX compressed-disp8
            // scale, equal to the memory operand width in bytes.
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPERMT2B ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x7d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMT2B m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x7d)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPERMT2B zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x7d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMT2B m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x7d)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // No operand pattern matched: the caller passed an unsupported combination.
    if p.len == 0 {
        panic("invalid operands for VPERMT2B")
    }
    return p
}
 70339  
// VPERMT2D performs "Full Permute of Doublewords From Two Tables Overwriting a Table".
//
// Mnemonic        : VPERMT2D
// Supported forms : (6 forms)
//
//    * VPERMT2D m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPERMT2D zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPERMT2D m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMT2D xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPERMT2D m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMT2D ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Encoding: EVEX.66.0F38.W0 7E /r. Each matching operand form registers one
// encoder closure via p.add; operand order is (src, index, dst) per the
// AT&T-style convention used throughout this file.
func (self *Program) VPERMT2D(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMT2D", 3, Operands { v0, v1, v2 })
    // VPERMT2D m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0])) // EVEX helper: map 0F38, W0+0x66, L'L=0b10 (512-bit), with broadcast bit from v[0]
            m.emit(0x7e)                          // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)   // ModRM/SIB/disp; 64 appears to be the disp8 compression scale for a full 512-bit access
        })
    }
    // VPERMT2D zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 4-byte EVEX prefix (register-register form):
            m.emit(0x62)                                                                    // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: 0F38 map base with inverted high register-extension bits folded in
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // P1: W0 + 0x66 prefix bits, XORed with inverted low vvvv bits of v[1]
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // P2: z, V', aaa mask; 0x40 selects 512-bit vector length
            m.emit(0x7e)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM register-direct: reg=v[2], rm=v[0]
        })
    }
    // VPERMT2D m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0])) // L'L=0b00 (128-bit)
            m.emit(0x7e)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)   // disp8 scale 16 for a 128-bit access
        })
    }
    // VPERMT2D xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // P2: 0x00 = 128-bit vector length
            m.emit(0x7e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMT2D m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0])) // L'L=0b01 (256-bit)
            m.emit(0x7e)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)   // disp8 scale 32 for a 256-bit access
        })
    }
    // VPERMT2D ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // P2: 0x20 = 256-bit vector length
            m.emit(0x7e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder matched: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VPERMT2D")
    }
    return p
}
 70428  
// VPERMT2PD performs "Full Permute of Double-Precision Floating-Point Values From Two Tables Overwriting a Table".
//
// Mnemonic        : VPERMT2PD
// Supported forms : (6 forms)
//
//    * VPERMT2PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPERMT2PD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPERMT2PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMT2PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPERMT2PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMT2PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Encoding: EVEX.66.0F38.W1 7F /r (W=1 for the 64-bit element size, hence the
// 0xfd P1 base and the 0x85 W/pp argument to m.evex below).
func (self *Program) VPERMT2PD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMT2PD", 3, Operands { v0, v1, v2 })
    // VPERMT2PD m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0])) // map 0F38, W1+0x66, L'L=0b10 (512-bit), broadcast bit from v[0]
            m.emit(0x7f)                          // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)   // ModRM/SIB/disp; disp8 scale 64 for a 512-bit access
        })
    }
    // VPERMT2PD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 4-byte EVEX prefix (register-register form):
            m.emit(0x62)                                                                    // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: 0F38 map base with inverted high register-extension bits
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                              // P1: W1 + 0x66 prefix bits, XORed with inverted low vvvv bits of v[1]
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // P2: z, V', aaa mask; 0x40 = 512-bit vector length
            m.emit(0x7f)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM register-direct: reg=v[2], rm=v[0]
        })
    }
    // VPERMT2PD m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0])) // L'L=0b00 (128-bit)
            m.emit(0x7f)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)   // disp8 scale 16 for a 128-bit access
        })
    }
    // VPERMT2PD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // P2: 0x00 = 128-bit vector length
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMT2PD m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0])) // L'L=0b01 (256-bit)
            m.emit(0x7f)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)   // disp8 scale 32 for a 256-bit access
        })
    }
    // VPERMT2PD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // P2: 0x20 = 256-bit vector length
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder matched: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VPERMT2PD")
    }
    return p
}
 70517  
// VPERMT2PS performs "Full Permute of Single-Precision Floating-Point Values From Two Tables Overwriting a Table".
//
// Mnemonic        : VPERMT2PS
// Supported forms : (6 forms)
//
//    * VPERMT2PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPERMT2PS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPERMT2PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMT2PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPERMT2PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMT2PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Encoding: EVEX.66.0F38.W0 7F /r (W=0 for the 32-bit element size, hence the
// 0x7d P1 base and the 0x05 W/pp argument to m.evex below).
func (self *Program) VPERMT2PS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMT2PS", 3, Operands { v0, v1, v2 })
    // VPERMT2PS m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0])) // map 0F38, W0+0x66, L'L=0b10 (512-bit), broadcast bit from v[0]
            m.emit(0x7f)                          // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)   // ModRM/SIB/disp; disp8 scale 64 for a 512-bit access
        })
    }
    // VPERMT2PS zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 4-byte EVEX prefix (register-register form):
            m.emit(0x62)                                                                    // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: 0F38 map base with inverted high register-extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // P1: W0 + 0x66 prefix bits, XORed with inverted low vvvv bits of v[1]
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // P2: z, V', aaa mask; 0x40 = 512-bit vector length
            m.emit(0x7f)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM register-direct: reg=v[2], rm=v[0]
        })
    }
    // VPERMT2PS m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0])) // L'L=0b00 (128-bit)
            m.emit(0x7f)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)   // disp8 scale 16 for a 128-bit access
        })
    }
    // VPERMT2PS xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // P2: 0x00 = 128-bit vector length
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMT2PS m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0])) // L'L=0b01 (256-bit)
            m.emit(0x7f)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)   // disp8 scale 32 for a 256-bit access
        })
    }
    // VPERMT2PS ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // P2: 0x20 = 256-bit vector length
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder matched: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VPERMT2PS")
    }
    return p
}
 70606  
// VPERMT2Q performs "Full Permute of Quadwords From Two Tables Overwriting a Table".
//
// Mnemonic        : VPERMT2Q
// Supported forms : (6 forms)
//
//    * VPERMT2Q m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPERMT2Q zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPERMT2Q m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMT2Q xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPERMT2Q m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMT2Q ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Encoding: EVEX.66.0F38.W1 7E /r (W=1 for the 64-bit element size, hence the
// 0xfd P1 base and the 0x85 W/pp argument to m.evex below).
func (self *Program) VPERMT2Q(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMT2Q", 3, Operands { v0, v1, v2 })
    // VPERMT2Q m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0])) // map 0F38, W1+0x66, L'L=0b10 (512-bit), broadcast bit from v[0]
            m.emit(0x7e)                          // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)   // ModRM/SIB/disp; disp8 scale 64 for a 512-bit access
        })
    }
    // VPERMT2Q zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 4-byte EVEX prefix (register-register form):
            m.emit(0x62)                                                                    // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: 0F38 map base with inverted high register-extension bits
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                              // P1: W1 + 0x66 prefix bits, XORed with inverted low vvvv bits of v[1]
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // P2: z, V', aaa mask; 0x40 = 512-bit vector length
            m.emit(0x7e)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM register-direct: reg=v[2], rm=v[0]
        })
    }
    // VPERMT2Q m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0])) // L'L=0b00 (128-bit)
            m.emit(0x7e)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)   // disp8 scale 16 for a 128-bit access
        })
    }
    // VPERMT2Q xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // P2: 0x00 = 128-bit vector length
            m.emit(0x7e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMT2Q m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0])) // L'L=0b01 (256-bit)
            m.emit(0x7e)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)   // disp8 scale 32 for a 256-bit access
        })
    }
    // VPERMT2Q ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // P2: 0x20 = 256-bit vector length
            m.emit(0x7e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder matched: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VPERMT2Q")
    }
    return p
}
 70695  
// VPERMT2W performs "Full Permute of Words From Two Tables Overwriting a Table".
//
// Mnemonic        : VPERMT2W
// Supported forms : (6 forms)
//
//    * VPERMT2W zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPERMT2W m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPERMT2W xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPERMT2W m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPERMT2W ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPERMT2W m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Encoding: EVEX.66.0F38.W1 7D /r. Word-granular, so there is no embedded
// broadcast form (the broadcast argument to m.evex is hard-coded 0).
func (self *Program) VPERMT2W(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMT2W", 3, Operands { v0, v1, v2 })
    // VPERMT2W zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 4-byte EVEX prefix (register-register form):
            m.emit(0x62)                                                                    // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: 0F38 map base with inverted high register-extension bits
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                              // P1: W1 + 0x66 prefix bits, XORed with inverted low vvvv bits of v[1]
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // P2: z, V', aaa mask; 0x40 = 512-bit vector length
            m.emit(0x7d)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM register-direct: reg=v[2], rm=v[0]
        })
    }
    // VPERMT2W m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // map 0F38, W1+0x66, L'L=0b10 (512-bit), no broadcast
            m.emit(0x7d)                          // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)   // ModRM/SIB/disp; disp8 scale 64 for a 512-bit access
        })
    }
    // VPERMT2W xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // P2: 0x00 = 128-bit vector length
            m.emit(0x7d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMT2W m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // L'L=0b00 (128-bit), no broadcast
            m.emit(0x7d)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)   // disp8 scale 16 for a 128-bit access
        })
    }
    // VPERMT2W ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // P2: 0x20 = 256-bit vector length
            m.emit(0x7d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMT2W m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // L'L=0b01 (256-bit), no broadcast
            m.emit(0x7d)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)   // disp8 scale 32 for a 256-bit access
        })
    }
    // No encoder matched: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VPERMT2W")
    }
    return p
}
 70784  
// VPERMW performs "Permute Word Integers".
//
// Mnemonic        : VPERMW
// Supported forms : (6 forms)
//
//    * VPERMW zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPERMW m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPERMW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPERMW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPERMW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPERMW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Encoding: EVEX.66.0F38.W1 8D /r. Word-granular, so there is no embedded
// broadcast form (the broadcast argument to m.evex is hard-coded 0).
func (self *Program) VPERMW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMW", 3, Operands { v0, v1, v2 })
    // VPERMW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 4-byte EVEX prefix (register-register form):
            m.emit(0x62)                                                                    // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: 0F38 map base with inverted high register-extension bits
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                              // P1: W1 + 0x66 prefix bits, XORed with inverted low vvvv bits of v[1]
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // P2: z, V', aaa mask; 0x40 = 512-bit vector length
            m.emit(0x8d)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM register-direct: reg=v[2], rm=v[0]
        })
    }
    // VPERMW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // map 0F38, W1+0x66, L'L=0b10 (512-bit), no broadcast
            m.emit(0x8d)                          // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)   // ModRM/SIB/disp; disp8 scale 64 for a 512-bit access
        })
    }
    // VPERMW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // P2: 0x00 = 128-bit vector length
            m.emit(0x8d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // L'L=0b00 (128-bit), no broadcast
            m.emit(0x8d)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)   // disp8 scale 16 for a 128-bit access
        })
    }
    // VPERMW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // P2: 0x20 = 256-bit vector length
            m.emit(0x8d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // L'L=0b01 (256-bit), no broadcast
            m.emit(0x8d)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)   // disp8 scale 32 for a 256-bit access
        })
    }
    // No encoder matched: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VPERMW")
    }
    return p
}
 70873  
// VPEXPANDD performs "Load Sparse Packed Doubleword Integer Values from Dense Memory/Register".
//
// Mnemonic        : VPEXPANDD
// Supported forms : (6 forms)
//
//    * VPEXPANDD zmm, zmm{k}{z}     [AVX512F]
//    * VPEXPANDD m512, zmm{k}{z}    [AVX512F]
//    * VPEXPANDD xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPEXPANDD ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VPEXPANDD m128, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPEXPANDD m256, ymm{k}{z}    [AVX512F,AVX512VL]
//
// Encoding: EVEX.66.0F38.W0 89 /r. Two-operand form: no vvvv register, so P1
// is the plain 0x7d constant and the m.evex vvvv argument is 0. The memory
// forms use a disp8 scale of 4 (one dword element), unlike the full-vector
// scale used by the VPERM* encoders above.
func (self *Program) VPEXPANDD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPEXPANDD", 2, Operands { v0, v1 })
    // VPEXPANDD zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 4-byte EVEX prefix (register-register form):
            m.emit(0x62)                                                                    // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))  // P0: 0F38 map base with inverted high register-extension bits
            m.emit(0x7d)                                                                    // P1: W0 + 0x66 prefix bits, vvvv unused
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                                 // P2: z/aaa mask; 0x48 = 512-bit length bit plus the V' base
            m.emit(0x89)                                                                    // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                                   // ModRM register-direct: reg=v[1], rm=v[0]
        })
    }
    // VPEXPANDD m512, zmm{k}{z}
    if isM512(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0) // map 0F38, W0+0x66, L'L=0b10 (512-bit), no vvvv, no broadcast
            m.emit(0x89)                         // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 4)   // ModRM/SIB/disp; disp8 scale 4 (single dword element)
        })
    }
    // VPEXPANDD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)                                 // P2: 0x08 = 128-bit length (V' base only)
            m.emit(0x89)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPEXPANDD ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)                                 // P2: 0x28 = 256-bit length bit plus the V' base
            m.emit(0x89)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPEXPANDD m128, xmm{k}{z}
    if isM128(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0) // L'L=0b00 (128-bit)
            m.emit(0x89)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)   // disp8 scale 4 (single dword element)
        })
    }
    // VPEXPANDD m256, ymm{k}{z}
    if isM256(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0) // L'L=0b01 (256-bit)
            m.emit(0x89)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)   // disp8 scale 4 (single dword element)
        })
    }
    // No encoder matched: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VPEXPANDD")
    }
    return p
}
 70962  
// VPEXPANDQ performs "Load Sparse Packed Quadword Integer Values from Dense Memory/Register".
//
// Mnemonic        : VPEXPANDQ
// Supported forms : (6 forms)
//
//    * VPEXPANDQ zmm, zmm{k}{z}     [AVX512F]
//    * VPEXPANDQ m512, zmm{k}{z}    [AVX512F]
//    * VPEXPANDQ xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPEXPANDQ ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VPEXPANDQ m128, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPEXPANDQ m256, ymm{k}{z}    [AVX512F,AVX512VL]
//
// Each operand combination that matches a supported form registers one
// candidate encoding on the returned Instruction; the function panics if
// no form matches.
func (self *Program) VPEXPANDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPEXPANDQ", 2, Operands { v0, v1 })
    // VPEXPANDQ zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-direct form: the 4-byte EVEX prefix is emitted inline —
            // 0x62 escape, the byte carrying the high register-extension bits,
            // 0xfd, then the byte folding in the {z} and {k} mask bits.
            // 0x48 here vs 0x28/0x08 in the ymm/xmm forms below presumably
            // selects the 512/256/128-bit vector length — compare the forms.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x89)
            // ModRM: mod=11 (register), reg=destination v[1], rm=source v[0].
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPEXPANDQ m512, zmm{k}{z}
    if isM512(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: EVEX prefix built by the helper; mrsd emits the
            // ModRM/SIB/displacement, with 8 presumably the disp8 compression
            // factor (quadword element size) — TODO confirm against m.mrsd.
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x89)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VPEXPANDQ xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same register-direct encoding as the zmm form, 0x08 length bits.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x89)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPEXPANDQ ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same register-direct encoding as the zmm form, 0x28 length bits.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x89)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPEXPANDQ m128, xmm{k}{z}
    if isM128(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x89)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VPEXPANDQ m256, ymm{k}{z}
    if isM256(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x89)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // No form matched: reject the operand combination at assembly time.
    if p.len == 0 {
        panic("invalid operands for VPEXPANDQ")
    }
    return p
}
 71051  
// VPEXTRB performs "Extract Byte".
//
// Mnemonic        : VPEXTRB
// Supported forms : (4 forms)
//
//    * VPEXTRB imm8, xmm, r32    [AVX]
//    * VPEXTRB imm8, xmm, m8     [AVX]
//    * VPEXTRB imm8, xmm, r32    [AVX512BW]
//    * VPEXTRB imm8, xmm, m8     [AVX512BW]
//
// The AVX forms use VEX encoding, the AVX512BW forms use EVEX encoding;
// the function panics if the operand combination matches no form.
func (self *Program) VPEXTRB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPEXTRB", 3, Operands { v0, v1, v2 })
    // VPEXTRB imm8, xmm, r32
    if isImm8(v0) && isXMM(v1) && isReg32(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix emitted inline (0xc4 escape), then opcode,
            // ModRM with mod=11 (register destination), and the trailing imm8.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[1]) << 7) ^ (hcode(v[2]) << 5))
            m.emit(0x79)
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPEXTRB imm8, xmm, m8
    if isImm8(v0) && isXMM(v1) && isM8(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory destination: VEX prefix via helper, then ModRM/SIB/disp.
            m.vex3(0xc4, 0b11, 0x01, hcode(v[1]), addr(v[2]), 0)
            m.emit(0x14)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPEXTRB imm8, xmm, r32
    if isImm8(v0) && isEVEXXMM(v1) && isReg32(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded register form (4-byte prefix built inline).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit(0x08)
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPEXTRB imm8, xmm, m8
    if isImm8(v0) && isEVEXXMM(v1) && isM8(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; final 1 is the disp8 scaling factor (byte).
            m.evex(0b11, 0x05, 0b00, ehcode(v[1]), addr(v[2]), 0, 0, 0, 0)
            m.emit(0x14)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPEXTRB")
    }
    return p
}
 71118  
// VPEXTRD performs "Extract Doubleword".
//
// Mnemonic        : VPEXTRD
// Supported forms : (4 forms)
//
//    * VPEXTRD imm8, xmm, r32    [AVX]
//    * VPEXTRD imm8, xmm, m32    [AVX]
//    * VPEXTRD imm8, xmm, r32    [AVX512DQ]
//    * VPEXTRD imm8, xmm, m32    [AVX512DQ]
//
// The AVX forms use VEX encoding, the AVX512DQ forms use EVEX encoding;
// the function panics if the operand combination matches no form.
func (self *Program) VPEXTRD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPEXTRD", 3, Operands { v0, v1, v2 })
    // VPEXTRD imm8, xmm, r32
    if isImm8(v0) && isXMM(v1) && isReg32(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix inline, opcode 0x16, register ModRM, imm8.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[1]) << 7) ^ (hcode(v[2]) << 5))
            m.emit(0x79)
            m.emit(0x16)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPEXTRD imm8, xmm, m32
    if isImm8(v0) && isXMM(v1) && isM32(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX memory form; VEX has no disp8 compression, hence factor 1.
            m.vex3(0xc4, 0b11, 0x01, hcode(v[1]), addr(v[2]), 0)
            m.emit(0x16)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPEXTRD imm8, xmm, r32
    if isImm8(v0) && isEVEXXMM(v1) && isReg32(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded register form (4-byte prefix built inline).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit(0x08)
            m.emit(0x16)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPEXTRD imm8, xmm, m32
    if isImm8(v0) && isEVEXXMM(v1) && isM32(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; disp8 scaled by 4 (doubleword element).
            m.evex(0b11, 0x05, 0b00, ehcode(v[1]), addr(v[2]), 0, 0, 0, 0)
            m.emit(0x16)
            m.mrsd(lcode(v[1]), addr(v[2]), 4)
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPEXTRD")
    }
    return p
}
 71185  
// VPEXTRQ performs "Extract Quadword".
//
// Mnemonic        : VPEXTRQ
// Supported forms : (4 forms)
//
//    * VPEXTRQ imm8, xmm, r64    [AVX]
//    * VPEXTRQ imm8, xmm, m64    [AVX]
//    * VPEXTRQ imm8, xmm, r64    [AVX512DQ]
//    * VPEXTRQ imm8, xmm, m64    [AVX512DQ]
//
// Same opcode (0x16) as VPEXTRD, distinguished by the 64-bit operand-width
// bit in the prefix (0xf9/0xfd/0x81/0x85 below vs VPEXTRD's 0x79/0x7d/0x01).
// Panics if the operand combination matches no form.
func (self *Program) VPEXTRQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPEXTRQ", 3, Operands { v0, v1, v2 })
    // VPEXTRQ imm8, xmm, r64
    if isImm8(v0) && isXMM(v1) && isReg64(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix inline, opcode, register ModRM, imm8.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[1]) << 7) ^ (hcode(v[2]) << 5))
            m.emit(0xf9)
            m.emit(0x16)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPEXTRQ imm8, xmm, m64
    if isImm8(v0) && isXMM(v1) && isM64(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX memory form; no disp8 compression under VEX (factor 1).
            m.vex3(0xc4, 0b11, 0x81, hcode(v[1]), addr(v[2]), 0)
            m.emit(0x16)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPEXTRQ imm8, xmm, r64
    if isImm8(v0) && isEVEXXMM(v1) && isReg64(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded register form (4-byte prefix built inline).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit(0x08)
            m.emit(0x16)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPEXTRQ imm8, xmm, m64
    if isImm8(v0) && isEVEXXMM(v1) && isM64(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; disp8 scaled by 8 (quadword element).
            m.evex(0b11, 0x85, 0b00, ehcode(v[1]), addr(v[2]), 0, 0, 0, 0)
            m.emit(0x16)
            m.mrsd(lcode(v[1]), addr(v[2]), 8)
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPEXTRQ")
    }
    return p
}
 71252  
// VPEXTRW performs "Extract Word".
//
// Mnemonic        : VPEXTRW
// Supported forms : (4 forms)
//
//    * VPEXTRW imm8, xmm, r32    [AVX]
//    * VPEXTRW imm8, xmm, m16    [AVX]
//    * VPEXTRW imm8, xmm, r32    [AVX512BW]
//    * VPEXTRW imm8, xmm, m16    [AVX512BW]
//
// The register forms each register TWO candidate encodings (opcode 0xc5
// with the operands swapped in ModRM, and opcode 0x15): both are
// architecturally valid, and the instruction selects between them later.
// Panics if the operand combination matches no form.
func (self *Program) VPEXTRW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPEXTRW", 3, Operands { v0, v1, v2 })
    // VPEXTRW imm8, xmm, r32
    if isImm8(v0) && isXMM(v1) && isReg32(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding 1: 2-byte VEX prefix + opcode 0xc5; note the
            // destination register goes in the ModRM reg field here.
            m.vex2(1, hcode(v[2]), v[1], 0)
            m.emit(0xc5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding 2: 3-byte VEX prefix + opcode 0x15, operands reversed
            // in ModRM relative to encoding 1.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[1]) << 7) ^ (hcode(v[2]) << 5))
            m.emit(0x79)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPEXTRW imm8, xmm, m16
    if isImm8(v0) && isXMM(v1) && isM16(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory destination exists only for opcode 0x15.
            m.vex3(0xc4, 0b11, 0x01, hcode(v[1]), addr(v[2]), 0)
            m.emit(0x15)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPEXTRW imm8, xmm, r32
    if isImm8(v0) && isEVEXXMM(v1) && isReg32(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX encoding 1: opcode 0x15 (4-byte prefix built inline).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit(0x08)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX encoding 2: opcode 0xc5 with operand roles swapped.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit(0x08)
            m.emit(0xc5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPEXTRW imm8, xmm, m16
    if isImm8(v0) && isEVEXXMM(v1) && isM16(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; disp8 scaled by 2 (word element).
            m.evex(0b11, 0x05, 0b00, ehcode(v[1]), addr(v[2]), 0, 0, 0, 0)
            m.emit(0x15)
            m.mrsd(lcode(v[1]), addr(v[2]), 2)
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPEXTRW")
    }
    return p
}
 71334  
// VPGATHERDD performs "Gather Packed Doubleword Values Using Signed Doubleword Indices".
//
// Mnemonic        : VPGATHERDD
// Supported forms : (5 forms)
//
//    * VPGATHERDD xmm, vm32x, xmm    [AVX2]
//    * VPGATHERDD ymm, vm32y, ymm    [AVX2]
//    * VPGATHERDD vm32z, zmm{k}      [AVX512F]
//    * VPGATHERDD vm32x, xmm{k}      [AVX512F,AVX512VL]
//    * VPGATHERDD vm32y, ymm{k}      [AVX512F,AVX512VL]
//
// Variadic because the AVX2 forms take 3 operands (an explicit mask
// register) while the AVX-512 forms take 2 (the mask comes from the
// destination's {k} specifier). Panics on any other operand count or on
// an unmatched combination.
func (self *Program) VPGATHERDD(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VPGATHERDD", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VPGATHERDD", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VPGATHERDD takes 2 or 3 operands")
    }
    // VPGATHERDD xmm, vm32x, xmm
    if len(vv) == 1 && isXMM(v0) && isVMX(v1) && isXMM(vv[0]) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX form: v[0] is the mask register (carried in VEX.vvvv via
            // hlcode), v[1] the vector-index memory operand, v[2] the dest.
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x90)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // VPGATHERDD ymm, vm32y, ymm
    if len(vv) == 1 && isYMM(v0) && isVMY(v1) && isYMM(vv[0]) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x90)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // VPGATHERDD vm32z, zmm{k}
    if len(vv) == 0 && isVMZ(v0) && isZMMk(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX form: mask taken from the destination's {k} register;
            // disp8 scaled by 4 (doubleword element).
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x90)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VPGATHERDD vm32x, xmm{k}
    if len(vv) == 0 && isEVEXVMX(v0) && isXMMk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x90)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VPGATHERDD vm32y, ymm{k}
    if len(vv) == 0 && isEVEXVMY(v0) && isYMMk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x90)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPGATHERDD")
    }
    return p
}
 71408  
// VPGATHERDQ performs "Gather Packed Quadword Values Using Signed Doubleword Indices".
//
// Mnemonic        : VPGATHERDQ
// Supported forms : (5 forms)
//
//    * VPGATHERDQ xmm, vm32x, xmm    [AVX2]
//    * VPGATHERDQ ymm, vm32x, ymm    [AVX2]
//    * VPGATHERDQ vm32y, zmm{k}      [AVX512F]
//    * VPGATHERDQ vm32x, xmm{k}      [AVX512F,AVX512VL]
//    * VPGATHERDQ vm32x, ymm{k}      [AVX512F,AVX512VL]
//
// The index vector is half the width of the data vector (dword indices,
// qword data), hence vm32x/vm32y pairing with wider destinations.
// Variadic: AVX2 forms take 3 operands (explicit mask register),
// AVX-512 forms take 2 ({k} mask on the destination).
func (self *Program) VPGATHERDQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VPGATHERDQ", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VPGATHERDQ", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VPGATHERDQ takes 2 or 3 operands")
    }
    // VPGATHERDQ xmm, vm32x, xmm
    if len(vv) == 1 && isXMM(v0) && isVMX(v1) && isXMM(vv[0]) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX form: mask register v[0] rides in VEX.vvvv (hlcode).
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x90)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // VPGATHERDQ ymm, vm32x, ymm
    if len(vv) == 1 && isYMM(v0) && isVMX(v1) && isYMM(vv[0]) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x90)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // VPGATHERDQ vm32y, zmm{k}
    if len(vv) == 0 && isEVEXVMY(v0) && isZMMk(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX form; disp8 scaled by 8 (quadword element).
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x90)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VPGATHERDQ vm32x, xmm{k}
    if len(vv) == 0 && isEVEXVMX(v0) && isXMMk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x90)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VPGATHERDQ vm32x, ymm{k}
    if len(vv) == 0 && isEVEXVMX(v0) && isYMMk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x90)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPGATHERDQ")
    }
    return p
}
 71482  
// VPGATHERQD performs "Gather Packed Doubleword Values Using Signed Quadword Indices".
//
// Mnemonic        : VPGATHERQD
// Supported forms : (5 forms)
//
//    * VPGATHERQD xmm, vm64x, xmm    [AVX2]
//    * VPGATHERQD xmm, vm64y, xmm    [AVX2]
//    * VPGATHERQD vm64z, ymm{k}      [AVX512F]
//    * VPGATHERQD vm64x, xmm{k}      [AVX512F,AVX512VL]
//    * VPGATHERQD vm64y, xmm{k}      [AVX512F,AVX512VL]
//
// The index vector is twice the width of the data vector (qword indices,
// dword data), hence vm64y/vm64z pairing with narrower destinations.
// Variadic: AVX2 forms take 3 operands (explicit mask register),
// AVX-512 forms take 2 ({k} mask on the destination).
func (self *Program) VPGATHERQD(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VPGATHERQD", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VPGATHERQD", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VPGATHERQD takes 2 or 3 operands")
    }
    // VPGATHERQD xmm, vm64x, xmm
    if len(vv) == 1 && isXMM(v0) && isVMX(v1) && isXMM(vv[0]) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX form: mask register v[0] rides in VEX.vvvv (hlcode).
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x91)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // VPGATHERQD xmm, vm64y, xmm
    if len(vv) == 1 && isXMM(v0) && isVMY(v1) && isXMM(vv[0]) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x91)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // VPGATHERQD vm64z, ymm{k}
    if len(vv) == 0 && isVMZ(v0) && isYMMk(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX form; disp8 scaled by 4 (doubleword element).
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x91)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VPGATHERQD vm64x, xmm{k}
    if len(vv) == 0 && isEVEXVMX(v0) && isXMMk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x91)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VPGATHERQD vm64y, xmm{k}
    if len(vv) == 0 && isEVEXVMY(v0) && isXMMk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x91)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPGATHERQD")
    }
    return p
}
 71556  
// VPGATHERQQ performs "Gather Packed Quadword Values Using Signed Quadword Indices".
//
// Mnemonic        : VPGATHERQQ
// Supported forms : (5 forms)
//
//    * VPGATHERQQ xmm, vm64x, xmm    [AVX2]
//    * VPGATHERQQ ymm, vm64y, ymm    [AVX2]
//    * VPGATHERQQ vm64z, zmm{k}      [AVX512F]
//    * VPGATHERQQ vm64x, xmm{k}      [AVX512F,AVX512VL]
//    * VPGATHERQQ vm64y, ymm{k}      [AVX512F,AVX512VL]
//
// Variadic: AVX2 forms take 3 operands (explicit mask register),
// AVX-512 forms take 2 ({k} mask on the destination). Panics on any
// other operand count or on an unmatched combination.
func (self *Program) VPGATHERQQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VPGATHERQQ", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VPGATHERQQ", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VPGATHERQQ takes 2 or 3 operands")
    }
    // VPGATHERQQ xmm, vm64x, xmm
    if len(vv) == 1 && isXMM(v0) && isVMX(v1) && isXMM(vv[0]) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX form: mask register v[0] rides in VEX.vvvv (hlcode).
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x91)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // VPGATHERQQ ymm, vm64y, ymm
    if len(vv) == 1 && isYMM(v0) && isVMY(v1) && isYMM(vv[0]) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x91)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // VPGATHERQQ vm64z, zmm{k}
    if len(vv) == 0 && isVMZ(v0) && isZMMk(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX form; disp8 scaled by 8 (quadword element).
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x91)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VPGATHERQQ vm64x, xmm{k}
    if len(vv) == 0 && isEVEXVMX(v0) && isXMMk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x91)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VPGATHERQQ vm64y, ymm{k}
    if len(vv) == 0 && isEVEXVMY(v0) && isYMMk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x91)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPGATHERQQ")
    }
    return p
}
 71630  
// VPHADDBD performs "Packed Horizontal Add Signed Byte to Signed Doubleword".
//
// Mnemonic        : VPHADDBD
// Supported forms : (2 forms)
//
//    * VPHADDBD xmm, xmm     [XOP]
//    * VPHADDBD m128, xmm    [XOP]
//
// AMD XOP instruction: uses the 0x8f escape byte instead of the VEX 0xc4
// escape. Panics if the operand combination matches no form.
func (self *Program) VPHADDBD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPHADDBD", 2, Operands { v0, v1 })
    // VPHADDBD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // XOP prefix emitted inline (0x8f escape), opcode 0xc2,
            // then ModRM with mod=11: reg=destination v[1], rm=source v[0].
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x78)
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPHADDBD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: XOP prefix via the vex3 helper (0x8f, map 0b1001),
            // then opcode and ModRM/SIB/displacement.
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)
            m.emit(0xc2)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPHADDBD")
    }
    return p
}
 71668  
// VPHADDBQ performs "Packed Horizontal Add Signed Byte to Signed Quadword".
//
// Mnemonic        : VPHADDBQ
// Supported forms : (2 forms)
//
//    * VPHADDBQ xmm, xmm     [XOP]
//    * VPHADDBQ m128, xmm    [XOP]
//
// AMD XOP instruction: uses the 0x8f escape byte instead of the VEX 0xc4
// escape. Panics if the operand combination matches no form.
func (self *Program) VPHADDBQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPHADDBQ", 2, Operands { v0, v1 })
    // VPHADDBQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // XOP prefix emitted inline (0x8f escape), opcode 0xc3,
            // then ModRM with mod=11: reg=destination v[1], rm=source v[0].
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x78)
            m.emit(0xc3)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPHADDBQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: XOP prefix via the vex3 helper (0x8f, map 0b1001).
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)
            m.emit(0xc3)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPHADDBQ")
    }
    return p
}
 71706  
// VPHADDBW performs "Packed Horizontal Add Signed Byte to Signed Word".
//
// Mnemonic        : VPHADDBW
// Supported forms : (2 forms)
//
//    * VPHADDBW xmm, xmm     [XOP]
//    * VPHADDBW m128, xmm    [XOP]
//
// AMD XOP instruction: uses the 0x8f escape byte instead of the VEX 0xc4
// escape. Panics if the operand combination matches no form.
func (self *Program) VPHADDBW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPHADDBW", 2, Operands { v0, v1 })
    // VPHADDBW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // XOP prefix emitted inline (0x8f escape), opcode 0xc1,
            // then ModRM with mod=11: reg=destination v[1], rm=source v[0].
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x78)
            m.emit(0xc1)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPHADDBW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: XOP prefix via the vex3 helper (0x8f, map 0b1001).
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)
            m.emit(0xc1)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPHADDBW")
    }
    return p
}
 71744  
// VPHADDD performs "Packed Horizontal Add Doubleword Integer".
//
// Mnemonic        : VPHADDD
// Supported forms : (4 forms)
//
//    * VPHADDD xmm, xmm, xmm     [AVX]
//    * VPHADDD m128, xmm, xmm    [AVX]
//    * VPHADDD ymm, ymm, ymm     [AVX2]
//    * VPHADDD m256, ymm, ymm    [AVX2]
//
func (self *Program) VPHADDD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPHADDD", 3, Operands { v0, v1, v2 })
    // VPHADDD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX (0xC4): map 0F38, pp=66, L=0 (byte 0x79);
            // hcode XORs fix the inverted R/B bits, hlcode fills vvvv with v[1].
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x02)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPHADDD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form via vex3 helper (map 0b10 = 0F38, 0x01 = pp=66/L=0).
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x02)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPHADDD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit form: byte 0x7D instead of 0x79 sets VEX.L=1.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x02)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPHADDD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form, 0x05 = pp=66 with L=1 (256-bit).
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x02)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No encoder matched the operands.
    if p.len == 0 {
        panic("invalid operands for VPHADDD")
    }
    return p
}
 71806  
// VPHADDDQ performs "Packed Horizontal Add Signed Doubleword to Signed Quadword".
//
// Mnemonic        : VPHADDDQ
// Supported forms : (2 forms)
//
//    * VPHADDDQ xmm, xmm     [XOP]
//    * VPHADDDQ m128, xmm    [XOP]
//
func (self *Program) VPHADDDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPHADDDQ", 2, Operands { v0, v1 })
    // VPHADDDQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // XOP prefix (escape 0x8F, map 9, pp=0, L=0, W=0); hcode XORs fix
            // the inverted R/B bits, then opcode 0xCB + register-direct ModRM.
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x78)
            m.emit(0xcb)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPHADDDQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form via vex3 helper; mrsd emits ModRM/SIB/displacement.
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)
            m.emit(0xcb)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoder matched the operands.
    if p.len == 0 {
        panic("invalid operands for VPHADDDQ")
    }
    return p
}
 71844  
// VPHADDSW performs "Packed Horizontal Add Signed Word Integers with Signed Saturation".
//
// Mnemonic        : VPHADDSW
// Supported forms : (4 forms)
//
//    * VPHADDSW xmm, xmm, xmm     [AVX]
//    * VPHADDSW m128, xmm, xmm    [AVX]
//    * VPHADDSW ymm, ymm, ymm     [AVX2]
//    * VPHADDSW m256, ymm, ymm    [AVX2]
//
func (self *Program) VPHADDSW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPHADDSW", 3, Operands { v0, v1, v2 })
    // VPHADDSW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX (0xC4): map 0F38, pp=66, L=0 (byte 0x79); hcode XORs
            // fix inverted R/B, hlcode fills vvvv with v[1]; opcode 0x03.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x03)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPHADDSW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form via vex3 helper (0x01 = pp=66, L=0).
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x03)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPHADDSW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit form: byte 0x7D sets VEX.L=1.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x03)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPHADDSW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form, 0x05 = pp=66 with L=1 (256-bit).
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x03)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No encoder matched the operands.
    if p.len == 0 {
        panic("invalid operands for VPHADDSW")
    }
    return p
}
 71906  
// VPHADDUBD performs "Packed Horizontal Add Unsigned Byte to Doubleword".
//
// Mnemonic        : VPHADDUBD
// Supported forms : (2 forms)
//
//    * VPHADDUBD xmm, xmm     [XOP]
//    * VPHADDUBD m128, xmm    [XOP]
//
func (self *Program) VPHADDUBD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPHADDUBD", 2, Operands { v0, v1 })
    // VPHADDUBD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // XOP prefix (escape 0x8F, map 9, pp=0, L=0, W=0); hcode XORs fix
            // the inverted R/B bits, then opcode 0xD2 + register-direct ModRM.
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x78)
            m.emit(0xd2)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPHADDUBD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form via vex3 helper; mrsd emits ModRM/SIB/displacement.
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)
            m.emit(0xd2)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoder matched the operands.
    if p.len == 0 {
        panic("invalid operands for VPHADDUBD")
    }
    return p
}
 71944  
// VPHADDUBQ performs "Packed Horizontal Add Unsigned Byte to Quadword".
//
// Mnemonic        : VPHADDUBQ
// Supported forms : (2 forms)
//
//    * VPHADDUBQ xmm, xmm     [XOP]
//    * VPHADDUBQ m128, xmm    [XOP]
//
func (self *Program) VPHADDUBQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPHADDUBQ", 2, Operands { v0, v1 })
    // VPHADDUBQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // XOP prefix (escape 0x8F, map 9, pp=0, L=0, W=0); hcode XORs fix
            // the inverted R/B bits, then opcode 0xD3 + register-direct ModRM.
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x78)
            m.emit(0xd3)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPHADDUBQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form via vex3 helper; mrsd emits ModRM/SIB/displacement.
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)
            m.emit(0xd3)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoder matched the operands.
    if p.len == 0 {
        panic("invalid operands for VPHADDUBQ")
    }
    return p
}
 71982  
// VPHADDUBW performs "Packed Horizontal Add Unsigned Byte to Word".
//
// Mnemonic        : VPHADDUBW
// Supported forms : (2 forms)
//
//    * VPHADDUBW xmm, xmm     [XOP]
//    * VPHADDUBW m128, xmm    [XOP]
//
func (self *Program) VPHADDUBW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPHADDUBW", 2, Operands { v0, v1 })
    // VPHADDUBW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // XOP prefix (escape 0x8F, map 9, pp=0, L=0, W=0); hcode XORs fix
            // the inverted R/B bits, then opcode 0xD1 + register-direct ModRM.
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x78)
            m.emit(0xd1)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPHADDUBW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form via vex3 helper; mrsd emits ModRM/SIB/displacement.
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)
            m.emit(0xd1)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoder matched the operands.
    if p.len == 0 {
        panic("invalid operands for VPHADDUBW")
    }
    return p
}
 72020  
// VPHADDUDQ performs "Packed Horizontal Add Unsigned Doubleword to Quadword".
//
// Mnemonic        : VPHADDUDQ
// Supported forms : (2 forms)
//
//    * VPHADDUDQ xmm, xmm     [XOP]
//    * VPHADDUDQ m128, xmm    [XOP]
//
func (self *Program) VPHADDUDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPHADDUDQ", 2, Operands { v0, v1 })
    // VPHADDUDQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // XOP prefix (escape 0x8F, map 9, pp=0, L=0, W=0); hcode XORs fix
            // the inverted R/B bits, then opcode 0xDB + register-direct ModRM.
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x78)
            m.emit(0xdb)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPHADDUDQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form via vex3 helper; mrsd emits ModRM/SIB/displacement.
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)
            m.emit(0xdb)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoder matched the operands.
    if p.len == 0 {
        panic("invalid operands for VPHADDUDQ")
    }
    return p
}
 72058  
// VPHADDUWD performs "Packed Horizontal Add Unsigned Word to Doubleword".
//
// Mnemonic        : VPHADDUWD
// Supported forms : (2 forms)
//
//    * VPHADDUWD xmm, xmm     [XOP]
//    * VPHADDUWD m128, xmm    [XOP]
//
func (self *Program) VPHADDUWD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPHADDUWD", 2, Operands { v0, v1 })
    // VPHADDUWD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // XOP prefix (escape 0x8F, map 9, pp=0, L=0, W=0); hcode XORs fix
            // the inverted R/B bits, then opcode 0xD6 + register-direct ModRM.
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x78)
            m.emit(0xd6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPHADDUWD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form via vex3 helper; mrsd emits ModRM/SIB/displacement.
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)
            m.emit(0xd6)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoder matched the operands.
    if p.len == 0 {
        panic("invalid operands for VPHADDUWD")
    }
    return p
}
 72096  
// VPHADDUWQ performs "Packed Horizontal Add Unsigned Word to Quadword".
//
// Mnemonic        : VPHADDUWQ
// Supported forms : (2 forms)
//
//    * VPHADDUWQ xmm, xmm     [XOP]
//    * VPHADDUWQ m128, xmm    [XOP]
//
func (self *Program) VPHADDUWQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPHADDUWQ", 2, Operands { v0, v1 })
    // VPHADDUWQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // XOP prefix (escape 0x8F, map 9, pp=0, L=0, W=0); hcode XORs fix
            // the inverted R/B bits, then opcode 0xD7 + register-direct ModRM.
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x78)
            m.emit(0xd7)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPHADDUWQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form via vex3 helper; mrsd emits ModRM/SIB/displacement.
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)
            m.emit(0xd7)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoder matched the operands.
    if p.len == 0 {
        panic("invalid operands for VPHADDUWQ")
    }
    return p
}
 72134  
// VPHADDW performs "Packed Horizontal Add Word Integers".
//
// Mnemonic        : VPHADDW
// Supported forms : (4 forms)
//
//    * VPHADDW xmm, xmm, xmm     [AVX]
//    * VPHADDW m128, xmm, xmm    [AVX]
//    * VPHADDW ymm, ymm, ymm     [AVX2]
//    * VPHADDW m256, ymm, ymm    [AVX2]
//
func (self *Program) VPHADDW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPHADDW", 3, Operands { v0, v1, v2 })
    // VPHADDW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX (0xC4): map 0F38, pp=66, L=0 (byte 0x79); hcode XORs
            // fix inverted R/B, hlcode fills vvvv with v[1]; opcode 0x01.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x01)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPHADDW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form via vex3 helper (0x01 = pp=66, L=0).
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x01)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPHADDW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit form: byte 0x7D sets VEX.L=1.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x01)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPHADDW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form, 0x05 = pp=66 with L=1 (256-bit).
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x01)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No encoder matched the operands.
    if p.len == 0 {
        panic("invalid operands for VPHADDW")
    }
    return p
}
 72196  
// VPHADDWD performs "Packed Horizontal Add Signed Word to Signed Doubleword".
//
// Mnemonic        : VPHADDWD
// Supported forms : (2 forms)
//
//    * VPHADDWD xmm, xmm     [XOP]
//    * VPHADDWD m128, xmm    [XOP]
//
func (self *Program) VPHADDWD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPHADDWD", 2, Operands { v0, v1 })
    // VPHADDWD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // XOP prefix (escape 0x8F, map 9, pp=0, L=0, W=0); hcode XORs fix
            // the inverted R/B bits, then opcode 0xC6 + register-direct ModRM.
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x78)
            m.emit(0xc6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPHADDWD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form via vex3 helper; mrsd emits ModRM/SIB/displacement.
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)
            m.emit(0xc6)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoder matched the operands.
    if p.len == 0 {
        panic("invalid operands for VPHADDWD")
    }
    return p
}
 72234  
// VPHADDWQ performs "Packed Horizontal Add Signed Word to Signed Quadword".
//
// Mnemonic        : VPHADDWQ
// Supported forms : (2 forms)
//
//    * VPHADDWQ xmm, xmm     [XOP]
//    * VPHADDWQ m128, xmm    [XOP]
//
func (self *Program) VPHADDWQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPHADDWQ", 2, Operands { v0, v1 })
    // VPHADDWQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // XOP prefix (escape 0x8F, map 9, pp=0, L=0, W=0); hcode XORs fix
            // the inverted R/B bits, then opcode 0xC7 + register-direct ModRM.
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x78)
            m.emit(0xc7)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPHADDWQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form via vex3 helper; mrsd emits ModRM/SIB/displacement.
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)
            m.emit(0xc7)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoder matched the operands.
    if p.len == 0 {
        panic("invalid operands for VPHADDWQ")
    }
    return p
}
 72272  
// VPHMINPOSUW performs "Packed Horizontal Minimum of Unsigned Word Integers".
//
// Mnemonic        : VPHMINPOSUW
// Supported forms : (2 forms)
//
//    * VPHMINPOSUW xmm, xmm     [AVX]
//    * VPHMINPOSUW m128, xmm    [AVX]
//
func (self *Program) VPHMINPOSUW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPHMINPOSUW", 2, Operands { v0, v1 })
    // VPHMINPOSUW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX (0xC4): map 0F38, pp=66, L=0 (byte 0x79, vvvv unused);
            // hcode XORs fix inverted R/B; opcode 0x41, register-direct ModRM.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0x41)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPHMINPOSUW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form via vex3 helper; vvvv=0 since there is no second source.
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x41)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoder matched the operands.
    if p.len == 0 {
        panic("invalid operands for VPHMINPOSUW")
    }
    return p
}
 72310  
// VPHSUBBW performs "Packed Horizontal Subtract Signed Byte to Signed Word".
//
// Mnemonic        : VPHSUBBW
// Supported forms : (2 forms)
//
//    * VPHSUBBW xmm, xmm     [XOP]
//    * VPHSUBBW m128, xmm    [XOP]
//
func (self *Program) VPHSUBBW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPHSUBBW", 2, Operands { v0, v1 })
    // VPHSUBBW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // XOP prefix (escape 0x8F, map 9, pp=0, L=0, W=0); hcode XORs fix
            // the inverted R/B bits, then opcode 0xE1 + register-direct ModRM.
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x78)
            m.emit(0xe1)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPHSUBBW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form via vex3 helper; mrsd emits ModRM/SIB/displacement.
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)
            m.emit(0xe1)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoder matched the operands.
    if p.len == 0 {
        panic("invalid operands for VPHSUBBW")
    }
    return p
}
 72348  
// VPHSUBD performs "Packed Horizontal Subtract Doubleword Integers".
//
// Mnemonic        : VPHSUBD
// Supported forms : (4 forms)
//
//    * VPHSUBD xmm, xmm, xmm     [AVX]
//    * VPHSUBD m128, xmm, xmm    [AVX]
//    * VPHSUBD ymm, ymm, ymm     [AVX2]
//    * VPHSUBD m256, ymm, ymm    [AVX2]
//
func (self *Program) VPHSUBD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPHSUBD", 3, Operands { v0, v1, v2 })
    // VPHSUBD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX (0xC4): map 0F38, pp=66, L=0 (byte 0x79); hcode XORs
            // fix inverted R/B, hlcode fills vvvv with v[1]; opcode 0x06.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x06)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPHSUBD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form via vex3 helper (0x01 = pp=66, L=0).
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x06)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPHSUBD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit form: byte 0x7D sets VEX.L=1.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x06)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPHSUBD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form, 0x05 = pp=66 with L=1 (256-bit).
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x06)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No encoder matched the operands.
    if p.len == 0 {
        panic("invalid operands for VPHSUBD")
    }
    return p
}
 72410  
// VPHSUBDQ performs "Packed Horizontal Subtract Signed Doubleword to Signed Quadword".
//
// Mnemonic        : VPHSUBDQ
// Supported forms : (2 forms)
//
//    * VPHSUBDQ xmm, xmm     [XOP]
//    * VPHSUBDQ m128, xmm    [XOP]
//
func (self *Program) VPHSUBDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPHSUBDQ", 2, Operands { v0, v1 })
    // VPHSUBDQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // XOP prefix (escape 0x8F, map 9, pp=0, L=0, W=0); hcode XORs fix
            // the inverted R/B bits, then opcode 0xE3 + register-direct ModRM.
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x78)
            m.emit(0xe3)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPHSUBDQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form via vex3 helper; mrsd emits ModRM/SIB/displacement.
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)
            m.emit(0xe3)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoder matched the operands.
    if p.len == 0 {
        panic("invalid operands for VPHSUBDQ")
    }
    return p
}
 72448  
// VPHSUBSW performs "Packed Horizontal Subtract Signed Word Integers with Signed Saturation".
//
// Mnemonic        : VPHSUBSW
// Supported forms : (4 forms)
//
//    * VPHSUBSW xmm, xmm, xmm     [AVX]
//    * VPHSUBSW m128, xmm, xmm    [AVX]
//    * VPHSUBSW ymm, ymm, ymm     [AVX2]
//    * VPHSUBSW m256, ymm, ymm    [AVX2]
//
func (self *Program) VPHSUBSW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPHSUBSW", 3, Operands { v0, v1, v2 })
    // VPHSUBSW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX (0xC4): map 0F38, pp=66, L=0 (byte 0x79); hcode XORs
            // fix inverted R/B, hlcode fills vvvv with v[1]; opcode 0x07.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x07)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPHSUBSW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form via vex3 helper (0x01 = pp=66, L=0).
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x07)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPHSUBSW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit form: byte 0x7D sets VEX.L=1.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x07)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPHSUBSW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form, 0x05 = pp=66 with L=1 (256-bit).
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x07)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No encoder matched the operands.
    if p.len == 0 {
        panic("invalid operands for VPHSUBSW")
    }
    return p
}
 72510  
// VPHSUBW performs "Packed Horizontal Subtract Word Integers".
//
// Mnemonic        : VPHSUBW
// Supported forms : (4 forms)
//
//    * VPHSUBW xmm, xmm, xmm     [AVX]
//    * VPHSUBW m128, xmm, xmm    [AVX]
//    * VPHSUBW ymm, ymm, ymm     [AVX2]
//    * VPHSUBW m256, ymm, ymm    [AVX2]
//
// Each matching form below appends one candidate encoding; if no form
// matches the operand types, the function panics.
func (self *Program) VPHSUBW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPHSUBW", 3, Operands { v0, v1, v2 })
    // VPHSUBW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)  // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // VEX P0: inverted R/B from high register bits, opcode map 0F38
            m.emit(0x79 ^ (hlcode(v[1]) << 3))  // VEX P1: vvvv = ~v1, pp = 66, VEX.L = 0 (128-bit)
            m.emit(0x05)  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=dest, rm=src
        })
    }
    // VPHSUBW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))  // VEX prefix for memory operand: map 0F38, 66 prefix, 128-bit
            m.emit(0x05)  // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)  // ModRM + SIB + displacement
        })
    }
    // VPHSUBW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)  // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // VEX P0: inverted R/B, opcode map 0F38
            m.emit(0x7d ^ (hlcode(v[1]) << 3))  // VEX P1: vvvv = ~v1, pp = 66, VEX.L = 1 (256-bit)
            m.emit(0x05)  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=dest, rm=src
        })
    }
    // VPHSUBW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))  // VEX prefix for memory operand: map 0F38, 66 prefix, 256-bit
            m.emit(0x05)  // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)  // ModRM + SIB + displacement
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPHSUBW")
    }
    return p
}
 72572  
// VPHSUBWD performs "Packed Horizontal Subtract Signed Word to Signed Doubleword".
//
// Mnemonic        : VPHSUBWD
// Supported forms : (2 forms)
//
//    * VPHSUBWD xmm, xmm     [XOP]
//    * VPHSUBWD m128, xmm    [XOP]
//
// AMD XOP instruction: uses the 0x8F escape byte instead of VEX's 0xC4.
func (self *Program) VPHSUBWD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPHSUBWD", 2, Operands { v0, v1 })
    // VPHSUBWD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)  // XOP escape
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))  // XOP P0: inverted R/B from high register bits, opcode map 9
            m.emit(0x78)  // XOP P1: W=0, vvvv=1111 (unused), L=0, pp=00
            m.emit(0xe2)  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=dest, rm=src
        })
    }
    // VPHSUBWD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)  // XOP prefix for memory operand: map 9, no vvvv operand
            m.emit(0xe2)  // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM + SIB + displacement
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPHSUBWD")
    }
    return p
}
 72610  
// VPINSRB performs "Insert Byte".
//
// Mnemonic        : VPINSRB
// Supported forms : (4 forms)
//
//    * VPINSRB imm8, r32, xmm, xmm    [AVX]
//    * VPINSRB imm8, m8, xmm, xmm     [AVX]
//    * VPINSRB imm8, r32, xmm, xmm    [AVX512BW]
//    * VPINSRB imm8, m8, xmm, xmm     [AVX512BW]
//
// Operand order is source-first: v0 = imm8 selector, v1 = byte source,
// v2 = first source register, v3 = destination.
func (self *Program) VPINSRB(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPINSRB", 4, Operands { v0, v1, v2, v3 })
    // VPINSRB imm8, r32, xmm, xmm
    if isImm8(v0) && isReg32(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)  // 3-byte VEX escape
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))  // VEX P0: inverted R/B, opcode map 0F3A
            m.emit(0x79 ^ (hlcode(v[2]) << 3))  // VEX P1: vvvv = ~v2, pp = 66, VEX.L = 0
            m.emit(0x20)  // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))  // ModRM: mod=11, reg=dest, rm=src
            m.imm1(toImmAny(v[0]))  // trailing imm8 selector
        })
    }
    // VPINSRB imm8, m8, xmm, xmm
    if isImm8(v0) && isM8(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))  // VEX prefix for memory operand: map 0F3A, 66 prefix
            m.emit(0x20)  // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 1)  // ModRM + SIB + displacement
            m.imm1(toImmAny(v[0]))  // trailing imm8 selector
        })
    }
    // VPINSRB imm8, r32, xmm, xmm
    if isImm8(v0) && isReg32(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)  // EVEX escape
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))  // EVEX P0: inverted R/B/R', opcode map 0F3A
            m.emit(0x7d ^ (hlcode(v[2]) << 3))  // EVEX P1: W=0, vvvv = ~v2, pp = 66
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | 0x00)  // EVEX P2: V' from v2, L'L = 00 (128-bit)
            m.emit(0x20)  // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))  // ModRM: mod=11, reg=dest, rm=src
            m.imm1(toImmAny(v[0]))  // trailing imm8 selector
        })
    }
    // VPINSRB imm8, m8, xmm, xmm
    if isImm8(v0) && isM8(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), 0, 0, 0)  // EVEX prefix for memory operand: map 0F3A, 66 prefix, no mask/zero/bcst
            m.emit(0x20)  // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 1)  // disp8*N scale 1: byte memory operand
            m.imm1(toImmAny(v[0]))  // trailing imm8 selector
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPINSRB")
    }
    return p
}
 72677  
// VPINSRD performs "Insert Doubleword".
//
// Mnemonic        : VPINSRD
// Supported forms : (4 forms)
//
//    * VPINSRD imm8, r32, xmm, xmm    [AVX]
//    * VPINSRD imm8, m32, xmm, xmm    [AVX]
//    * VPINSRD imm8, r32, xmm, xmm    [AVX512DQ]
//    * VPINSRD imm8, m32, xmm, xmm    [AVX512DQ]
//
// Operand order is source-first: v0 = imm8 selector, v1 = dword source,
// v2 = first source register, v3 = destination.
func (self *Program) VPINSRD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPINSRD", 4, Operands { v0, v1, v2, v3 })
    // VPINSRD imm8, r32, xmm, xmm
    if isImm8(v0) && isReg32(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)  // 3-byte VEX escape
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))  // VEX P0: inverted R/B, opcode map 0F3A
            m.emit(0x79 ^ (hlcode(v[2]) << 3))  // VEX P1: W=0 (dword), vvvv = ~v2, pp = 66
            m.emit(0x22)  // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))  // ModRM: mod=11, reg=dest, rm=src
            m.imm1(toImmAny(v[0]))  // trailing imm8 selector
        })
    }
    // VPINSRD imm8, m32, xmm, xmm
    if isImm8(v0) && isM32(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))  // VEX prefix for memory operand: map 0F3A, 66 prefix
            m.emit(0x22)  // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 1)  // ModRM + SIB + displacement
            m.imm1(toImmAny(v[0]))  // trailing imm8 selector
        })
    }
    // VPINSRD imm8, r32, xmm, xmm
    if isImm8(v0) && isReg32(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)  // EVEX escape
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))  // EVEX P0: inverted R/B/R', opcode map 0F3A
            m.emit(0x7d ^ (hlcode(v[2]) << 3))  // EVEX P1: W=0 (dword), vvvv = ~v2, pp = 66
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | 0x00)  // EVEX P2: V' from v2, L'L = 00 (128-bit)
            m.emit(0x22)  // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))  // ModRM: mod=11, reg=dest, rm=src
            m.imm1(toImmAny(v[0]))  // trailing imm8 selector
        })
    }
    // VPINSRD imm8, m32, xmm, xmm
    if isImm8(v0) && isM32(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), 0, 0, 0)  // EVEX prefix for memory operand: map 0F3A, 66 prefix
            m.emit(0x22)  // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 4)  // disp8*N scale 4: dword memory operand
            m.imm1(toImmAny(v[0]))  // trailing imm8 selector
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPINSRD")
    }
    return p
}
 72744  
// VPINSRQ performs "Insert Quadword".
//
// Mnemonic        : VPINSRQ
// Supported forms : (4 forms)
//
//    * VPINSRQ imm8, r64, xmm, xmm    [AVX]
//    * VPINSRQ imm8, m64, xmm, xmm    [AVX]
//    * VPINSRQ imm8, r64, xmm, xmm    [AVX512DQ]
//    * VPINSRQ imm8, m64, xmm, xmm    [AVX512DQ]
//
// Same opcode (0x22) as VPINSRD; the quadword form is selected by W=1 in
// the VEX/EVEX prefix.
func (self *Program) VPINSRQ(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPINSRQ", 4, Operands { v0, v1, v2, v3 })
    // VPINSRQ imm8, r64, xmm, xmm
    if isImm8(v0) && isReg64(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)  // 3-byte VEX escape
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))  // VEX P0: inverted R/B, opcode map 0F3A
            m.emit(0xf9 ^ (hlcode(v[2]) << 3))  // VEX P1: W=1 (qword), vvvv = ~v2, pp = 66
            m.emit(0x22)  // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))  // ModRM: mod=11, reg=dest, rm=src
            m.imm1(toImmAny(v[0]))  // trailing imm8 selector
        })
    }
    // VPINSRQ imm8, m64, xmm, xmm
    if isImm8(v0) && isM64(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[1]), hlcode(v[2]))  // VEX prefix for memory operand: map 0F3A, 66 prefix, W=1
            m.emit(0x22)  // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 1)  // ModRM + SIB + displacement
            m.imm1(toImmAny(v[0]))  // trailing imm8 selector
        })
    }
    // VPINSRQ imm8, r64, xmm, xmm
    if isImm8(v0) && isReg64(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)  // EVEX escape
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))  // EVEX P0: inverted R/B/R', opcode map 0F3A
            m.emit(0xfd ^ (hlcode(v[2]) << 3))  // EVEX P1: W=1 (qword), vvvv = ~v2, pp = 66
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | 0x00)  // EVEX P2: V' from v2, L'L = 00 (128-bit)
            m.emit(0x22)  // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))  // ModRM: mod=11, reg=dest, rm=src
            m.imm1(toImmAny(v[0]))  // trailing imm8 selector
        })
    }
    // VPINSRQ imm8, m64, xmm, xmm
    if isImm8(v0) && isM64(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), 0, 0, 0)  // EVEX prefix for memory operand: map 0F3A, 66 prefix, W=1
            m.emit(0x22)  // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 8)  // disp8*N scale 8: qword memory operand
            m.imm1(toImmAny(v[0]))  // trailing imm8 selector
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPINSRQ")
    }
    return p
}
 72811  
// VPINSRW performs "Insert Word".
//
// Mnemonic        : VPINSRW
// Supported forms : (4 forms)
//
//    * VPINSRW imm8, r32, xmm, xmm    [AVX]
//    * VPINSRW imm8, m16, xmm, xmm    [AVX]
//    * VPINSRW imm8, r32, xmm, xmm    [AVX512BW]
//    * VPINSRW imm8, m16, xmm, xmm    [AVX512BW]
//
// Unlike VPINSRB/D/Q (map 0F3A), VPINSRW lives in the legacy 0F map
// (opcode 0xC4), so the AVX forms can use the shorter 2-byte VEX prefix.
func (self *Program) VPINSRW(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPINSRW", 4, Operands { v0, v1, v2, v3 })
    // VPINSRW imm8, r32, xmm, xmm
    if isImm8(v0) && isReg32(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[3]), v[1], hlcode(v[2]))  // VEX prefix via helper (66 prefix)
            m.emit(0xc4)  // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))  // ModRM: mod=11, reg=dest, rm=src
            m.imm1(toImmAny(v[0]))  // trailing imm8 selector
        })
    }
    // VPINSRW imm8, m16, xmm, xmm
    if isImm8(v0) && isM16(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[3]), addr(v[1]), hlcode(v[2]))  // VEX prefix via helper (66 prefix), memory operand
            m.emit(0xc4)  // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 1)  // ModRM + SIB + displacement
            m.imm1(toImmAny(v[0]))  // trailing imm8 selector
        })
    }
    // VPINSRW imm8, r32, xmm, xmm
    if isImm8(v0) && isReg32(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)  // EVEX escape
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))  // EVEX P0: inverted R/B/R', opcode map 0F
            m.emit(0x7d ^ (hlcode(v[2]) << 3))  // EVEX P1: W=0, vvvv = ~v2, pp = 66
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | 0x00)  // EVEX P2: V' from v2, L'L = 00 (128-bit)
            m.emit(0xc4)  // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))  // ModRM: mod=11, reg=dest, rm=src
            m.imm1(toImmAny(v[0]))  // trailing imm8 selector
        })
    }
    // VPINSRW imm8, m16, xmm, xmm
    if isImm8(v0) && isM16(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), 0, 0, 0)  // EVEX prefix for memory operand: map 0F, 66 prefix
            m.emit(0xc4)  // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 2)  // disp8*N scale 2: word memory operand
            m.imm1(toImmAny(v[0]))  // trailing imm8 selector
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPINSRW")
    }
    return p
}
 72876  
// VPLZCNTD performs "Count the Number of Leading Zero Bits for Packed Doubleword Values".
//
// Mnemonic        : VPLZCNTD
// Supported forms : (6 forms)
//
//    * VPLZCNTD m128/m32bcst, xmm{k}{z}    [AVX512CD,AVX512VL]
//    * VPLZCNTD m256/m32bcst, ymm{k}{z}    [AVX512CD,AVX512VL]
//    * VPLZCNTD m512/m32bcst, zmm{k}{z}    [AVX512CD]
//    * VPLZCNTD xmm, xmm{k}{z}             [AVX512CD,AVX512VL]
//    * VPLZCNTD ymm, ymm{k}{z}             [AVX512CD,AVX512VL]
//    * VPLZCNTD zmm, zmm{k}{z}             [AVX512CD]
//
// EVEX-only instruction; memory forms support 32-bit broadcast and all
// forms support opmask (k) and zeroing (z) on the destination.
func (self *Program) VPLZCNTD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPLZCNTD", 2, Operands { v0, v1 })
    // VPLZCNTD m128/m32bcst, xmm{k}{z}
    if isM128M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))  // EVEX prefix: map 0F38, 66 prefix, 128-bit, mask/zero/broadcast from operands
            m.emit(0x44)  // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 16)  // disp8*N scale 16: full 128-bit vector memory operand
        })
    }
    // VPLZCNTD m256/m32bcst, ymm{k}{z}
    if isM256M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))  // EVEX prefix: map 0F38, 66 prefix, 256-bit
            m.emit(0x44)  // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 32)  // disp8*N scale 32: full 256-bit vector memory operand
        })
    }
    // VPLZCNTD m512/m32bcst, zmm{k}{z}
    if isM512M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))  // EVEX prefix: map 0F38, 66 prefix, 512-bit
            m.emit(0x44)  // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 64)  // disp8*N scale 64: full 512-bit vector memory operand
        })
    }
    // VPLZCNTD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)  // EVEX escape
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))  // EVEX P0: inverted R/B/R', opcode map 0F38
            m.emit(0x7d)  // EVEX P1: W=0 (dword), vvvv unused, pp = 66
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)  // EVEX P2: z bit, opmask aaa, L'L = 00 (128-bit)
            m.emit(0x44)  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=dest, rm=src
        })
    }
    // VPLZCNTD ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)  // EVEX escape
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))  // EVEX P0: inverted R/B/R', opcode map 0F38
            m.emit(0x7d)  // EVEX P1: W=0 (dword), vvvv unused, pp = 66
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)  // EVEX P2: z bit, opmask aaa, L'L = 01 (256-bit)
            m.emit(0x44)  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=dest, rm=src
        })
    }
    // VPLZCNTD zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)  // EVEX escape
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))  // EVEX P0: inverted R/B/R', opcode map 0F38
            m.emit(0x7d)  // EVEX P1: W=0 (dword), vvvv unused, pp = 66
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)  // EVEX P2: z bit, opmask aaa, L'L = 10 (512-bit)
            m.emit(0x44)  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=dest, rm=src
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPLZCNTD")
    }
    return p
}
 72965  
// VPLZCNTQ performs "Count the Number of Leading Zero Bits for Packed Quadword Values".
//
// Mnemonic        : VPLZCNTQ
// Supported forms : (6 forms)
//
//    * VPLZCNTQ m128/m64bcst, xmm{k}{z}    [AVX512CD,AVX512VL]
//    * VPLZCNTQ m256/m64bcst, ymm{k}{z}    [AVX512CD,AVX512VL]
//    * VPLZCNTQ m512/m64bcst, zmm{k}{z}    [AVX512CD]
//    * VPLZCNTQ xmm, xmm{k}{z}             [AVX512CD,AVX512VL]
//    * VPLZCNTQ ymm, ymm{k}{z}             [AVX512CD,AVX512VL]
//    * VPLZCNTQ zmm, zmm{k}{z}             [AVX512CD]
//
// Quadword variant of VPLZCNTD: same opcode (0x44), distinguished by
// EVEX.W=1 (and 64-bit broadcast on the memory forms).
func (self *Program) VPLZCNTQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPLZCNTQ", 2, Operands { v0, v1 })
    // VPLZCNTQ m128/m64bcst, xmm{k}{z}
    if isM128M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))  // EVEX prefix: map 0F38, 66 prefix, W=1, 128-bit
            m.emit(0x44)  // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 16)  // disp8*N scale 16: full 128-bit vector memory operand
        })
    }
    // VPLZCNTQ m256/m64bcst, ymm{k}{z}
    if isM256M64bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))  // EVEX prefix: map 0F38, 66 prefix, W=1, 256-bit
            m.emit(0x44)  // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 32)  // disp8*N scale 32: full 256-bit vector memory operand
        })
    }
    // VPLZCNTQ m512/m64bcst, zmm{k}{z}
    if isM512M64bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))  // EVEX prefix: map 0F38, 66 prefix, W=1, 512-bit
            m.emit(0x44)  // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 64)  // disp8*N scale 64: full 512-bit vector memory operand
        })
    }
    // VPLZCNTQ xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)  // EVEX escape
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))  // EVEX P0: inverted R/B/R', opcode map 0F38
            m.emit(0xfd)  // EVEX P1: W=1 (qword), vvvv unused, pp = 66
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)  // EVEX P2: z bit, opmask aaa, L'L = 00 (128-bit)
            m.emit(0x44)  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=dest, rm=src
        })
    }
    // VPLZCNTQ ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)  // EVEX escape
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))  // EVEX P0: inverted R/B/R', opcode map 0F38
            m.emit(0xfd)  // EVEX P1: W=1 (qword), vvvv unused, pp = 66
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)  // EVEX P2: z bit, opmask aaa, L'L = 01 (256-bit)
            m.emit(0x44)  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=dest, rm=src
        })
    }
    // VPLZCNTQ zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)  // EVEX escape
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))  // EVEX P0: inverted R/B/R', opcode map 0F38
            m.emit(0xfd)  // EVEX P1: W=1 (qword), vvvv unused, pp = 66
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)  // EVEX P2: z bit, opmask aaa, L'L = 10 (512-bit)
            m.emit(0x44)  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=dest, rm=src
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPLZCNTQ")
    }
    return p
}
 73054  
// VPMACSDD performs "Packed Multiply Accumulate Signed Doubleword to Signed Doubleword".
//
// Mnemonic        : VPMACSDD
// Supported forms : (2 forms)
//
//    * VPMACSDD xmm, xmm, xmm, xmm     [XOP]
//    * VPMACSDD xmm, m128, xmm, xmm    [XOP]
//
// Four-operand AMD XOP instruction; the fourth register (v0, the
// accumulator) is carried in the high nibble of a trailing "is4" byte.
func (self *Program) VPMACSDD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPMACSDD", 4, Operands { v0, v1, v2, v3 })
    // VPMACSDD xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)  // XOP escape
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))  // XOP P0: inverted R/B from high register bits, opcode map 8
            m.emit(0x78 ^ (hlcode(v[2]) << 3))  // XOP P1: W=0, vvvv = ~v2, L=0, pp=00
            m.emit(0x9e)  // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))  // ModRM: mod=11, reg=dest, rm=src
            m.emit(hlcode(v[0]) << 4)  // is4 byte: fourth register operand in high nibble
        })
    }
    // VPMACSDD xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))  // XOP prefix for memory operand: map 8
            m.emit(0x9e)  // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 1)  // ModRM + SIB + displacement
            m.emit(hlcode(v[0]) << 4)  // is4 byte: fourth register operand in high nibble
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMACSDD")
    }
    return p
}
 73094  
// VPMACSDQH performs "Packed Multiply Accumulate Signed High Doubleword to Signed Quadword".
//
// Mnemonic        : VPMACSDQH
// Supported forms : (2 forms)
//
//    * VPMACSDQH xmm, xmm, xmm, xmm     [XOP]
//    * VPMACSDQH xmm, m128, xmm, xmm    [XOP]
//
// Same XOP map-8 encoding pattern as VPMACSDD, opcode 0x9F; the fourth
// register (v0) rides in the trailing "is4" byte.
func (self *Program) VPMACSDQH(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPMACSDQH", 4, Operands { v0, v1, v2, v3 })
    // VPMACSDQH xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)  // XOP escape
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))  // XOP P0: inverted R/B, opcode map 8
            m.emit(0x78 ^ (hlcode(v[2]) << 3))  // XOP P1: W=0, vvvv = ~v2, L=0, pp=00
            m.emit(0x9f)  // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))  // ModRM: mod=11, reg=dest, rm=src
            m.emit(hlcode(v[0]) << 4)  // is4 byte: fourth register operand in high nibble
        })
    }
    // VPMACSDQH xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))  // XOP prefix for memory operand: map 8
            m.emit(0x9f)  // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 1)  // ModRM + SIB + displacement
            m.emit(hlcode(v[0]) << 4)  // is4 byte: fourth register operand in high nibble
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMACSDQH")
    }
    return p
}
 73134  
// VPMACSDQL performs "Packed Multiply Accumulate Signed Low Doubleword to Signed Quadword".
//
// Mnemonic        : VPMACSDQL
// Supported forms : (2 forms)
//
//    * VPMACSDQL xmm, xmm, xmm, xmm     [XOP]
//    * VPMACSDQL xmm, m128, xmm, xmm    [XOP]
//
// Same XOP map-8 encoding pattern as VPMACSDD, opcode 0x97; the fourth
// register (v0) rides in the trailing "is4" byte.
func (self *Program) VPMACSDQL(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPMACSDQL", 4, Operands { v0, v1, v2, v3 })
    // VPMACSDQL xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)  // XOP escape
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))  // XOP P0: inverted R/B, opcode map 8
            m.emit(0x78 ^ (hlcode(v[2]) << 3))  // XOP P1: W=0, vvvv = ~v2, L=0, pp=00
            m.emit(0x97)  // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))  // ModRM: mod=11, reg=dest, rm=src
            m.emit(hlcode(v[0]) << 4)  // is4 byte: fourth register operand in high nibble
        })
    }
    // VPMACSDQL xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))  // XOP prefix for memory operand: map 8
            m.emit(0x97)  // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 1)  // ModRM + SIB + displacement
            m.emit(hlcode(v[0]) << 4)  // is4 byte: fourth register operand in high nibble
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMACSDQL")
    }
    return p
}
 73174  
// VPMACSSDD performs "Packed Multiply Accumulate with Saturation Signed Doubleword to Signed Doubleword".
//
// Mnemonic        : VPMACSSDD
// Supported forms : (2 forms)
//
//    * VPMACSSDD xmm, xmm, xmm, xmm     [XOP]
//    * VPMACSSDD xmm, m128, xmm, xmm    [XOP]
//
// Saturating variant of VPMACSDD: same XOP map-8 pattern, opcode 0x8E;
// the fourth register (v0) rides in the trailing "is4" byte.
func (self *Program) VPMACSSDD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPMACSSDD", 4, Operands { v0, v1, v2, v3 })
    // VPMACSSDD xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)  // XOP escape
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))  // XOP P0: inverted R/B, opcode map 8
            m.emit(0x78 ^ (hlcode(v[2]) << 3))  // XOP P1: W=0, vvvv = ~v2, L=0, pp=00
            m.emit(0x8e)  // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))  // ModRM: mod=11, reg=dest, rm=src
            m.emit(hlcode(v[0]) << 4)  // is4 byte: fourth register operand in high nibble
        })
    }
    // VPMACSSDD xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))  // XOP prefix for memory operand: map 8
            m.emit(0x8e)  // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 1)  // ModRM + SIB + displacement
            m.emit(hlcode(v[0]) << 4)  // is4 byte: fourth register operand in high nibble
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMACSSDD")
    }
    return p
}
 73214  
// VPMACSSDQH performs "Packed Multiply Accumulate with Saturation Signed High Doubleword to Signed Quadword".
//
// Mnemonic        : VPMACSSDQH
// Supported forms : (2 forms)
//
//    * VPMACSSDQH xmm, xmm, xmm, xmm     [XOP]
//    * VPMACSSDQH xmm, m128, xmm, xmm    [XOP]
//
// Saturating variant of VPMACSDQH: same XOP map-8 pattern, opcode 0x8F;
// the fourth register (v0) rides in the trailing "is4" byte.
func (self *Program) VPMACSSDQH(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPMACSSDQH", 4, Operands { v0, v1, v2, v3 })
    // VPMACSSDQH xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)  // XOP escape
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))  // XOP P0: inverted R/B, opcode map 8
            m.emit(0x78 ^ (hlcode(v[2]) << 3))  // XOP P1: W=0, vvvv = ~v2, L=0, pp=00
            m.emit(0x8f)  // opcode (same value as the XOP escape byte, but positioned as opcode)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))  // ModRM: mod=11, reg=dest, rm=src
            m.emit(hlcode(v[0]) << 4)  // is4 byte: fourth register operand in high nibble
        })
    }
    // VPMACSSDQH xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))  // XOP prefix for memory operand: map 8
            m.emit(0x8f)  // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 1)  // ModRM + SIB + displacement
            m.emit(hlcode(v[0]) << 4)  // is4 byte: fourth register operand in high nibble
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMACSSDQH")
    }
    return p
}
 73254  
 73255  // VPMACSSDQL performs "Packed Multiply Accumulate with Saturation Signed Low Doubleword to Signed Quadword".
 73256  //
 73257  // Mnemonic        : VPMACSSDQL
 73258  // Supported forms : (2 forms)
 73259  //
 73260  //    * VPMACSSDQL xmm, xmm, xmm, xmm     [XOP]
 73261  //    * VPMACSSDQL xmm, m128, xmm, xmm    [XOP]
 73262  //
func (self *Program) VPMACSSDQL(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Operands use this file's reversed (AT&T-style) order: v0 is encoded in
    // the trailing "is4" byte, v1 is the reg/mem source, v2 is the
    // vvvv-encoded source, and v3 is the destination register.
    p := self.alloc("VPMACSSDQL", 4, Operands { v0, v1, v2, v3 })
    // VPMACSSDQL xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                           // XOP escape byte
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5)) // inverted R/X/B register-extension bits + map_select 0b01000
            m.emit(0x78 ^ (hlcode(v[2]) << 3))                     // W=0, inverted vvvv encodes v[2], L=0 (128-bit), pp=00
            m.emit(0x87)                                           // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))          // ModRM: mod=0b11, reg=v[3], rm=v[1]
            m.emit(hlcode(v[0]) << 4)                              // is4 immediate: v[0] register index in bits [7:4]
        })
    }
    // VPMACSSDQL xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same XOP encoding, built via the vex3 helper so the memory
            // operand can contribute its X/B extension bits.
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x87)                       // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 1) // ModRM + SIB/displacement for the memory operand (disp scale 1)
            m.emit(hlcode(v[0]) << 4)          // is4 immediate: v[0] register index in bits [7:4]
        })
    }
    // No encoding matched: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VPMACSSDQL")
    }
    return p
}
 73294  
 73295  // VPMACSSWD performs "Packed Multiply Accumulate with Saturation Signed Word to Signed Doubleword".
 73296  //
 73297  // Mnemonic        : VPMACSSWD
 73298  // Supported forms : (2 forms)
 73299  //
 73300  //    * VPMACSSWD xmm, xmm, xmm, xmm     [XOP]
 73301  //    * VPMACSSWD xmm, m128, xmm, xmm    [XOP]
 73302  //
func (self *Program) VPMACSSWD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Operands use this file's reversed (AT&T-style) order: v0 is encoded in
    // the trailing "is4" byte, v1 is the reg/mem source, v2 is the
    // vvvv-encoded source, and v3 is the destination register.
    p := self.alloc("VPMACSSWD", 4, Operands { v0, v1, v2, v3 })
    // VPMACSSWD xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                           // XOP escape byte
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5)) // inverted R/X/B register-extension bits + map_select 0b01000
            m.emit(0x78 ^ (hlcode(v[2]) << 3))                     // W=0, inverted vvvv encodes v[2], L=0 (128-bit), pp=00
            m.emit(0x86)                                           // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))          // ModRM: mod=0b11, reg=v[3], rm=v[1]
            m.emit(hlcode(v[0]) << 4)                              // is4 immediate: v[0] register index in bits [7:4]
        })
    }
    // VPMACSSWD xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same XOP encoding, built via the vex3 helper so the memory
            // operand can contribute its X/B extension bits.
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x86)                       // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 1) // ModRM + SIB/displacement for the memory operand (disp scale 1)
            m.emit(hlcode(v[0]) << 4)          // is4 immediate: v[0] register index in bits [7:4]
        })
    }
    // No encoding matched: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VPMACSSWD")
    }
    return p
}
 73334  
 73335  // VPMACSSWW performs "Packed Multiply Accumulate with Saturation Signed Word to Signed Word".
 73336  //
 73337  // Mnemonic        : VPMACSSWW
 73338  // Supported forms : (2 forms)
 73339  //
 73340  //    * VPMACSSWW xmm, xmm, xmm, xmm     [XOP]
 73341  //    * VPMACSSWW xmm, m128, xmm, xmm    [XOP]
 73342  //
func (self *Program) VPMACSSWW(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Operands use this file's reversed (AT&T-style) order: v0 is encoded in
    // the trailing "is4" byte, v1 is the reg/mem source, v2 is the
    // vvvv-encoded source, and v3 is the destination register.
    p := self.alloc("VPMACSSWW", 4, Operands { v0, v1, v2, v3 })
    // VPMACSSWW xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                           // XOP escape byte
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5)) // inverted R/X/B register-extension bits + map_select 0b01000
            m.emit(0x78 ^ (hlcode(v[2]) << 3))                     // W=0, inverted vvvv encodes v[2], L=0 (128-bit), pp=00
            m.emit(0x85)                                           // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))          // ModRM: mod=0b11, reg=v[3], rm=v[1]
            m.emit(hlcode(v[0]) << 4)                              // is4 immediate: v[0] register index in bits [7:4]
        })
    }
    // VPMACSSWW xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same XOP encoding, built via the vex3 helper so the memory
            // operand can contribute its X/B extension bits.
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x85)                       // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 1) // ModRM + SIB/displacement for the memory operand (disp scale 1)
            m.emit(hlcode(v[0]) << 4)          // is4 immediate: v[0] register index in bits [7:4]
        })
    }
    // No encoding matched: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VPMACSSWW")
    }
    return p
}
 73374  
 73375  // VPMACSWD performs "Packed Multiply Accumulate Signed Word to Signed Doubleword".
 73376  //
 73377  // Mnemonic        : VPMACSWD
 73378  // Supported forms : (2 forms)
 73379  //
 73380  //    * VPMACSWD xmm, xmm, xmm, xmm     [XOP]
 73381  //    * VPMACSWD xmm, m128, xmm, xmm    [XOP]
 73382  //
func (self *Program) VPMACSWD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Operands use this file's reversed (AT&T-style) order: v0 is encoded in
    // the trailing "is4" byte, v1 is the reg/mem source, v2 is the
    // vvvv-encoded source, and v3 is the destination register.
    p := self.alloc("VPMACSWD", 4, Operands { v0, v1, v2, v3 })
    // VPMACSWD xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                           // XOP escape byte
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5)) // inverted R/X/B register-extension bits + map_select 0b01000
            m.emit(0x78 ^ (hlcode(v[2]) << 3))                     // W=0, inverted vvvv encodes v[2], L=0 (128-bit), pp=00
            m.emit(0x96)                                           // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))          // ModRM: mod=0b11, reg=v[3], rm=v[1]
            m.emit(hlcode(v[0]) << 4)                              // is4 immediate: v[0] register index in bits [7:4]
        })
    }
    // VPMACSWD xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same XOP encoding, built via the vex3 helper so the memory
            // operand can contribute its X/B extension bits.
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x96)                       // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 1) // ModRM + SIB/displacement for the memory operand (disp scale 1)
            m.emit(hlcode(v[0]) << 4)          // is4 immediate: v[0] register index in bits [7:4]
        })
    }
    // No encoding matched: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VPMACSWD")
    }
    return p
}
 73414  
 73415  // VPMACSWW performs "Packed Multiply Accumulate Signed Word to Signed Word".
 73416  //
 73417  // Mnemonic        : VPMACSWW
 73418  // Supported forms : (2 forms)
 73419  //
 73420  //    * VPMACSWW xmm, xmm, xmm, xmm     [XOP]
 73421  //    * VPMACSWW xmm, m128, xmm, xmm    [XOP]
 73422  //
func (self *Program) VPMACSWW(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Operands use this file's reversed (AT&T-style) order: v0 is encoded in
    // the trailing "is4" byte, v1 is the reg/mem source, v2 is the
    // vvvv-encoded source, and v3 is the destination register.
    p := self.alloc("VPMACSWW", 4, Operands { v0, v1, v2, v3 })
    // VPMACSWW xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                           // XOP escape byte
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5)) // inverted R/X/B register-extension bits + map_select 0b01000
            m.emit(0x78 ^ (hlcode(v[2]) << 3))                     // W=0, inverted vvvv encodes v[2], L=0 (128-bit), pp=00
            m.emit(0x95)                                           // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))          // ModRM: mod=0b11, reg=v[3], rm=v[1]
            m.emit(hlcode(v[0]) << 4)                              // is4 immediate: v[0] register index in bits [7:4]
        })
    }
    // VPMACSWW xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same XOP encoding, built via the vex3 helper so the memory
            // operand can contribute its X/B extension bits.
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x95)                       // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 1) // ModRM + SIB/displacement for the memory operand (disp scale 1)
            m.emit(hlcode(v[0]) << 4)          // is4 immediate: v[0] register index in bits [7:4]
        })
    }
    // No encoding matched: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VPMACSWW")
    }
    return p
}
 73454  
 73455  // VPMADCSSWD performs "Packed Multiply Add Accumulate with Saturation Signed Word to Signed Doubleword".
 73456  //
 73457  // Mnemonic        : VPMADCSSWD
 73458  // Supported forms : (2 forms)
 73459  //
 73460  //    * VPMADCSSWD xmm, xmm, xmm, xmm     [XOP]
 73461  //    * VPMADCSSWD xmm, m128, xmm, xmm    [XOP]
 73462  //
func (self *Program) VPMADCSSWD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Operands use this file's reversed (AT&T-style) order: v0 is encoded in
    // the trailing "is4" byte, v1 is the reg/mem source, v2 is the
    // vvvv-encoded source, and v3 is the destination register.
    p := self.alloc("VPMADCSSWD", 4, Operands { v0, v1, v2, v3 })
    // VPMADCSSWD xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                           // XOP escape byte
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5)) // inverted R/X/B register-extension bits + map_select 0b01000
            m.emit(0x78 ^ (hlcode(v[2]) << 3))                     // W=0, inverted vvvv encodes v[2], L=0 (128-bit), pp=00
            m.emit(0xa6)                                           // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))          // ModRM: mod=0b11, reg=v[3], rm=v[1]
            m.emit(hlcode(v[0]) << 4)                              // is4 immediate: v[0] register index in bits [7:4]
        })
    }
    // VPMADCSSWD xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same XOP encoding, built via the vex3 helper so the memory
            // operand can contribute its X/B extension bits.
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xa6)                       // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 1) // ModRM + SIB/displacement for the memory operand (disp scale 1)
            m.emit(hlcode(v[0]) << 4)          // is4 immediate: v[0] register index in bits [7:4]
        })
    }
    // No encoding matched: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VPMADCSSWD")
    }
    return p
}
 73494  
 73495  // VPMADCSWD performs "Packed Multiply Add Accumulate Signed Word to Signed Doubleword".
 73496  //
 73497  // Mnemonic        : VPMADCSWD
 73498  // Supported forms : (2 forms)
 73499  //
 73500  //    * VPMADCSWD xmm, xmm, xmm, xmm     [XOP]
 73501  //    * VPMADCSWD xmm, m128, xmm, xmm    [XOP]
 73502  //
func (self *Program) VPMADCSWD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Operands use this file's reversed (AT&T-style) order: v0 is encoded in
    // the trailing "is4" byte, v1 is the reg/mem source, v2 is the
    // vvvv-encoded source, and v3 is the destination register.
    p := self.alloc("VPMADCSWD", 4, Operands { v0, v1, v2, v3 })
    // VPMADCSWD xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                           // XOP escape byte
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5)) // inverted R/X/B register-extension bits + map_select 0b01000
            m.emit(0x78 ^ (hlcode(v[2]) << 3))                     // W=0, inverted vvvv encodes v[2], L=0 (128-bit), pp=00
            m.emit(0xb6)                                           // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))          // ModRM: mod=0b11, reg=v[3], rm=v[1]
            m.emit(hlcode(v[0]) << 4)                              // is4 immediate: v[0] register index in bits [7:4]
        })
    }
    // VPMADCSWD xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same XOP encoding, built via the vex3 helper so the memory
            // operand can contribute its X/B extension bits.
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xb6)                       // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 1) // ModRM + SIB/displacement for the memory operand (disp scale 1)
            m.emit(hlcode(v[0]) << 4)          // is4 immediate: v[0] register index in bits [7:4]
        })
    }
    // No encoding matched: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VPMADCSWD")
    }
    return p
}
 73534  
 73535  // VPMADD52HUQ performs "Packed Multiply of Unsigned 52-bit Unsigned Integers and Add High 52-bit Products to Quadword Accumulators".
 73536  //
 73537  // Mnemonic        : VPMADD52HUQ
 73538  // Supported forms : (6 forms)
 73539  //
 73540  //    * VPMADD52HUQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512IFMA,AVX512VL]
 73541  //    * VPMADD52HUQ xmm, xmm, xmm{k}{z}             [AVX512IFMA,AVX512VL]
 73542  //    * VPMADD52HUQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512IFMA,AVX512VL]
 73543  //    * VPMADD52HUQ ymm, ymm, ymm{k}{z}             [AVX512IFMA,AVX512VL]
 73544  //    * VPMADD52HUQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512IFMA]
 73545  //    * VPMADD52HUQ zmm, zmm, zmm{k}{z}             [AVX512IFMA]
 73546  //
func (self *Program) VPMADD52HUQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Operands use this file's reversed (AT&T-style) order:
    // v0 = reg/mem source, v1 = vvvv-encoded source, v2 = destination (with
    // optional {k} mask and {z} zeroing).
    p := self.alloc("VPMADD52HUQ", 3, Operands { v0, v1, v2 })
    // VPMADD52HUQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512IFMA | ISA_AVX512VL)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX via helper: 0b10 = W1 prefix group, 0x85 = pp/66 + W bits,
            // 0b00 = 128-bit vector length; bcode selects broadcast form.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xb5)                        // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 16) // ModRM + SIB; compressed disp8 scaled by 16 bytes
        })
    }
    // VPMADD52HUQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512IFMA | ISA_AVX512VL)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: inverted R/X/B/R' + map 0b10 (0F38)
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                              // P1: W=1, inverted vvvv encodes v[1], pp=01 (66)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // P2: z, L'L=00 (128-bit), inverted V', aaa mask
            m.emit(0xb5)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: mod=0b11, reg=v[2], rm=v[0]
        })
    }
    // VPMADD52HUQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512IFMA | ISA_AVX512VL)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // As above but vector length 0b01 (256-bit).
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xb5)                        // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 32) // compressed disp8 scaled by 32 bytes
        })
    }
    // VPMADD52HUQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512IFMA | ISA_AVX512VL)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: inverted R/X/B/R' + map 0b10 (0F38)
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                              // P1: W=1, inverted vvvv encodes v[1], pp=01 (66)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // P2: z, L'L=01 (256-bit), inverted V', aaa mask
            m.emit(0xb5)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: mod=0b11, reg=v[2], rm=v[0]
        })
    }
    // VPMADD52HUQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512IFMA)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // As above but vector length 0b10 (512-bit).
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xb5)                        // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64) // compressed disp8 scaled by 64 bytes
        })
    }
    // VPMADD52HUQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512IFMA)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: inverted R/X/B/R' + map 0b10 (0F38)
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                              // P1: W=1, inverted vvvv encodes v[1], pp=01 (66)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // P2: z, L'L=10 (512-bit), inverted V', aaa mask
            m.emit(0xb5)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: mod=0b11, reg=v[2], rm=v[0]
        })
    }
    // No encoding matched: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VPMADD52HUQ")
    }
    return p
}
 73623  
 73624  // VPMADD52LUQ performs "Packed Multiply of Unsigned 52-bit Integers and Add the Low 52-bit Products to Quadword Accumulators".
 73625  //
 73626  // Mnemonic        : VPMADD52LUQ
 73627  // Supported forms : (6 forms)
 73628  //
 73629  //    * VPMADD52LUQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512IFMA,AVX512VL]
 73630  //    * VPMADD52LUQ xmm, xmm, xmm{k}{z}             [AVX512IFMA,AVX512VL]
 73631  //    * VPMADD52LUQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512IFMA,AVX512VL]
 73632  //    * VPMADD52LUQ ymm, ymm, ymm{k}{z}             [AVX512IFMA,AVX512VL]
 73633  //    * VPMADD52LUQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512IFMA]
 73634  //    * VPMADD52LUQ zmm, zmm, zmm{k}{z}             [AVX512IFMA]
 73635  //
func (self *Program) VPMADD52LUQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Operands use this file's reversed (AT&T-style) order:
    // v0 = reg/mem source, v1 = vvvv-encoded source, v2 = destination (with
    // optional {k} mask and {z} zeroing).
    p := self.alloc("VPMADD52LUQ", 3, Operands { v0, v1, v2 })
    // VPMADD52LUQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512IFMA | ISA_AVX512VL)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX via helper: 0b10 = W1 prefix group, 0x85 = pp/66 + W bits,
            // 0b00 = 128-bit vector length; bcode selects broadcast form.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xb4)                        // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 16) // ModRM + SIB; compressed disp8 scaled by 16 bytes
        })
    }
    // VPMADD52LUQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512IFMA | ISA_AVX512VL)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: inverted R/X/B/R' + map 0b10 (0F38)
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                              // P1: W=1, inverted vvvv encodes v[1], pp=01 (66)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // P2: z, L'L=00 (128-bit), inverted V', aaa mask
            m.emit(0xb4)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: mod=0b11, reg=v[2], rm=v[0]
        })
    }
    // VPMADD52LUQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512IFMA | ISA_AVX512VL)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // As above but vector length 0b01 (256-bit).
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xb4)                        // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 32) // compressed disp8 scaled by 32 bytes
        })
    }
    // VPMADD52LUQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512IFMA | ISA_AVX512VL)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: inverted R/X/B/R' + map 0b10 (0F38)
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                              // P1: W=1, inverted vvvv encodes v[1], pp=01 (66)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // P2: z, L'L=01 (256-bit), inverted V', aaa mask
            m.emit(0xb4)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: mod=0b11, reg=v[2], rm=v[0]
        })
    }
    // VPMADD52LUQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512IFMA)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // As above but vector length 0b10 (512-bit).
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xb4)                        // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64) // compressed disp8 scaled by 64 bytes
        })
    }
    // VPMADD52LUQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512IFMA)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: inverted R/X/B/R' + map 0b10 (0F38)
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                              // P1: W=1, inverted vvvv encodes v[1], pp=01 (66)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // P2: z, L'L=10 (512-bit), inverted V', aaa mask
            m.emit(0xb4)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: mod=0b11, reg=v[2], rm=v[0]
        })
    }
    // No encoding matched: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VPMADD52LUQ")
    }
    return p
}
 73712  
 73713  // VPMADDUBSW performs "Multiply and Add Packed Signed and Unsigned Byte Integers".
 73714  //
 73715  // Mnemonic        : VPMADDUBSW
 73716  // Supported forms : (10 forms)
 73717  //
 73718  //    * VPMADDUBSW xmm, xmm, xmm           [AVX]
 73719  //    * VPMADDUBSW m128, xmm, xmm          [AVX]
 73720  //    * VPMADDUBSW ymm, ymm, ymm           [AVX2]
 73721  //    * VPMADDUBSW m256, ymm, ymm          [AVX2]
 73722  //    * VPMADDUBSW zmm, zmm, zmm{k}{z}     [AVX512BW]
 73723  //    * VPMADDUBSW m512, zmm, zmm{k}{z}    [AVX512BW]
 73724  //    * VPMADDUBSW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
 73725  //    * VPMADDUBSW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
 73726  //    * VPMADDUBSW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
 73727  //    * VPMADDUBSW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
 73728  //
func (self *Program) VPMADDUBSW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Operands use this file's reversed (AT&T-style) order:
    // v0 = reg/mem source, v1 = vvvv-encoded source, v2 = destination.
    // VEX forms are selected for plain xmm/ymm operands; EVEX forms for
    // EVEX-only registers or masked ({k}{z}) destinations.
    p := self.alloc("VPMADDUBSW", 3, Operands { v0, v1, v2 })
    // VPMADDUBSW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                           // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5)) // inverted R/X/B bits + map_select 0b00010 (0F38)
            m.emit(0x79 ^ (hlcode(v[1]) << 3))                     // W=0, inverted vvvv encodes v[1], L=0 (128-bit), pp=01 (66)
            m.emit(0x04)                                           // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))          // ModRM: mod=0b11, reg=v[2], rm=v[0]
        })
    }
    // VPMADDUBSW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same VEX encoding via the vex3 helper (memory operand supplies
            // its own X/B extension bits); 0x01 = L0+pp(66) bits.
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x04)                       // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1) // ModRM + SIB/displacement (disp scale 1)
        })
    }
    // VPMADDUBSW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                           // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5)) // inverted R/X/B bits + map_select 0b00010 (0F38)
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                     // W=0, inverted vvvv encodes v[1], L=1 (256-bit), pp=01 (66)
            m.emit(0x04)                                           // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))          // ModRM: mod=0b11, reg=v[2], rm=v[0]
        })
    }
    // VPMADDUBSW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // As the m128 form but 0x05 = L1+pp(66) bits (256-bit).
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x04)                       // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1) // ModRM + SIB/displacement (disp scale 1)
        })
    }
    // VPMADDUBSW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: inverted R/X/B/R' + map 0b10 (0F38)
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // P1: W=0, inverted vvvv encodes v[1], pp=01 (66)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // P2: z, L'L=10 (512-bit), inverted V', aaa mask
            m.emit(0x04)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: mod=0b11, reg=v[2], rm=v[0]
        })
    }
    // VPMADDUBSW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX via helper; vector length 0b10 (512-bit), no broadcast.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x04)                        // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64) // compressed disp8 scaled by 64 bytes
        })
    }
    // VPMADDUBSW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: inverted R/X/B/R' + map 0b10 (0F38)
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // P1: W=0, inverted vvvv encodes v[1], pp=01 (66)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // P2: z, L'L=00 (128-bit), inverted V', aaa mask
            m.emit(0x04)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: mod=0b11, reg=v[2], rm=v[0]
        })
    }
    // VPMADDUBSW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX via helper; vector length 0b00 (128-bit), no broadcast.
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x04)                        // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 16) // compressed disp8 scaled by 16 bytes
        })
    }
    // VPMADDUBSW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: inverted R/X/B/R' + map 0b10 (0F38)
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // P1: W=0, inverted vvvv encodes v[1], pp=01 (66)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // P2: z, L'L=01 (256-bit), inverted V', aaa mask
            m.emit(0x04)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: mod=0b11, reg=v[2], rm=v[0]
        })
    }
    // VPMADDUBSW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX via helper; vector length 0b01 (256-bit), no broadcast.
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x04)                        // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 32) // compressed disp8 scaled by 32 bytes
        })
    }
    // No encoding matched: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VPMADDUBSW")
    }
    return p
}
 73849  
 73850  // VPMADDWD performs "Multiply and Add Packed Signed Word Integers".
 73851  //
 73852  // Mnemonic        : VPMADDWD
 73853  // Supported forms : (10 forms)
 73854  //
 73855  //    * VPMADDWD xmm, xmm, xmm           [AVX]
 73856  //    * VPMADDWD m128, xmm, xmm          [AVX]
 73857  //    * VPMADDWD ymm, ymm, ymm           [AVX2]
 73858  //    * VPMADDWD m256, ymm, ymm          [AVX2]
 73859  //    * VPMADDWD zmm, zmm, zmm{k}{z}     [AVX512BW]
 73860  //    * VPMADDWD m512, zmm, zmm{k}{z}    [AVX512BW]
 73861  //    * VPMADDWD xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
 73862  //    * VPMADDWD m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
 73863  //    * VPMADDWD ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
 73864  //    * VPMADDWD m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
 73865  //
 73866  func (self *Program) VPMADDWD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
 73867      p := self.alloc("VPMADDWD", 3, Operands { v0, v1, v2 })
 73868      // VPMADDWD xmm, xmm, xmm
 73869      if isXMM(v0) && isXMM(v1) && isXMM(v2) {
 73870          self.require(ISA_AVX)
 73871          p.domain = DomainAVX
 73872          p.add(0, func(m *_Encoding, v []interface{}) {
 73873              m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
 73874              m.emit(0xf5)
 73875              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 73876          })
 73877      }
 73878      // VPMADDWD m128, xmm, xmm
 73879      if isM128(v0) && isXMM(v1) && isXMM(v2) {
 73880          self.require(ISA_AVX)
 73881          p.domain = DomainAVX
 73882          p.add(0, func(m *_Encoding, v []interface{}) {
 73883              m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 73884              m.emit(0xf5)
 73885              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 73886          })
 73887      }
 73888      // VPMADDWD ymm, ymm, ymm
 73889      if isYMM(v0) && isYMM(v1) && isYMM(v2) {
 73890          self.require(ISA_AVX2)
 73891          p.domain = DomainAVX
 73892          p.add(0, func(m *_Encoding, v []interface{}) {
 73893              m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
 73894              m.emit(0xf5)
 73895              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 73896          })
 73897      }
 73898      // VPMADDWD m256, ymm, ymm
 73899      if isM256(v0) && isYMM(v1) && isYMM(v2) {
 73900          self.require(ISA_AVX2)
 73901          p.domain = DomainAVX
 73902          p.add(0, func(m *_Encoding, v []interface{}) {
 73903              m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 73904              m.emit(0xf5)
 73905              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 73906          })
 73907      }
 73908      // VPMADDWD zmm, zmm, zmm{k}{z}
 73909      if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
 73910          self.require(ISA_AVX512BW)
 73911          p.domain = DomainAVX
 73912          p.add(0, func(m *_Encoding, v []interface{}) {
 73913              m.emit(0x62)
 73914              m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 73915              m.emit(0x7d ^ (hlcode(v[1]) << 3))
 73916              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
 73917              m.emit(0xf5)
 73918              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 73919          })
 73920      }
 73921      // VPMADDWD m512, zmm, zmm{k}{z}
 73922      if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
 73923          self.require(ISA_AVX512BW)
 73924          p.domain = DomainAVX
 73925          p.add(0, func(m *_Encoding, v []interface{}) {
 73926              m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
 73927              m.emit(0xf5)
 73928              m.mrsd(lcode(v[2]), addr(v[0]), 64)
 73929          })
 73930      }
 73931      // VPMADDWD xmm, xmm, xmm{k}{z}
 73932      if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
 73933          self.require(ISA_AVX512VL | ISA_AVX512BW)
 73934          p.domain = DomainAVX
 73935          p.add(0, func(m *_Encoding, v []interface{}) {
 73936              m.emit(0x62)
 73937              m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 73938              m.emit(0x7d ^ (hlcode(v[1]) << 3))
 73939              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
 73940              m.emit(0xf5)
 73941              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 73942          })
 73943      }
 73944      // VPMADDWD m128, xmm, xmm{k}{z}
 73945      if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
 73946          self.require(ISA_AVX512VL | ISA_AVX512BW)
 73947          p.domain = DomainAVX
 73948          p.add(0, func(m *_Encoding, v []interface{}) {
 73949              m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
 73950              m.emit(0xf5)
 73951              m.mrsd(lcode(v[2]), addr(v[0]), 16)
 73952          })
 73953      }
 73954      // VPMADDWD ymm, ymm, ymm{k}{z}
 73955      if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
 73956          self.require(ISA_AVX512VL | ISA_AVX512BW)
 73957          p.domain = DomainAVX
 73958          p.add(0, func(m *_Encoding, v []interface{}) {
 73959              m.emit(0x62)
 73960              m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 73961              m.emit(0x7d ^ (hlcode(v[1]) << 3))
 73962              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
 73963              m.emit(0xf5)
 73964              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 73965          })
 73966      }
 73967      // VPMADDWD m256, ymm, ymm{k}{z}
 73968      if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
 73969          self.require(ISA_AVX512VL | ISA_AVX512BW)
 73970          p.domain = DomainAVX
 73971          p.add(0, func(m *_Encoding, v []interface{}) {
 73972              m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
 73973              m.emit(0xf5)
 73974              m.mrsd(lcode(v[2]), addr(v[0]), 32)
 73975          })
 73976      }
 73977      if p.len == 0 {
 73978          panic("invalid operands for VPMADDWD")
 73979      }
 73980      return p
 73981  }
 73982  
// VPMASKMOVD performs "Conditional Move Packed Doubleword Integers".
//
// Mnemonic        : VPMASKMOVD
// Supported forms : (4 forms)
//
//    * VPMASKMOVD m128, xmm, xmm    [AVX2]
//    * VPMASKMOVD m256, ymm, ymm    [AVX2]
//    * VPMASKMOVD xmm, xmm, m128    [AVX2]
//    * VPMASKMOVD ymm, ymm, m256    [AVX2]
//
func (self *Program) VPMASKMOVD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Each operand form that matches below registers one candidate encoding
    // via p.add; if no form matches, p.len stays 0 and we panic at the end.
    // Load forms (memory source) emit opcode 0x8c, store forms (memory
    // destination) emit 0x8e; all four use the 3-byte VEX prefix (0xc4).
    p := self.alloc("VPMASKMOVD", 3, Operands { v0, v1, v2 })
    // VPMASKMOVD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x8c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMASKMOVD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0x05 vs 0x01 in the third vex3 argument selects the 256-bit
            // (VEX.L=1) variant — presumably; confirm against _Encoding.vex3.
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x8c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMASKMOVD xmm, xmm, m128
    if isXMM(v0) && isXMM(v1) && isM128(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Store form: register operand v[0] is encoded in ModRM.reg,
            // memory operand v[2] in ModRM.rm.
            m.vex3(0xc4, 0b10, 0x01, hcode(v[0]), addr(v[2]), hlcode(v[1]))
            m.emit(0x8e)
            m.mrsd(lcode(v[0]), addr(v[2]), 1)
        })
    }
    // VPMASKMOVD ymm, ymm, m256
    if isYMM(v0) && isYMM(v1) && isM256(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[0]), addr(v[2]), hlcode(v[1]))
            m.emit(0x8e)
            m.mrsd(lcode(v[0]), addr(v[2]), 1)
        })
    }
    // No operand form matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VPMASKMOVD")
    }
    return p
}
 74040  
// VPMASKMOVQ performs "Conditional Move Packed Quadword Integers".
//
// Mnemonic        : VPMASKMOVQ
// Supported forms : (4 forms)
//
//    * VPMASKMOVQ m128, xmm, xmm    [AVX2]
//    * VPMASKMOVQ m256, ymm, ymm    [AVX2]
//    * VPMASKMOVQ xmm, xmm, m128    [AVX2]
//    * VPMASKMOVQ ymm, ymm, m256    [AVX2]
//
func (self *Program) VPMASKMOVQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Same structure as VPMASKMOVD; the only difference is the high bit set
    // in the third vex3 argument (0x81/0x85 vs 0x01/0x05), which selects the
    // quadword-element variant (VEX.W=1 — presumably; confirm in vex3).
    // Load forms emit opcode 0x8c, store forms emit 0x8e.
    p := self.alloc("VPMASKMOVQ", 3, Operands { v0, v1, v2 })
    // VPMASKMOVQ m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x8c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMASKMOVQ m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x8c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMASKMOVQ xmm, xmm, m128
    if isXMM(v0) && isXMM(v1) && isM128(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Store form: register v[0] goes in ModRM.reg, memory v[2] in rm.
            m.vex3(0xc4, 0b10, 0x81, hcode(v[0]), addr(v[2]), hlcode(v[1]))
            m.emit(0x8e)
            m.mrsd(lcode(v[0]), addr(v[2]), 1)
        })
    }
    // VPMASKMOVQ ymm, ymm, m256
    if isYMM(v0) && isYMM(v1) && isM256(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[0]), addr(v[2]), hlcode(v[1]))
            m.emit(0x8e)
            m.mrsd(lcode(v[0]), addr(v[2]), 1)
        })
    }
    // No operand form matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VPMASKMOVQ")
    }
    return p
}
 74098  
// VPMAXSB performs "Maximum of Packed Signed Byte Integers".
//
// Mnemonic        : VPMAXSB
// Supported forms : (10 forms)
//
//    * VPMAXSB xmm, xmm, xmm           [AVX]
//    * VPMAXSB m128, xmm, xmm          [AVX]
//    * VPMAXSB ymm, ymm, ymm           [AVX2]
//    * VPMAXSB m256, ymm, ymm          [AVX2]
//    * VPMAXSB zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPMAXSB m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPMAXSB xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMAXSB m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPMAXSB ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMAXSB m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
func (self *Program) VPMAXSB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Opcode 0x3c throughout. Register-register VEX forms hand-emit the
    // 3-byte VEX prefix (0xc4 ...); memory VEX forms use the vex3 helper.
    // AVX-512 register forms hand-emit the 4-byte EVEX prefix (0x62 ...);
    // AVX-512 memory forms use the evex helper, with the final mrsd argument
    // being the compressed-displacement scale (16/32/64 bytes per vector
    // width). If no form matches, p.len stays 0 and we panic below.
    p := self.alloc("VPMAXSB", 3, Operands { v0, v1, v2 })
    // VPMAXSB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x3c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXSB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x3c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMAXSB ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            // 0x7d here vs 0x79 in the xmm form — the 256-bit variant.
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x3c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXSB m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x3c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMAXSB zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-emitted EVEX prefix: byte 4 folds in the mask register
            // (kcode), zeroing bit (zcode), and vector length (0x40 = 512-bit).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x3c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXSB m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x3c)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPMAXSB xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            // Trailing 0x00/0x20/0x40 across these EVEX forms encodes the
            // 128/256/512-bit vector length respectively.
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x3c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXSB m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x3c)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPMAXSB ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x3c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXSB m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x3c)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No operand form matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VPMAXSB")
    }
    return p
}
 74235  
// VPMAXSD performs "Maximum of Packed Signed Doubleword Integers".
//
// Mnemonic        : VPMAXSD
// Supported forms : (10 forms)
//
//    * VPMAXSD xmm, xmm, xmm                   [AVX]
//    * VPMAXSD m128, xmm, xmm                  [AVX]
//    * VPMAXSD ymm, ymm, ymm                   [AVX2]
//    * VPMAXSD m256, ymm, ymm                  [AVX2]
//    * VPMAXSD m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPMAXSD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPMAXSD m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPMAXSD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPMAXSD m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPMAXSD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPMAXSD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Opcode 0x3d throughout. Same layout as VPMAXSB, but the AVX-512 memory
    // forms additionally support 32-bit broadcast (bcode(v[0]) is forwarded
    // as the last evex argument instead of a literal 0).
    // If no form matches, p.len stays 0 and we panic below.
    p := self.alloc("VPMAXSD", 3, Operands { v0, v1, v2 })
    // VPMAXSD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-emitted 3-byte VEX prefix for the register-register form.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x3d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXSD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x3d)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMAXSD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x3d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXSD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x3d)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMAXSD m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // bcode(v[0]) carries the broadcast bit for the m32bcst variant.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x3d)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPMAXSD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-emitted 4-byte EVEX prefix; byte 4 folds in mask (kcode),
            // zeroing (zcode), and vector length (0x40 = 512-bit).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x3d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXSD m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x3d)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPMAXSD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x3d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXSD m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x3d)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPMAXSD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x3d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand form matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VPMAXSD")
    }
    return p
}
 74372  
// VPMAXSQ performs "Maximum of Packed Signed Quadword Integers".
//
// Mnemonic        : VPMAXSQ
// Supported forms : (6 forms)
//
//    * VPMAXSQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPMAXSQ zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPMAXSQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPMAXSQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPMAXSQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPMAXSQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPMAXSQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // EVEX-only instruction (no VEX legacy forms), opcode 0x3d. Compared to
    // VPMAXSD, the evex pp argument is 0x85 (vs 0x05) and the hand-emitted
    // prefix uses 0xfd (vs 0x7d) — selecting quadword elements (EVEX.W=1,
    // presumably; confirm in _Encoding.evex). Memory forms support 64-bit
    // broadcast via bcode(v[0]). If no form matches we panic below.
    p := self.alloc("VPMAXSQ", 3, Operands { v0, v1, v2 })
    // VPMAXSQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x3d)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPMAXSQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-emitted 4-byte EVEX prefix; byte 4 folds in mask (kcode),
            // zeroing (zcode), and vector length (0x40 = 512-bit).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x3d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXSQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x3d)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPMAXSQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x3d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXSQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x3d)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPMAXSQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x3d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand form matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VPMAXSQ")
    }
    return p
}
 74461  
// VPMAXSW performs "Maximum of Packed Signed Word Integers".
//
// Mnemonic        : VPMAXSW
// Supported forms : (10 forms)
//
//    * VPMAXSW xmm, xmm, xmm           [AVX]
//    * VPMAXSW m128, xmm, xmm          [AVX]
//    * VPMAXSW ymm, ymm, ymm           [AVX2]
//    * VPMAXSW m256, ymm, ymm          [AVX2]
//    * VPMAXSW zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPMAXSW m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPMAXSW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMAXSW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPMAXSW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMAXSW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
func (self *Program) VPMAXSW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Opcode 0xee throughout. Unlike VPMAXSB/VPMAXSD (0F 38 map), the AVX
    // forms here use the compact 2-byte VEX prefix via the vex2 helper
    // (first argument 1 = 128-bit, 5 = 256-bit variant). AVX-512 register
    // forms hand-emit the 4-byte EVEX prefix (0x62 ...); AVX-512 memory
    // forms use the evex helper with a 16/32/64-byte displacement scale.
    // If no form matches, p.len stays 0 and we panic below.
    p := self.alloc("VPMAXSW", 3, Operands { v0, v1, v2 })
    // VPMAXSW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xee)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXSW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xee)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMAXSW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xee)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXSW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xee)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMAXSW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-emitted 4-byte EVEX prefix; byte 4 folds in mask (kcode),
            // zeroing (zcode), and vector length (0x40 = 512-bit).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xee)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXSW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xee)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPMAXSW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            // Trailing 0x00/0x20/0x40 across these EVEX forms encodes the
            // 128/256/512-bit vector length respectively.
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xee)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXSW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xee)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPMAXSW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xee)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXSW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xee)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No operand form matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VPMAXSW")
    }
    return p
}
 74594  
// VPMAXUB performs "Maximum of Packed Unsigned Byte Integers".
//
// Mnemonic        : VPMAXUB
// Supported forms : (10 forms)
//
//    * VPMAXUB xmm, xmm, xmm           [AVX]
//    * VPMAXUB m128, xmm, xmm          [AVX]
//    * VPMAXUB ymm, ymm, ymm           [AVX2]
//    * VPMAXUB m256, ymm, ymm          [AVX2]
//    * VPMAXUB zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPMAXUB m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPMAXUB xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMAXUB m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPMAXUB ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMAXUB m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Operand order is AT&T-style: v0 = source operand (reg/mem), v1 = second
// source register, v2 = destination (possibly with {k}{z} masking). Each
// matching form registers one candidate encoding; the encoder later picks
// among them. Panics if no form matches the operand types.
func (self *Program) VPMAXUB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPMAXUB", 3, Operands { v0, v1, v2 })
    // VPMAXUB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))  // 2-byte VEX: 128-bit, 0x66 prefix, 0F opcode map
            m.emit(0xde)                                // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))  // ModRM: mod=11 (reg-direct), reg=dst, rm=src
        })
    }
    // VPMAXUB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xde)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)  // ModRM+SIB+disp for the memory operand (no disp8 compression under VEX)
        })
    }
    // VPMAXUB ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))  // 2-byte VEX: 256-bit (VEX.L=1), 0x66 prefix
            m.emit(0xde)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXUB m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xde)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMAXUB zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix for the register-register form.
            m.emit(0x62)  // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: inverted R/X/B/R' extension bits, mm=01 (0F map)
            m.emit(0x7d ^ (hlcode(v[1]) << 3))  // P1: W=0, inverted vvvv=src2, pp=01 (0x66)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)  // P2: z-bit, V', aaa mask, L'L=10 (512-bit)
            m.emit(0xde)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXUB m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)  // map=0F, 66 prefix, L'L=512-bit, no broadcast
            m.emit(0xde)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)  // disp8 compressed by the 64-byte vector size
        })
    }
    // VPMAXUB xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)  // P2: L'L=00 (128-bit)
            m.emit(0xde)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXUB m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xde)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)  // disp8 compressed by the 16-byte vector size
        })
    }
    // VPMAXUB ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)  // P2: L'L=01 (256-bit)
            m.emit(0xde)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXUB m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xde)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)  // disp8 compressed by the 32-byte vector size
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMAXUB")
    }
    return p
}
 74727  
// VPMAXUD performs "Maximum of Packed Unsigned Doubleword Integers".
//
// Mnemonic        : VPMAXUD
// Supported forms : (10 forms)
//
//    * VPMAXUD xmm, xmm, xmm                   [AVX]
//    * VPMAXUD m128, xmm, xmm                  [AVX]
//    * VPMAXUD ymm, ymm, ymm                   [AVX2]
//    * VPMAXUD m256, ymm, ymm                  [AVX2]
//    * VPMAXUD m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPMAXUD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPMAXUD m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPMAXUD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPMAXUD m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPMAXUD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Operand order is AT&T-style: v0 = source (reg/mem, EVEX memory forms allow
// a 32-bit broadcast), v1 = second source register, v2 = destination with
// optional {k}{z} masking. Opcode lives in the 0F38 map, so the VEX forms
// need a 3-byte VEX prefix. Panics if no form matches.
func (self *Program) VPMAXUD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPMAXUD", 3, Operands { v0, v1, v2 })
    // VPMAXUD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix (0F38 map cannot use the 2-byte form).
            m.emit(0xc4)  // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // byte 1: inverted R/X/B, mmmmm=00010 (0F38 map)
            m.emit(0x79 ^ (hlcode(v[1]) << 3))  // byte 2: W=0, inverted vvvv=src2, L=0 (128-bit), pp=01 (0x66)
            m.emit(0x3f)  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))  // ModRM: mod=11 (reg-direct), reg=dst, rm=src
        })
    }
    // VPMAXUD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))  // 3-byte VEX: 0F38 map, 0x66 prefix, 128-bit
            m.emit(0x3f)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)  // memory operand; no disp8 compression under VEX
        })
    }
    // VPMAXUD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))  // byte 2: L=1 (256-bit), pp=01
            m.emit(0x3f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXUD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))  // 3-byte VEX: 0F38 map, 0x66 prefix, 256-bit
            m.emit(0x3f)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMAXUD m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))  // 0F38 map, 512-bit, b-bit from the broadcast flag
            m.emit(0x3f)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)  // disp8 compressed by the 64-byte vector size
        })
    }
    // VPMAXUD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix for the register-register form.
            m.emit(0x62)  // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: inverted R/X/B/R', mm=10 (0F38 map)
            m.emit(0x7d ^ (hlcode(v[1]) << 3))  // P1: W=0, inverted vvvv=src2, pp=01 (0x66)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)  // P2: z-bit, V', aaa mask, L'L=10 (512-bit)
            m.emit(0x3f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXUD m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x3f)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)  // disp8 compressed by the 16-byte vector size
        })
    }
    // VPMAXUD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)  // P2: L'L=00 (128-bit)
            m.emit(0x3f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXUD m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x3f)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)  // disp8 compressed by the 32-byte vector size
        })
    }
    // VPMAXUD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)  // P2: L'L=01 (256-bit)
            m.emit(0x3f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMAXUD")
    }
    return p
}
 74864  
// VPMAXUQ performs "Maximum of Packed Unsigned Quadword Integers".
//
// Mnemonic        : VPMAXUQ
// Supported forms : (6 forms)
//
//    * VPMAXUQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPMAXUQ zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPMAXUQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPMAXUQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPMAXUQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPMAXUQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// EVEX-only instruction (no VEX/legacy forms). Operand order is AT&T-style:
// v0 = source (reg/mem, memory forms allow a 64-bit broadcast), v1 = second
// source register, v2 = destination with optional {k}{z} masking. Note the
// 0x85/0xfd prefix bases versus VPMAXUD's 0x05/0x7d: they set EVEX.W=1 for
// the 64-bit element width. Panics if no form matches.
func (self *Program) VPMAXUQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPMAXUQ", 3, Operands { v0, v1, v2 })
    // VPMAXUQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))  // 0F38 map, W=1, 512-bit, b-bit from the broadcast flag
            m.emit(0x3f)  // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)  // disp8 compressed by the 64-byte vector size
        })
    }
    // VPMAXUQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix for the register-register form.
            m.emit(0x62)  // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: inverted R/X/B/R', mm=10 (0F38 map)
            m.emit(0xfd ^ (hlcode(v[1]) << 3))  // P1: W=1 (quadword), inverted vvvv=src2, pp=01 (0x66)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)  // P2: z-bit, V', aaa mask, L'L=10 (512-bit)
            m.emit(0x3f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))  // ModRM: mod=11 (reg-direct), reg=dst, rm=src
        })
    }
    // VPMAXUQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x3f)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)  // disp8 compressed by the 16-byte vector size
        })
    }
    // VPMAXUQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)  // P2: L'L=00 (128-bit)
            m.emit(0x3f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXUQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x3f)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)  // disp8 compressed by the 32-byte vector size
        })
    }
    // VPMAXUQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)  // P2: L'L=01 (256-bit)
            m.emit(0x3f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMAXUQ")
    }
    return p
}
 74953  
// VPMAXUW performs "Maximum of Packed Unsigned Word Integers".
//
// Mnemonic        : VPMAXUW
// Supported forms : (10 forms)
//
//    * VPMAXUW xmm, xmm, xmm           [AVX]
//    * VPMAXUW m128, xmm, xmm          [AVX]
//    * VPMAXUW ymm, ymm, ymm           [AVX2]
//    * VPMAXUW m256, ymm, ymm          [AVX2]
//    * VPMAXUW zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPMAXUW m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPMAXUW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMAXUW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPMAXUW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMAXUW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Operand order is AT&T-style: v0 = source (reg/mem), v1 = second source
// register, v2 = destination with optional {k}{z} masking. Word-granular
// instruction, so the EVEX memory forms have no embedded broadcast. Opcode
// is in the 0F38 map (3-byte VEX required). Panics if no form matches.
func (self *Program) VPMAXUW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPMAXUW", 3, Operands { v0, v1, v2 })
    // VPMAXUW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix (0F38 map cannot use the 2-byte form).
            m.emit(0xc4)  // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // byte 1: inverted R/X/B, mmmmm=00010 (0F38 map)
            m.emit(0x79 ^ (hlcode(v[1]) << 3))  // byte 2: W=0, inverted vvvv=src2, L=0 (128-bit), pp=01 (0x66)
            m.emit(0x3e)  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))  // ModRM: mod=11 (reg-direct), reg=dst, rm=src
        })
    }
    // VPMAXUW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))  // 3-byte VEX: 0F38 map, 0x66 prefix, 128-bit
            m.emit(0x3e)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)  // memory operand; no disp8 compression under VEX
        })
    }
    // VPMAXUW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))  // byte 2: L=1 (256-bit), pp=01
            m.emit(0x3e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXUW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))  // 3-byte VEX: 0F38 map, 0x66 prefix, 256-bit
            m.emit(0x3e)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMAXUW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix for the register-register form.
            m.emit(0x62)  // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: inverted R/X/B/R', mm=10 (0F38 map)
            m.emit(0x7d ^ (hlcode(v[1]) << 3))  // P1: W=0, inverted vvvv=src2, pp=01 (0x66)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)  // P2: z-bit, V', aaa mask, L'L=10 (512-bit)
            m.emit(0x3e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXUW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)  // 0F38 map, 512-bit, no broadcast
            m.emit(0x3e)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)  // disp8 compressed by the 64-byte vector size
        })
    }
    // VPMAXUW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)  // P2: L'L=00 (128-bit)
            m.emit(0x3e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXUW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x3e)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)  // disp8 compressed by the 16-byte vector size
        })
    }
    // VPMAXUW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)  // P2: L'L=01 (256-bit)
            m.emit(0x3e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXUW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x3e)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)  // disp8 compressed by the 32-byte vector size
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMAXUW")
    }
    return p
}
 75090  
// VPMINSB performs "Minimum of Packed Signed Byte Integers".
//
// Mnemonic        : VPMINSB
// Supported forms : (10 forms)
//
//    * VPMINSB xmm, xmm, xmm           [AVX]
//    * VPMINSB m128, xmm, xmm          [AVX]
//    * VPMINSB ymm, ymm, ymm           [AVX2]
//    * VPMINSB m256, ymm, ymm          [AVX2]
//    * VPMINSB zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPMINSB m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPMINSB xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMINSB m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPMINSB ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMINSB m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Operand order is AT&T-style: v0 = source (reg/mem), v1 = second source
// register, v2 = destination with optional {k}{z} masking. Byte-granular
// instruction, so the EVEX memory forms have no embedded broadcast. Opcode
// is in the 0F38 map (3-byte VEX required). Panics if no form matches.
func (self *Program) VPMINSB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPMINSB", 3, Operands { v0, v1, v2 })
    // VPMINSB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix (0F38 map cannot use the 2-byte form).
            m.emit(0xc4)  // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // byte 1: inverted R/X/B, mmmmm=00010 (0F38 map)
            m.emit(0x79 ^ (hlcode(v[1]) << 3))  // byte 2: W=0, inverted vvvv=src2, L=0 (128-bit), pp=01 (0x66)
            m.emit(0x38)  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))  // ModRM: mod=11 (reg-direct), reg=dst, rm=src
        })
    }
    // VPMINSB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))  // 3-byte VEX: 0F38 map, 0x66 prefix, 128-bit
            m.emit(0x38)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)  // memory operand; no disp8 compression under VEX
        })
    }
    // VPMINSB ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))  // byte 2: L=1 (256-bit), pp=01
            m.emit(0x38)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMINSB m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))  // 3-byte VEX: 0F38 map, 0x66 prefix, 256-bit
            m.emit(0x38)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMINSB zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix for the register-register form.
            m.emit(0x62)  // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: inverted R/X/B/R', mm=10 (0F38 map)
            m.emit(0x7d ^ (hlcode(v[1]) << 3))  // P1: W=0, inverted vvvv=src2, pp=01 (0x66)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)  // P2: z-bit, V', aaa mask, L'L=10 (512-bit)
            m.emit(0x38)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMINSB m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)  // 0F38 map, 512-bit, no broadcast
            m.emit(0x38)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)  // disp8 compressed by the 64-byte vector size
        })
    }
    // VPMINSB xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)  // P2: L'L=00 (128-bit)
            m.emit(0x38)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMINSB m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x38)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)  // disp8 compressed by the 16-byte vector size
        })
    }
    // VPMINSB ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)  // P2: L'L=01 (256-bit)
            m.emit(0x38)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMINSB m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x38)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)  // disp8 compressed by the 32-byte vector size
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMINSB")
    }
    return p
}
 75227  
// VPMINSD performs "Minimum of Packed Signed Doubleword Integers".
//
// Mnemonic        : VPMINSD
// Supported forms : (10 forms)
//
//    * VPMINSD xmm, xmm, xmm                   [AVX]
//    * VPMINSD m128, xmm, xmm                  [AVX]
//    * VPMINSD ymm, ymm, ymm                   [AVX2]
//    * VPMINSD m256, ymm, ymm                  [AVX2]
//    * VPMINSD m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPMINSD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPMINSD m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPMINSD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPMINSD m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPMINSD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Operands are given in AT&T order (sources first, destination last).
// Panics if the operand combination matches none of the forms above.
func (self *Program) VPMINSD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPMINSD", 3, Operands { v0, v1, v2 })
    // Each form below is matched independently; every match registers a
    // candidate encoder via p.add, and the final encoding is chosen later
    // when the instruction is assembled. Opcode 0x39 (66 0F 38 map) is
    // shared by all forms.
    // VPMINSD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix (0xC4 escape) for the reg-reg form.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x39)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMINSD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x39)
            // Trailing mrsd argument is the displacement scale: 1 for VEX
            // forms, operand size (disp8 compression) for EVEX forms below.
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMINSD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x39)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMINSD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x39)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMINSD m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // bcode(v[0]) sets the EVEX broadcast bit when the memory
            // operand is a 32-bit broadcast (m32bcst).
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x39)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPMINSD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix (0x62 escape) for the reg-reg form;
            // the trailing 0x40/0x20/0x00 constant in the fourth byte
            // selects the 512/256/128-bit vector length respectively.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x39)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMINSD m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x39)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPMINSD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x39)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMINSD m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x39)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPMINSD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x39)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPMINSD")
    }
    return p
}
 75364  
// VPMINSQ performs "Minimum of Packed Signed Quadword Integers".
//
// Mnemonic        : VPMINSQ
// Supported forms : (6 forms)
//
//    * VPMINSQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPMINSQ zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPMINSQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPMINSQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPMINSQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPMINSQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Unlike VPMINSD there are no VEX (AVX/AVX2) forms: the quadword variant
// is EVEX-only. Operands are in AT&T order (sources first, destination
// last); panics if no form matches.
func (self *Program) VPMINSQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPMINSQ", 3, Operands { v0, v1, v2 })
    // Each matching form appends a candidate encoder via p.add; the final
    // encoding is selected at assembly time. Opcode 0x39 with EVEX.W=1
    // (the 0x85/0xfd prefix constants vs. VPMINSD's 0x05/0x7d) selects the
    // quadword operation.
    // VPMINSQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // bcode(v[0]) sets the broadcast bit for m64bcst operands;
            // trailing mrsd argument is the disp8 compression scale.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x39)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPMINSQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix (0x62 escape); 0x40/0x20/0x00 in the
            // fourth byte selects the 512/256/128-bit vector length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x39)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMINSQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x39)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPMINSQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x39)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMINSQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x39)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPMINSQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x39)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPMINSQ")
    }
    return p
}
 75453  
// VPMINSW performs "Minimum of Packed Signed Word Integers".
//
// Mnemonic        : VPMINSW
// Supported forms : (10 forms)
//
//    * VPMINSW xmm, xmm, xmm           [AVX]
//    * VPMINSW m128, xmm, xmm          [AVX]
//    * VPMINSW ymm, ymm, ymm           [AVX2]
//    * VPMINSW m256, ymm, ymm          [AVX2]
//    * VPMINSW zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPMINSW m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPMINSW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMINSW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPMINSW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMINSW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Word-granular instruction: the AVX-512 forms require AVX512BW (not
// AVX512F) and offer no embedded broadcast. Operands are in AT&T order
// (sources first, destination last); panics if no form matches.
func (self *Program) VPMINSW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPMINSW", 3, Operands { v0, v1, v2 })
    // Each matching form appends a candidate encoder via p.add; the final
    // encoding is selected at assembly time. Opcode 0xEA (66 0F map) is
    // shared by all forms.
    // VPMINSW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix suffices here (legacy 0F map).
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xea)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMINSW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xea)
            // Trailing mrsd argument is the displacement scale: 1 for VEX
            // forms, operand size (disp8 compression) for EVEX forms.
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMINSW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xea)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMINSW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xea)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMINSW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix (0x62 escape); 0x40/0x20/0x00 in the
            // fourth byte selects the 512/256/128-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xea)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMINSW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Last evex argument is 0: no broadcast form for word elements.
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xea)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPMINSW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xea)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMINSW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xea)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPMINSW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xea)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMINSW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xea)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPMINSW")
    }
    return p
}
 75586  
// VPMINUB performs "Minimum of Packed Unsigned Byte Integers".
//
// Mnemonic        : VPMINUB
// Supported forms : (10 forms)
//
//    * VPMINUB xmm, xmm, xmm           [AVX]
//    * VPMINUB m128, xmm, xmm          [AVX]
//    * VPMINUB ymm, ymm, ymm           [AVX2]
//    * VPMINUB m256, ymm, ymm          [AVX2]
//    * VPMINUB zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPMINUB m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPMINUB xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMINUB m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPMINUB ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMINUB m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Byte-granular instruction: the AVX-512 forms require AVX512BW and offer
// no embedded broadcast. Operands are in AT&T order (sources first,
// destination last); panics if no form matches.
func (self *Program) VPMINUB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPMINUB", 3, Operands { v0, v1, v2 })
    // Each matching form appends a candidate encoder via p.add; the final
    // encoding is selected at assembly time. Opcode 0xDA (66 0F map) is
    // shared by all forms.
    // VPMINUB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix suffices here (legacy 0F map).
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xda)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMINUB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xda)
            // Trailing mrsd argument is the displacement scale: 1 for VEX
            // forms, operand size (disp8 compression) for EVEX forms.
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMINUB ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xda)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMINUB m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xda)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMINUB zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix (0x62 escape); 0x40/0x20/0x00 in the
            // fourth byte selects the 512/256/128-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xda)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMINUB m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Last evex argument is 0: no broadcast form for byte elements.
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xda)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPMINUB xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xda)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMINUB m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xda)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPMINUB ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xda)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMINUB m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xda)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPMINUB")
    }
    return p
}
 75719  
// VPMINUD performs "Minimum of Packed Unsigned Doubleword Integers".
//
// Mnemonic        : VPMINUD
// Supported forms : (10 forms)
//
//    * VPMINUD xmm, xmm, xmm                   [AVX]
//    * VPMINUD m128, xmm, xmm                  [AVX]
//    * VPMINUD ymm, ymm, ymm                   [AVX2]
//    * VPMINUD m256, ymm, ymm                  [AVX2]
//    * VPMINUD m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPMINUD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPMINUD m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPMINUD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPMINUD m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPMINUD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Operands are given in AT&T order (sources first, destination last).
// Panics if the operand combination matches none of the forms above.
func (self *Program) VPMINUD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPMINUD", 3, Operands { v0, v1, v2 })
    // Each form below is matched independently; every match registers a
    // candidate encoder via p.add, and the final encoding is chosen at
    // assembly time. Opcode 0x3B (66 0F 38 map) is shared by all forms —
    // the unsigned counterpart of VPMINSD's 0x39.
    // VPMINUD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix (0xC4 escape) for the reg-reg form.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x3b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMINUD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x3b)
            // Trailing mrsd argument is the displacement scale: 1 for VEX
            // forms, operand size (disp8 compression) for EVEX forms below.
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMINUD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x3b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMINUD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x3b)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMINUD m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // bcode(v[0]) sets the EVEX broadcast bit for m32bcst operands.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x3b)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPMINUD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix (0x62 escape); 0x40/0x20/0x00 in the
            // fourth byte selects the 512/256/128-bit vector length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x3b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMINUD m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x3b)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPMINUD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x3b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMINUD m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x3b)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPMINUD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x3b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPMINUD")
    }
    return p
}
 75856  
// VPMINUQ performs "Minimum of Packed Unsigned Quadword Integers".
//
// Mnemonic        : VPMINUQ
// Supported forms : (6 forms)
//
//    * VPMINUQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPMINUQ zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPMINUQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPMINUQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPMINUQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPMINUQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Like VPMINSQ, this quadword variant is EVEX-only (no VEX forms).
// Operands are in AT&T order (sources first, destination last); panics
// if no form matches.
func (self *Program) VPMINUQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPMINUQ", 3, Operands { v0, v1, v2 })
    // Each matching form appends a candidate encoder via p.add; the final
    // encoding is selected at assembly time. Opcode 0x3B with EVEX.W=1
    // (the 0x85/0xfd prefix constants) selects the quadword operation.
    // VPMINUQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // bcode(v[0]) sets the broadcast bit for m64bcst operands;
            // trailing mrsd argument is the disp8 compression scale.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x3b)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPMINUQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix (0x62 escape); 0x40/0x20/0x00 in the
            // fourth byte selects the 512/256/128-bit vector length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x3b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMINUQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x3b)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPMINUQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x3b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMINUQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x3b)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPMINUQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x3b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPMINUQ")
    }
    return p
}
 75945  
// VPMINUW performs "Minimum of Packed Unsigned Word Integers".
//
// Mnemonic        : VPMINUW
// Supported forms : (10 forms)
//
//    * VPMINUW xmm, xmm, xmm           [AVX]
//    * VPMINUW m128, xmm, xmm          [AVX]
//    * VPMINUW ymm, ymm, ymm           [AVX2]
//    * VPMINUW m256, ymm, ymm          [AVX2]
//    * VPMINUW zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPMINUW m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPMINUW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMINUW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPMINUW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMINUW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Generated code: every matching operand pattern appends one encoder closure
// that emits the raw VEX/EVEX-encoded bytes (field comments below follow the
// Intel SDM prefix layouts; confirm against the SDM before relying on them).
// Operand order is AT&T-style: v0 = src1 (or memory), v1 = src2, v2 = dest.
func (self *Program) VPMINUW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPMINUW", 3, Operands { v0, v1, v2 })
    // VPMINUW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                         // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5)) // VEX byte 1: inverted R/B extension bits, opcode map 0F38
            m.emit(0x79 ^ (hlcode(v[1]) << 3))                   // VEX byte 2: W=0, vvvv = ~src2, L=0 (128-bit), pp=01 (66)
            m.emit(0x3a)                                         // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))        // ModRM: mod=11, reg=dest, rm=src1
        })
    }
    // VPMINUW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 3-byte VEX with memory operand, map 0F38, 66 prefix
            m.emit(0x3a)                                                    // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                              // ModRM/SIB/disp; scale 1 = no disp8 compression (VEX)
        })
    }
    // VPMINUW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                         // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5)) // VEX byte 1: inverted R/B extension bits, opcode map 0F38
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                   // VEX byte 2: W=0, vvvv = ~src2, L=1 (256-bit), pp=01 (66)
            m.emit(0x3a)                                         // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))        // ModRM: mod=11, reg=dest, rm=src1
        })
    }
    // VPMINUW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 3-byte VEX with memory operand, 256-bit, 66 prefix
            m.emit(0x3a)                                                    // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                              // ModRM/SIB/disp; scale 1 = no disp8 compression (VEX)
        })
    }
    // VPMINUW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted R/X/B/R' extension bits, map 0F38
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                            // P1: W=0, vvvv = ~src2, pp=01 (66)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40) // P2: z, V' = ~src2 high bit, aaa mask, L'L=10 (512-bit)
            m.emit(0x3a)                                                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                 // ModRM: mod=11, reg=dest, rm=src1
        })
    }
    // VPMINUW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // EVEX memory form: map 0F38, 66 prefix, 512-bit
            m.emit(0x3a)                                                                                 // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)                                                          // disp8 compressed with N=64
        })
    }
    // VPMINUW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted R/X/B/R' extension bits, map 0F38
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                            // P1: W=0, vvvv = ~src2, pp=01 (66)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00) // P2: z, V', aaa mask, L'L=00 (128-bit)
            m.emit(0x3a)                                                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                 // ModRM: mod=11, reg=dest, rm=src1
        })
    }
    // VPMINUW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // EVEX memory form: 128-bit
            m.emit(0x3a)                                                                                 // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 16)                                                          // disp8 compressed with N=16
        })
    }
    // VPMINUW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted R/X/B/R' extension bits, map 0F38
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                            // P1: W=0, vvvv = ~src2, pp=01 (66)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20) // P2: z, V', aaa mask, L'L=01 (256-bit)
            m.emit(0x3a)                                                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                 // ModRM: mod=11, reg=dest, rm=src1
        })
    }
    // VPMINUW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // EVEX memory form: 256-bit
            m.emit(0x3a)                                                                                 // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 32)                                                          // disp8 compressed with N=32
        })
    }
    // No pattern matched: the caller passed an unsupported operand combination.
    if p.len == 0 {
        panic("invalid operands for VPMINUW")
    }
    return p
}
 76082  
// VPMOVB2M performs "Move Signs of Packed Byte Integers to Mask Register".
//
// Mnemonic        : VPMOVB2M
// Supported forms : (3 forms)
//
//    * VPMOVB2M zmm, k    [AVX512BW]
//    * VPMOVB2M xmm, k    [AVX512BW,AVX512VL]
//    * VPMOVB2M ymm, k    [AVX512BW,AVX512VL]
//
// Generated code: each accepted operand pattern appends one encoder closure
// emitting the raw EVEX bytes (field comments per the Intel SDM layouts).
// v0 = source vector register, v1 = destination mask register.
func (self *Program) VPMOVB2M(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVB2M", 2, Operands { v0, v1 })
    // VPMOVB2M zmm, k
    if isZMM(v0) && isK(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // P0: inverted R/X/B/R' extension bits, map 0F38
            m.emit(0x7e)                                                                  // P1: W=0, vvvv unused (all ones), pp=10 (F3)
            m.emit(0x48)                                                                  // P2: L'L=10 (512-bit), no masking
            m.emit(0x29)                                                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                                 // ModRM: mod=11, reg=mask dest, rm=vector src
        })
    }
    // VPMOVB2M xmm, k
    if isEVEXXMM(v0) && isK(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // P0: inverted R/X/B/R' extension bits, map 0F38
            m.emit(0x7e)                                                                  // P1: W=0, pp=10 (F3)
            m.emit(0x08)                                                                  // P2: L'L=00 (128-bit), no masking
            m.emit(0x29)                                                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                                 // ModRM: mod=11, reg=mask dest, rm=vector src
        })
    }
    // VPMOVB2M ymm, k
    if isEVEXYMM(v0) && isK(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // P0: inverted R/X/B/R' extension bits, map 0F38
            m.emit(0x7e)                                                                  // P1: W=0, pp=10 (F3)
            m.emit(0x28)                                                                  // P2: L'L=01 (256-bit), no masking
            m.emit(0x29)                                                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                                 // ModRM: mod=11, reg=mask dest, rm=vector src
        })
    }
    // No pattern matched: unsupported operand combination.
    if p.len == 0 {
        panic("invalid operands for VPMOVB2M")
    }
    return p
}
 76138  
// VPMOVD2M performs "Move Signs of Packed Doubleword Integers to Mask Register".
//
// Mnemonic        : VPMOVD2M
// Supported forms : (3 forms)
//
//    * VPMOVD2M zmm, k    [AVX512DQ]
//    * VPMOVD2M xmm, k    [AVX512DQ,AVX512VL]
//    * VPMOVD2M ymm, k    [AVX512DQ,AVX512VL]
//
// Generated code: each accepted operand pattern appends one encoder closure
// emitting the raw EVEX bytes (field comments per the Intel SDM layouts).
// v0 = source vector register, v1 = destination mask register.
func (self *Program) VPMOVD2M(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVD2M", 2, Operands { v0, v1 })
    // VPMOVD2M zmm, k
    if isZMM(v0) && isK(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // P0: inverted R/X/B/R' extension bits, map 0F38
            m.emit(0x7e)                                                                  // P1: W=0, vvvv unused, pp=10 (F3)
            m.emit(0x48)                                                                  // P2: L'L=10 (512-bit), no masking
            m.emit(0x39)                                                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                                 // ModRM: mod=11, reg=mask dest, rm=vector src
        })
    }
    // VPMOVD2M xmm, k
    if isEVEXXMM(v0) && isK(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // P0: inverted R/X/B/R' extension bits, map 0F38
            m.emit(0x7e)                                                                  // P1: W=0, pp=10 (F3)
            m.emit(0x08)                                                                  // P2: L'L=00 (128-bit), no masking
            m.emit(0x39)                                                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                                 // ModRM: mod=11, reg=mask dest, rm=vector src
        })
    }
    // VPMOVD2M ymm, k
    if isEVEXYMM(v0) && isK(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // P0: inverted R/X/B/R' extension bits, map 0F38
            m.emit(0x7e)                                                                  // P1: W=0, pp=10 (F3)
            m.emit(0x28)                                                                  // P2: L'L=01 (256-bit), no masking
            m.emit(0x39)                                                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                                 // ModRM: mod=11, reg=mask dest, rm=vector src
        })
    }
    // No pattern matched: unsupported operand combination.
    if p.len == 0 {
        panic("invalid operands for VPMOVD2M")
    }
    return p
}
 76194  
// VPMOVDB performs "Down Convert Packed Doubleword Values to Byte Values with Truncation".
//
// Mnemonic        : VPMOVDB
// Supported forms : (6 forms)
//
//    * VPMOVDB zmm, xmm{k}{z}     [AVX512F]
//    * VPMOVDB zmm, m128{k}{z}    [AVX512F]
//    * VPMOVDB xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVDB xmm, m32{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVDB ymm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVDB ymm, m64{k}{z}     [AVX512F,AVX512VL]
//
// Generated code: each accepted operand pattern appends one encoder closure
// emitting the raw EVEX bytes (field comments per the Intel SDM layouts).
// Note the store direction: v0 is the wide SOURCE vector (goes in the ModRM
// reg field), v1 is the narrow destination register or memory operand.
func (self *Program) VPMOVDB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVDB", 2, Operands { v0, v1 })
    // VPMOVDB zmm, xmm{k}{z}
    if isZMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4))) // P0: inverted R/X/B/R' extension bits, map 0F38
            m.emit(0x7e)                                                                  // P1: W=0, vvvv unused, pp=10 (F3)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                               // P2: z, aaa mask, L'L=10 (512-bit source)
            m.emit(0x31)                                                                  // opcode
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))                                 // ModRM: mod=11, reg=wide src, rm=narrow dest
        })
    }
    // VPMOVDB zmm, m128{k}{z}
    if isZMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0) // EVEX memory-dest form: F3 prefix, 512-bit source
            m.emit(0x31)                                                                       // opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 16)                                                // disp8 compressed with N=16 (128-bit store)
        })
    }
    // VPMOVDB xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4))) // P0: inverted R/X/B/R' extension bits, map 0F38
            m.emit(0x7e)                                                                  // P1: W=0, pp=10 (F3)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)                               // P2: z, aaa mask, L'L=00 (128-bit source)
            m.emit(0x31)                                                                  // opcode
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))                                 // ModRM: mod=11, reg=wide src, rm=narrow dest
        })
    }
    // VPMOVDB xmm, m32{k}{z}
    if isEVEXXMM(v0) && isM32kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0) // EVEX memory-dest form: 128-bit source
            m.emit(0x31)                                                                       // opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 4)                                                 // disp8 compressed with N=4 (32-bit store)
        })
    }
    // VPMOVDB ymm, xmm{k}{z}
    if isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4))) // P0: inverted R/X/B/R' extension bits, map 0F38
            m.emit(0x7e)                                                                  // P1: W=0, pp=10 (F3)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)                               // P2: z, aaa mask, L'L=01 (256-bit source)
            m.emit(0x31)                                                                  // opcode
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))                                 // ModRM: mod=11, reg=wide src, rm=narrow dest
        })
    }
    // VPMOVDB ymm, m64{k}{z}
    if isEVEXYMM(v0) && isM64kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0) // EVEX memory-dest form: 256-bit source
            m.emit(0x31)                                                                       // opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 8)                                                 // disp8 compressed with N=8 (64-bit store)
        })
    }
    // No pattern matched: unsupported operand combination.
    if p.len == 0 {
        panic("invalid operands for VPMOVDB")
    }
    return p
}
 76283  
// VPMOVDW performs "Down Convert Packed Doubleword Values to Word Values with Truncation".
//
// Mnemonic        : VPMOVDW
// Supported forms : (6 forms)
//
//    * VPMOVDW zmm, ymm{k}{z}     [AVX512F]
//    * VPMOVDW zmm, m256{k}{z}    [AVX512F]
//    * VPMOVDW xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVDW xmm, m64{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVDW ymm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVDW ymm, m128{k}{z}    [AVX512F,AVX512VL]
//
// Generated code: each accepted operand pattern appends one encoder closure
// emitting the raw EVEX bytes (field comments per the Intel SDM layouts).
// Note the store direction: v0 is the wide SOURCE vector (goes in the ModRM
// reg field), v1 is the narrow destination register or memory operand.
func (self *Program) VPMOVDW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVDW", 2, Operands { v0, v1 })
    // VPMOVDW zmm, ymm{k}{z}
    if isZMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4))) // P0: inverted R/X/B/R' extension bits, map 0F38
            m.emit(0x7e)                                                                  // P1: W=0, vvvv unused, pp=10 (F3)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                               // P2: z, aaa mask, L'L=10 (512-bit source)
            m.emit(0x33)                                                                  // opcode
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))                                 // ModRM: mod=11, reg=wide src, rm=narrow dest
        })
    }
    // VPMOVDW zmm, m256{k}{z}
    if isZMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0) // EVEX memory-dest form: F3 prefix, 512-bit source
            m.emit(0x33)                                                                       // opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 32)                                                // disp8 compressed with N=32 (256-bit store)
        })
    }
    // VPMOVDW xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4))) // P0: inverted R/X/B/R' extension bits, map 0F38
            m.emit(0x7e)                                                                  // P1: W=0, pp=10 (F3)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)                               // P2: z, aaa mask, L'L=00 (128-bit source)
            m.emit(0x33)                                                                  // opcode
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))                                 // ModRM: mod=11, reg=wide src, rm=narrow dest
        })
    }
    // VPMOVDW xmm, m64{k}{z}
    if isEVEXXMM(v0) && isM64kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0) // EVEX memory-dest form: 128-bit source
            m.emit(0x33)                                                                       // opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 8)                                                 // disp8 compressed with N=8 (64-bit store)
        })
    }
    // VPMOVDW ymm, xmm{k}{z}
    if isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4))) // P0: inverted R/X/B/R' extension bits, map 0F38
            m.emit(0x7e)                                                                  // P1: W=0, pp=10 (F3)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)                               // P2: z, aaa mask, L'L=01 (256-bit source)
            m.emit(0x33)                                                                  // opcode
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))                                 // ModRM: mod=11, reg=wide src, rm=narrow dest
        })
    }
    // VPMOVDW ymm, m128{k}{z}
    if isEVEXYMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0) // EVEX memory-dest form: 256-bit source
            m.emit(0x33)                                                                       // opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 16)                                                // disp8 compressed with N=16 (128-bit store)
        })
    }
    // No pattern matched: unsupported operand combination.
    if p.len == 0 {
        panic("invalid operands for VPMOVDW")
    }
    return p
}
 76372  
// VPMOVM2B performs "Expand Bits of Mask Register to Packed Byte Integers".
//
// Mnemonic        : VPMOVM2B
// Supported forms : (3 forms)
//
//    * VPMOVM2B k, zmm    [AVX512BW]
//    * VPMOVM2B k, xmm    [AVX512BW,AVX512VL]
//    * VPMOVM2B k, ymm    [AVX512BW,AVX512VL]
//
// Generated code: each accepted operand pattern appends one encoder closure
// emitting the raw EVEX bytes (field comments per the Intel SDM layouts).
// v0 = source mask register, v1 = destination vector register.
func (self *Program) VPMOVM2B(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVM2B", 2, Operands { v0, v1 })
    // VPMOVM2B k, zmm
    if isK(v0) && isZMM(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // P0: inverted R/X/B/R' extension bits, map 0F38
            m.emit(0x7e)                                                                  // P1: W=0, vvvv unused, pp=10 (F3)
            m.emit(0x48)                                                                  // P2: L'L=10 (512-bit), no masking
            m.emit(0x28)                                                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                                 // ModRM: mod=11, reg=vector dest, rm=mask src
        })
    }
    // VPMOVM2B k, xmm
    if isK(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // P0: inverted R/X/B/R' extension bits, map 0F38
            m.emit(0x7e)                                                                  // P1: W=0, pp=10 (F3)
            m.emit(0x08)                                                                  // P2: L'L=00 (128-bit), no masking
            m.emit(0x28)                                                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                                 // ModRM: mod=11, reg=vector dest, rm=mask src
        })
    }
    // VPMOVM2B k, ymm
    if isK(v0) && isEVEXYMM(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // P0: inverted R/X/B/R' extension bits, map 0F38
            m.emit(0x7e)                                                                  // P1: W=0, pp=10 (F3)
            m.emit(0x28)                                                                  // P2: L'L=01 (256-bit), no masking
            m.emit(0x28)                                                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                                 // ModRM: mod=11, reg=vector dest, rm=mask src
        })
    }
    // No pattern matched: unsupported operand combination.
    if p.len == 0 {
        panic("invalid operands for VPMOVM2B")
    }
    return p
}
 76428  
// VPMOVM2D performs "Expand Bits of Mask Register to Packed Doubleword Integers".
//
// Mnemonic        : VPMOVM2D
// Supported forms : (3 forms)
//
//    * VPMOVM2D k, zmm    [AVX512DQ]
//    * VPMOVM2D k, xmm    [AVX512DQ,AVX512VL]
//    * VPMOVM2D k, ymm    [AVX512DQ,AVX512VL]
//
// Generated code: each accepted operand pattern appends one encoder closure
// emitting the raw EVEX bytes (field comments per the Intel SDM layouts).
// v0 = source mask register, v1 = destination vector register.
func (self *Program) VPMOVM2D(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVM2D", 2, Operands { v0, v1 })
    // VPMOVM2D k, zmm
    if isK(v0) && isZMM(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // P0: inverted R/X/B/R' extension bits, map 0F38
            m.emit(0x7e)                                                                  // P1: W=0, vvvv unused, pp=10 (F3)
            m.emit(0x48)                                                                  // P2: L'L=10 (512-bit), no masking
            m.emit(0x38)                                                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                                 // ModRM: mod=11, reg=vector dest, rm=mask src
        })
    }
    // VPMOVM2D k, xmm
    if isK(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // P0: inverted R/X/B/R' extension bits, map 0F38
            m.emit(0x7e)                                                                  // P1: W=0, pp=10 (F3)
            m.emit(0x08)                                                                  // P2: L'L=00 (128-bit), no masking
            m.emit(0x38)                                                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                                 // ModRM: mod=11, reg=vector dest, rm=mask src
        })
    }
    // VPMOVM2D k, ymm
    if isK(v0) && isEVEXYMM(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // P0: inverted R/X/B/R' extension bits, map 0F38
            m.emit(0x7e)                                                                  // P1: W=0, pp=10 (F3)
            m.emit(0x28)                                                                  // P2: L'L=01 (256-bit), no masking
            m.emit(0x38)                                                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                                 // ModRM: mod=11, reg=vector dest, rm=mask src
        })
    }
    // No pattern matched: unsupported operand combination.
    if p.len == 0 {
        panic("invalid operands for VPMOVM2D")
    }
    return p
}
 76484  
// VPMOVM2Q performs "Expand Bits of Mask Register to Packed Quadword Integers".
//
// Mnemonic        : VPMOVM2Q
// Supported forms : (3 forms)
//
//    * VPMOVM2Q k, zmm    [AVX512DQ]
//    * VPMOVM2Q k, xmm    [AVX512DQ,AVX512VL]
//    * VPMOVM2Q k, ymm    [AVX512DQ,AVX512VL]
//
// Generated code: each accepted operand pattern appends one encoder closure
// emitting the raw EVEX bytes (field comments per the Intel SDM layouts).
// v0 = source mask register, v1 = destination vector register. Unlike the
// byte/word variants, P1 is 0xfe (W=1) to select quadword element size.
func (self *Program) VPMOVM2Q(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVM2Q", 2, Operands { v0, v1 })
    // VPMOVM2Q k, zmm
    if isK(v0) && isZMM(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // P0: inverted R/X/B/R' extension bits, map 0F38
            m.emit(0xfe)                                                                  // P1: W=1 (quadword), vvvv unused, pp=10 (F3)
            m.emit(0x48)                                                                  // P2: L'L=10 (512-bit), no masking
            m.emit(0x38)                                                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                                 // ModRM: mod=11, reg=vector dest, rm=mask src
        })
    }
    // VPMOVM2Q k, xmm
    if isK(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // P0: inverted R/X/B/R' extension bits, map 0F38
            m.emit(0xfe)                                                                  // P1: W=1 (quadword), pp=10 (F3)
            m.emit(0x08)                                                                  // P2: L'L=00 (128-bit), no masking
            m.emit(0x38)                                                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                                 // ModRM: mod=11, reg=vector dest, rm=mask src
        })
    }
    // VPMOVM2Q k, ymm
    if isK(v0) && isEVEXYMM(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // P0: inverted R/X/B/R' extension bits, map 0F38
            m.emit(0xfe)                                                                  // P1: W=1 (quadword), pp=10 (F3)
            m.emit(0x28)                                                                  // P2: L'L=01 (256-bit), no masking
            m.emit(0x38)                                                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                                 // ModRM: mod=11, reg=vector dest, rm=mask src
        })
    }
    // No pattern matched: unsupported operand combination.
    if p.len == 0 {
        panic("invalid operands for VPMOVM2Q")
    }
    return p
}
 76540  
// VPMOVM2W performs "Expand Bits of Mask Register to Packed Word Integers".
//
// Mnemonic        : VPMOVM2W
// Supported forms : (3 forms)
//
//    * VPMOVM2W k, zmm    [AVX512BW]
//    * VPMOVM2W k, xmm    [AVX512BW,AVX512VL]
//    * VPMOVM2W k, ymm    [AVX512BW,AVX512VL]
//
// Generated code: each accepted operand pattern appends one encoder closure
// emitting the raw EVEX bytes (field comments per the Intel SDM layouts).
// v0 = source mask register, v1 = destination vector register. P1 is 0xfe
// (W=1), which together with opcode 0x28 selects the word-element form.
func (self *Program) VPMOVM2W(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVM2W", 2, Operands { v0, v1 })
    // VPMOVM2W k, zmm
    if isK(v0) && isZMM(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // P0: inverted R/X/B/R' extension bits, map 0F38
            m.emit(0xfe)                                                                  // P1: W=1 (word form), vvvv unused, pp=10 (F3)
            m.emit(0x48)                                                                  // P2: L'L=10 (512-bit), no masking
            m.emit(0x28)                                                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                                 // ModRM: mod=11, reg=vector dest, rm=mask src
        })
    }
    // VPMOVM2W k, xmm
    if isK(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // P0: inverted R/X/B/R' extension bits, map 0F38
            m.emit(0xfe)                                                                  // P1: W=1 (word form), pp=10 (F3)
            m.emit(0x08)                                                                  // P2: L'L=00 (128-bit), no masking
            m.emit(0x28)                                                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                                 // ModRM: mod=11, reg=vector dest, rm=mask src
        })
    }
    // VPMOVM2W k, ymm
    if isK(v0) && isEVEXYMM(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // P0: inverted R/X/B/R' extension bits, map 0F38
            m.emit(0xfe)                                                                  // P1: W=1 (word form), pp=10 (F3)
            m.emit(0x28)                                                                  // P2: L'L=01 (256-bit), no masking
            m.emit(0x28)                                                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                                 // ModRM: mod=11, reg=vector dest, rm=mask src
        })
    }
    // No pattern matched: unsupported operand combination.
    if p.len == 0 {
        panic("invalid operands for VPMOVM2W")
    }
    return p
}
 76596  
// VPMOVMSKB performs "Move Byte Mask".
//
// Mnemonic        : VPMOVMSKB
// Supported forms : (2 forms)
//
//    * VPMOVMSKB xmm, r32    [AVX]
//    * VPMOVMSKB ymm, r32    [AVX2]
//
// Generated code: each accepted operand pattern appends one encoder closure
// emitting the 2-byte-VEX-encoded bytes (field comments per the Intel SDM).
// v0 = source vector register, v1 = destination 32-bit GPR.
func (self *Program) VPMOVMSKB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVMSKB", 2, Operands { v0, v1 })
    // VPMOVMSKB xmm, r32
    if isXMM(v0) && isReg32(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[1]), v[0], 0)               // 2-byte VEX: lpp=1 -> L=0 (128-bit), pp=01 (66); no vvvv source
            m.emit(0xd7)                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=GPR dest, rm=vector src
        })
    }
    // VPMOVMSKB ymm, r32
    if isYMM(v0) && isReg32(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[1]), v[0], 0)               // 2-byte VEX: lpp=5 -> L=1 (256-bit), pp=01 (66); no vvvv source
            m.emit(0xd7)                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=GPR dest, rm=vector src
        })
    }
    // No pattern matched: unsupported operand combination.
    if p.len == 0 {
        panic("invalid operands for VPMOVMSKB")
    }
    return p
}
 76632  
// VPMOVQ2M performs "Move Signs of Packed Quadword Integers to Mask Register".
//
// Mnemonic        : VPMOVQ2M
// Supported forms : (3 forms)
//
//    * VPMOVQ2M zmm, k    [AVX512DQ]
//    * VPMOVQ2M xmm, k    [AVX512DQ,AVX512VL]
//    * VPMOVQ2M ymm, k    [AVX512DQ,AVX512VL]
//
// Generated code: each accepted operand pattern appends one encoder closure
// emitting the raw EVEX bytes (field comments per the Intel SDM layouts).
// v0 = source vector register, v1 = destination mask register. P1 is 0xfe
// (W=1) to select the quadword-element form.
func (self *Program) VPMOVQ2M(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVQ2M", 2, Operands { v0, v1 })
    // VPMOVQ2M zmm, k
    if isZMM(v0) && isK(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // P0: inverted R/X/B/R' extension bits, map 0F38
            m.emit(0xfe)                                                                  // P1: W=1 (quadword), vvvv unused, pp=10 (F3)
            m.emit(0x48)                                                                  // P2: L'L=10 (512-bit), no masking
            m.emit(0x39)                                                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                                 // ModRM: mod=11, reg=mask dest, rm=vector src
        })
    }
    // VPMOVQ2M xmm, k
    if isEVEXXMM(v0) && isK(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // P0: inverted R/X/B/R' extension bits, map 0F38
            m.emit(0xfe)                                                                  // P1: W=1 (quadword), pp=10 (F3)
            m.emit(0x08)                                                                  // P2: L'L=00 (128-bit), no masking
            m.emit(0x39)                                                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                                 // ModRM: mod=11, reg=mask dest, rm=vector src
        })
    }
    // VPMOVQ2M ymm, k
    if isEVEXYMM(v0) && isK(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // P0: inverted R/X/B/R' extension bits, map 0F38
            m.emit(0xfe)                                                                  // P1: W=1 (quadword), pp=10 (F3)
            m.emit(0x28)                                                                  // P2: L'L=01 (256-bit), no masking
            m.emit(0x39)                                                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                                 // ModRM: mod=11, reg=mask dest, rm=vector src
        })
    }
    // No pattern matched: unsupported operand combination.
    if p.len == 0 {
        panic("invalid operands for VPMOVQ2M")
    }
    return p
}
 76688  
 76689  // VPMOVQB performs "Down Convert Packed Quadword Values to Byte Values with Truncation".
 76690  //
 76691  // Mnemonic        : VPMOVQB
 76692  // Supported forms : (6 forms)
 76693  //
 76694  //    * VPMOVQB zmm, xmm{k}{z}    [AVX512F]
 76695  //    * VPMOVQB zmm, m64{k}{z}    [AVX512F]
 76696  //    * VPMOVQB xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 76697  //    * VPMOVQB xmm, m16{k}{z}    [AVX512F,AVX512VL]
 76698  //    * VPMOVQB ymm, xmm{k}{z}    [AVX512F,AVX512VL]
 76699  //    * VPMOVQB ymm, m32{k}{z}    [AVX512F,AVX512VL]
 76700  //
func (self *Program) VPMOVQB(v0 interface{}, v1 interface{}) *Instruction {
    // Allocate the instruction, then try each supported operand form in turn;
    // every form that matches v0/v1 contributes one candidate encoder via p.add.
    p := self.alloc("VPMOVQB", 2, Operands { v0, v1 })
    // VPMOVQB zmm, xmm{k}{z}
    if isZMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                        // EVEX prefix, byte 0
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))  // P0: opcode map 0F38, XORed with inverted R/X/B/R' register-extension bits
            m.emit(0x7e)                                                        // P1: W=0, vvvv=1111 (unused), pp=F3
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                     // P2: zeroing flag, opmask aaa, L'L=10 (512-bit)
            m.emit(0x32)                                                        // opcode
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))                       // ModRM: mod=11, reg=v0 (src vector), rm=v1 (dst)
        })
    }
    // VPMOVQB zmm, m64{k}{z}
    if isZMM(v0) && isM64kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)  // EVEX prefix for the memory form; 0b10 = 512-bit vector length
            m.emit(0x32)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)                                  // ModRM/SIB/disp; disp8 compressed with scale N=8
        })
    }
    // VPMOVQB xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)                     // P2: L'L=00 (128-bit)
            m.emit(0x32)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVQB xmm, m16{k}{z}
    if isEVEXXMM(v0) && isM16kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)  // 0b00 = 128-bit vector length
            m.emit(0x32)
            m.mrsd(lcode(v[0]), addr(v[1]), 2)                                  // disp8 scale N=2
        })
    }
    // VPMOVQB ymm, xmm{k}{z}
    if isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)                     // P2: L'L=01 (256-bit)
            m.emit(0x32)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVQB ymm, m32{k}{z}
    if isEVEXYMM(v0) && isM32kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)  // 0b01 = 256-bit vector length
            m.emit(0x32)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)                                  // disp8 scale N=4
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPMOVQB")
    }
    return p
}
 76777  
 76778  // VPMOVQD performs "Down Convert Packed Quadword Values to Doubleword Values with Truncation".
 76779  //
 76780  // Mnemonic        : VPMOVQD
 76781  // Supported forms : (6 forms)
 76782  //
 76783  //    * VPMOVQD zmm, ymm{k}{z}     [AVX512F]
 76784  //    * VPMOVQD zmm, m256{k}{z}    [AVX512F]
 76785  //    * VPMOVQD xmm, xmm{k}{z}     [AVX512F,AVX512VL]
 76786  //    * VPMOVQD xmm, m64{k}{z}     [AVX512F,AVX512VL]
 76787  //    * VPMOVQD ymm, xmm{k}{z}     [AVX512F,AVX512VL]
 76788  //    * VPMOVQD ymm, m128{k}{z}    [AVX512F,AVX512VL]
 76789  //
func (self *Program) VPMOVQD(v0 interface{}, v1 interface{}) *Instruction {
    // Allocate the instruction, then try each supported operand form in turn;
    // every form that matches v0/v1 contributes one candidate encoder via p.add.
    p := self.alloc("VPMOVQD", 2, Operands { v0, v1 })
    // VPMOVQD zmm, ymm{k}{z}
    if isZMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                        // EVEX prefix, byte 0
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))  // P0: opcode map 0F38, XORed with inverted R/X/B/R' register-extension bits
            m.emit(0x7e)                                                        // P1: W=0, vvvv=1111 (unused), pp=F3
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                     // P2: zeroing flag, opmask aaa, L'L=10 (512-bit)
            m.emit(0x35)                                                        // opcode
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))                       // ModRM: mod=11, reg=v0 (src vector), rm=v1 (dst)
        })
    }
    // VPMOVQD zmm, m256{k}{z}
    if isZMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)  // EVEX prefix for the memory form; 0b10 = 512-bit vector length
            m.emit(0x35)
            m.mrsd(lcode(v[0]), addr(v[1]), 32)                                 // ModRM/SIB/disp; disp8 compressed with scale N=32
        })
    }
    // VPMOVQD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)                     // P2: L'L=00 (128-bit)
            m.emit(0x35)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVQD xmm, m64{k}{z}
    if isEVEXXMM(v0) && isM64kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)  // 0b00 = 128-bit vector length
            m.emit(0x35)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)                                  // disp8 scale N=8
        })
    }
    // VPMOVQD ymm, xmm{k}{z}
    if isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)                     // P2: L'L=01 (256-bit)
            m.emit(0x35)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVQD ymm, m128{k}{z}
    if isEVEXYMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)  // 0b01 = 256-bit vector length
            m.emit(0x35)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)                                 // disp8 scale N=16
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPMOVQD")
    }
    return p
}
 76866  
 76867  // VPMOVQW performs "Down Convert Packed Quadword Values to Word Values with Truncation".
 76868  //
 76869  // Mnemonic        : VPMOVQW
 76870  // Supported forms : (6 forms)
 76871  //
 76872  //    * VPMOVQW zmm, xmm{k}{z}     [AVX512F]
 76873  //    * VPMOVQW zmm, m128{k}{z}    [AVX512F]
 76874  //    * VPMOVQW xmm, xmm{k}{z}     [AVX512F,AVX512VL]
 76875  //    * VPMOVQW xmm, m32{k}{z}     [AVX512F,AVX512VL]
 76876  //    * VPMOVQW ymm, xmm{k}{z}     [AVX512F,AVX512VL]
 76877  //    * VPMOVQW ymm, m64{k}{z}     [AVX512F,AVX512VL]
 76878  //
func (self *Program) VPMOVQW(v0 interface{}, v1 interface{}) *Instruction {
    // Allocate the instruction, then try each supported operand form in turn;
    // every form that matches v0/v1 contributes one candidate encoder via p.add.
    p := self.alloc("VPMOVQW", 2, Operands { v0, v1 })
    // VPMOVQW zmm, xmm{k}{z}
    if isZMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                        // EVEX prefix, byte 0
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))  // P0: opcode map 0F38, XORed with inverted R/X/B/R' register-extension bits
            m.emit(0x7e)                                                        // P1: W=0, vvvv=1111 (unused), pp=F3
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                     // P2: zeroing flag, opmask aaa, L'L=10 (512-bit)
            m.emit(0x34)                                                        // opcode
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))                       // ModRM: mod=11, reg=v0 (src vector), rm=v1 (dst)
        })
    }
    // VPMOVQW zmm, m128{k}{z}
    if isZMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)  // EVEX prefix for the memory form; 0b10 = 512-bit vector length
            m.emit(0x34)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)                                 // ModRM/SIB/disp; disp8 compressed with scale N=16
        })
    }
    // VPMOVQW xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)                     // P2: L'L=00 (128-bit)
            m.emit(0x34)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVQW xmm, m32{k}{z}
    if isEVEXXMM(v0) && isM32kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)  // 0b00 = 128-bit vector length
            m.emit(0x34)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)                                  // disp8 scale N=4
        })
    }
    // VPMOVQW ymm, xmm{k}{z}
    if isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)                     // P2: L'L=01 (256-bit)
            m.emit(0x34)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVQW ymm, m64{k}{z}
    if isEVEXYMM(v0) && isM64kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)  // 0b01 = 256-bit vector length
            m.emit(0x34)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)                                  // disp8 scale N=8
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPMOVQW")
    }
    return p
}
 76955  
 76956  // VPMOVSDB performs "Down Convert Packed Doubleword Values to Byte Values with Signed Saturation".
 76957  //
 76958  // Mnemonic        : VPMOVSDB
 76959  // Supported forms : (6 forms)
 76960  //
 76961  //    * VPMOVSDB zmm, xmm{k}{z}     [AVX512F]
 76962  //    * VPMOVSDB zmm, m128{k}{z}    [AVX512F]
 76963  //    * VPMOVSDB xmm, xmm{k}{z}     [AVX512F,AVX512VL]
 76964  //    * VPMOVSDB xmm, m32{k}{z}     [AVX512F,AVX512VL]
 76965  //    * VPMOVSDB ymm, xmm{k}{z}     [AVX512F,AVX512VL]
 76966  //    * VPMOVSDB ymm, m64{k}{z}     [AVX512F,AVX512VL]
 76967  //
func (self *Program) VPMOVSDB(v0 interface{}, v1 interface{}) *Instruction {
    // Allocate the instruction, then try each supported operand form in turn;
    // every form that matches v0/v1 contributes one candidate encoder via p.add.
    p := self.alloc("VPMOVSDB", 2, Operands { v0, v1 })
    // VPMOVSDB zmm, xmm{k}{z}
    if isZMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                        // EVEX prefix, byte 0
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))  // P0: opcode map 0F38, XORed with inverted R/X/B/R' register-extension bits
            m.emit(0x7e)                                                        // P1: W=0, vvvv=1111 (unused), pp=F3
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                     // P2: zeroing flag, opmask aaa, L'L=10 (512-bit)
            m.emit(0x21)                                                        // opcode
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))                       // ModRM: mod=11, reg=v0 (src vector), rm=v1 (dst)
        })
    }
    // VPMOVSDB zmm, m128{k}{z}
    if isZMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)  // EVEX prefix for the memory form; 0b10 = 512-bit vector length
            m.emit(0x21)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)                                 // ModRM/SIB/disp; disp8 compressed with scale N=16
        })
    }
    // VPMOVSDB xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)                     // P2: L'L=00 (128-bit)
            m.emit(0x21)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVSDB xmm, m32{k}{z}
    if isEVEXXMM(v0) && isM32kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)  // 0b00 = 128-bit vector length
            m.emit(0x21)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)                                  // disp8 scale N=4
        })
    }
    // VPMOVSDB ymm, xmm{k}{z}
    if isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)                     // P2: L'L=01 (256-bit)
            m.emit(0x21)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVSDB ymm, m64{k}{z}
    if isEVEXYMM(v0) && isM64kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)  // 0b01 = 256-bit vector length
            m.emit(0x21)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)                                  // disp8 scale N=8
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPMOVSDB")
    }
    return p
}
 77044  
 77045  // VPMOVSDW performs "Down Convert Packed Doubleword Values to Word Values with Signed Saturation".
 77046  //
 77047  // Mnemonic        : VPMOVSDW
 77048  // Supported forms : (6 forms)
 77049  //
 77050  //    * VPMOVSDW zmm, ymm{k}{z}     [AVX512F]
 77051  //    * VPMOVSDW zmm, m256{k}{z}    [AVX512F]
 77052  //    * VPMOVSDW xmm, xmm{k}{z}     [AVX512F,AVX512VL]
 77053  //    * VPMOVSDW xmm, m64{k}{z}     [AVX512F,AVX512VL]
 77054  //    * VPMOVSDW ymm, xmm{k}{z}     [AVX512F,AVX512VL]
 77055  //    * VPMOVSDW ymm, m128{k}{z}    [AVX512F,AVX512VL]
 77056  //
func (self *Program) VPMOVSDW(v0 interface{}, v1 interface{}) *Instruction {
    // Allocate the instruction, then try each supported operand form in turn;
    // every form that matches v0/v1 contributes one candidate encoder via p.add.
    p := self.alloc("VPMOVSDW", 2, Operands { v0, v1 })
    // VPMOVSDW zmm, ymm{k}{z}
    if isZMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                        // EVEX prefix, byte 0
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))  // P0: opcode map 0F38, XORed with inverted R/X/B/R' register-extension bits
            m.emit(0x7e)                                                        // P1: W=0, vvvv=1111 (unused), pp=F3
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                     // P2: zeroing flag, opmask aaa, L'L=10 (512-bit)
            m.emit(0x23)                                                        // opcode
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))                       // ModRM: mod=11, reg=v0 (src vector), rm=v1 (dst)
        })
    }
    // VPMOVSDW zmm, m256{k}{z}
    if isZMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)  // EVEX prefix for the memory form; 0b10 = 512-bit vector length
            m.emit(0x23)
            m.mrsd(lcode(v[0]), addr(v[1]), 32)                                 // ModRM/SIB/disp; disp8 compressed with scale N=32
        })
    }
    // VPMOVSDW xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)                     // P2: L'L=00 (128-bit)
            m.emit(0x23)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVSDW xmm, m64{k}{z}
    if isEVEXXMM(v0) && isM64kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)  // 0b00 = 128-bit vector length
            m.emit(0x23)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)                                  // disp8 scale N=8
        })
    }
    // VPMOVSDW ymm, xmm{k}{z}
    if isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)                     // P2: L'L=01 (256-bit)
            m.emit(0x23)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVSDW ymm, m128{k}{z}
    if isEVEXYMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)  // 0b01 = 256-bit vector length
            m.emit(0x23)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)                                 // disp8 scale N=16
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPMOVSDW")
    }
    return p
}
 77133  
 77134  // VPMOVSQB performs "Down Convert Packed Quadword Values to Byte Values with Signed Saturation".
 77135  //
 77136  // Mnemonic        : VPMOVSQB
 77137  // Supported forms : (6 forms)
 77138  //
 77139  //    * VPMOVSQB zmm, xmm{k}{z}    [AVX512F]
 77140  //    * VPMOVSQB zmm, m64{k}{z}    [AVX512F]
 77141  //    * VPMOVSQB xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 77142  //    * VPMOVSQB xmm, m16{k}{z}    [AVX512F,AVX512VL]
 77143  //    * VPMOVSQB ymm, xmm{k}{z}    [AVX512F,AVX512VL]
 77144  //    * VPMOVSQB ymm, m32{k}{z}    [AVX512F,AVX512VL]
 77145  //
func (self *Program) VPMOVSQB(v0 interface{}, v1 interface{}) *Instruction {
    // Allocate the instruction, then try each supported operand form in turn;
    // every form that matches v0/v1 contributes one candidate encoder via p.add.
    p := self.alloc("VPMOVSQB", 2, Operands { v0, v1 })
    // VPMOVSQB zmm, xmm{k}{z}
    if isZMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                        // EVEX prefix, byte 0
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))  // P0: opcode map 0F38, XORed with inverted R/X/B/R' register-extension bits
            m.emit(0x7e)                                                        // P1: W=0, vvvv=1111 (unused), pp=F3
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                     // P2: zeroing flag, opmask aaa, L'L=10 (512-bit)
            m.emit(0x22)                                                        // opcode
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))                       // ModRM: mod=11, reg=v0 (src vector), rm=v1 (dst)
        })
    }
    // VPMOVSQB zmm, m64{k}{z}
    if isZMM(v0) && isM64kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)  // EVEX prefix for the memory form; 0b10 = 512-bit vector length
            m.emit(0x22)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)                                  // ModRM/SIB/disp; disp8 compressed with scale N=8
        })
    }
    // VPMOVSQB xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)                     // P2: L'L=00 (128-bit)
            m.emit(0x22)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVSQB xmm, m16{k}{z}
    if isEVEXXMM(v0) && isM16kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)  // 0b00 = 128-bit vector length
            m.emit(0x22)
            m.mrsd(lcode(v[0]), addr(v[1]), 2)                                  // disp8 scale N=2
        })
    }
    // VPMOVSQB ymm, xmm{k}{z}
    if isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)                     // P2: L'L=01 (256-bit)
            m.emit(0x22)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVSQB ymm, m32{k}{z}
    if isEVEXYMM(v0) && isM32kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)  // 0b01 = 256-bit vector length
            m.emit(0x22)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)                                  // disp8 scale N=4
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPMOVSQB")
    }
    return p
}
 77222  
 77223  // VPMOVSQD performs "Down Convert Packed Quadword Values to Doubleword Values with Signed Saturation".
 77224  //
 77225  // Mnemonic        : VPMOVSQD
 77226  // Supported forms : (6 forms)
 77227  //
 77228  //    * VPMOVSQD zmm, ymm{k}{z}     [AVX512F]
 77229  //    * VPMOVSQD zmm, m256{k}{z}    [AVX512F]
 77230  //    * VPMOVSQD xmm, xmm{k}{z}     [AVX512F,AVX512VL]
 77231  //    * VPMOVSQD xmm, m64{k}{z}     [AVX512F,AVX512VL]
 77232  //    * VPMOVSQD ymm, xmm{k}{z}     [AVX512F,AVX512VL]
 77233  //    * VPMOVSQD ymm, m128{k}{z}    [AVX512F,AVX512VL]
 77234  //
func (self *Program) VPMOVSQD(v0 interface{}, v1 interface{}) *Instruction {
    // Allocate the instruction, then try each supported operand form in turn;
    // every form that matches v0/v1 contributes one candidate encoder via p.add.
    p := self.alloc("VPMOVSQD", 2, Operands { v0, v1 })
    // VPMOVSQD zmm, ymm{k}{z}
    if isZMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                        // EVEX prefix, byte 0
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))  // P0: opcode map 0F38, XORed with inverted R/X/B/R' register-extension bits
            m.emit(0x7e)                                                        // P1: W=0, vvvv=1111 (unused), pp=F3
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                     // P2: zeroing flag, opmask aaa, L'L=10 (512-bit)
            m.emit(0x25)                                                        // opcode
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))                       // ModRM: mod=11, reg=v0 (src vector), rm=v1 (dst)
        })
    }
    // VPMOVSQD zmm, m256{k}{z}
    if isZMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)  // EVEX prefix for the memory form; 0b10 = 512-bit vector length
            m.emit(0x25)
            m.mrsd(lcode(v[0]), addr(v[1]), 32)                                 // ModRM/SIB/disp; disp8 compressed with scale N=32
        })
    }
    // VPMOVSQD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)                     // P2: L'L=00 (128-bit)
            m.emit(0x25)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVSQD xmm, m64{k}{z}
    if isEVEXXMM(v0) && isM64kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)  // 0b00 = 128-bit vector length
            m.emit(0x25)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)                                  // disp8 scale N=8
        })
    }
    // VPMOVSQD ymm, xmm{k}{z}
    if isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)                     // P2: L'L=01 (256-bit)
            m.emit(0x25)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVSQD ymm, m128{k}{z}
    if isEVEXYMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)  // 0b01 = 256-bit vector length
            m.emit(0x25)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)                                 // disp8 scale N=16
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPMOVSQD")
    }
    return p
}
 77311  
 77312  // VPMOVSQW performs "Down Convert Packed Quadword Values to Word Values with Signed Saturation".
 77313  //
 77314  // Mnemonic        : VPMOVSQW
 77315  // Supported forms : (6 forms)
 77316  //
 77317  //    * VPMOVSQW zmm, xmm{k}{z}     [AVX512F]
 77318  //    * VPMOVSQW zmm, m128{k}{z}    [AVX512F]
 77319  //    * VPMOVSQW xmm, xmm{k}{z}     [AVX512F,AVX512VL]
 77320  //    * VPMOVSQW xmm, m32{k}{z}     [AVX512F,AVX512VL]
 77321  //    * VPMOVSQW ymm, xmm{k}{z}     [AVX512F,AVX512VL]
 77322  //    * VPMOVSQW ymm, m64{k}{z}     [AVX512F,AVX512VL]
 77323  //
func (self *Program) VPMOVSQW(v0 interface{}, v1 interface{}) *Instruction {
    // Allocate the instruction, then try each supported operand form in turn;
    // every form that matches v0/v1 contributes one candidate encoder via p.add.
    p := self.alloc("VPMOVSQW", 2, Operands { v0, v1 })
    // VPMOVSQW zmm, xmm{k}{z}
    if isZMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                        // EVEX prefix, byte 0
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))  // P0: opcode map 0F38, XORed with inverted R/X/B/R' register-extension bits
            m.emit(0x7e)                                                        // P1: W=0, vvvv=1111 (unused), pp=F3
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                     // P2: zeroing flag, opmask aaa, L'L=10 (512-bit)
            m.emit(0x24)                                                        // opcode
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))                       // ModRM: mod=11, reg=v0 (src vector), rm=v1 (dst)
        })
    }
    // VPMOVSQW zmm, m128{k}{z}
    if isZMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)  // EVEX prefix for the memory form; 0b10 = 512-bit vector length
            m.emit(0x24)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)                                 // ModRM/SIB/disp; disp8 compressed with scale N=16
        })
    }
    // VPMOVSQW xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)                     // P2: L'L=00 (128-bit)
            m.emit(0x24)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVSQW xmm, m32{k}{z}
    if isEVEXXMM(v0) && isM32kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)  // 0b00 = 128-bit vector length
            m.emit(0x24)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)                                  // disp8 scale N=4
        })
    }
    // VPMOVSQW ymm, xmm{k}{z}
    if isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)                     // P2: L'L=01 (256-bit)
            m.emit(0x24)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVSQW ymm, m64{k}{z}
    if isEVEXYMM(v0) && isM64kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)  // 0b01 = 256-bit vector length
            m.emit(0x24)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)                                  // disp8 scale N=8
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPMOVSQW")
    }
    return p
}
 77400  
 77401  // VPMOVSWB performs "Down Convert Packed Word Values to Byte Values with Signed Saturation".
 77402  //
 77403  // Mnemonic        : VPMOVSWB
 77404  // Supported forms : (6 forms)
 77405  //
 77406  //    * VPMOVSWB zmm, ymm{k}{z}     [AVX512BW]
 77407  //    * VPMOVSWB zmm, m256{k}{z}    [AVX512BW]
 77408  //    * VPMOVSWB xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
 77409  //    * VPMOVSWB xmm, m64{k}{z}     [AVX512BW,AVX512VL]
 77410  //    * VPMOVSWB ymm, xmm{k}{z}     [AVX512BW,AVX512VL]
 77411  //    * VPMOVSWB ymm, m128{k}{z}    [AVX512BW,AVX512VL]
 77412  //
func (self *Program) VPMOVSWB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVSWB", 2, Operands { v0, v1 })
    // Each operand pattern below registers one candidate encoder with p.add.
    // Register-destination forms hand-emit the 4-byte EVEX prefix: the 0x62
    // escape, P0 (map select XORed with the inverted register-extension
    // bits), P1 (0x7e: W=0, vvvv unused, pp=F3), and P2 (z bit and opmask
    // ORed with the vector length: 0x48=512-bit, 0x28=256-bit, 0x08=128-bit),
    // followed by opcode 0x20 and a register-direct ModRM byte (source v[0]
    // in the reg field, destination v[1] in r/m). Memory-destination forms
    // delegate the prefix to m.evex; the last argument of m.mrsd is the
    // disp8*N compressed-displacement scale (the memory operand size in
    // bytes).
    // VPMOVSWB zmm, ymm{k}{z}
    if isZMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x20)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVSWB zmm, m256{k}{z}
    if isZMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x20)
            m.mrsd(lcode(v[0]), addr(v[1]), 32)
        })
    }
    // VPMOVSWB xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x20)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVSWB xmm, m64{k}{z}
    if isEVEXXMM(v0) && isM64kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x20)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VPMOVSWB ymm, xmm{k}{z}
    if isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x20)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVSWB ymm, m128{k}{z}
    if isEVEXYMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x20)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)
        })
    }
    // No operand form matched: reject at construction time.
    if p.len == 0 {
        panic("invalid operands for VPMOVSWB")
    }
    return p
}
 77489  
 77490  // VPMOVSXBD performs "Move Packed Byte Integers to Doubleword Integers with Sign Extension".
 77491  //
 77492  // Mnemonic        : VPMOVSXBD
 77493  // Supported forms : (10 forms)
 77494  //
 77495  //    * VPMOVSXBD xmm, xmm           [AVX]
 77496  //    * VPMOVSXBD m32, xmm           [AVX]
 77497  //    * VPMOVSXBD xmm, ymm           [AVX2]
 77498  //    * VPMOVSXBD m64, ymm           [AVX2]
 77499  //    * VPMOVSXBD xmm, zmm{k}{z}     [AVX512F]
 77500  //    * VPMOVSXBD m128, zmm{k}{z}    [AVX512F]
 77501  //    * VPMOVSXBD xmm, xmm{k}{z}     [AVX512F,AVX512VL]
 77502  //    * VPMOVSXBD xmm, ymm{k}{z}     [AVX512F,AVX512VL]
 77503  //    * VPMOVSXBD m32, xmm{k}{z}     [AVX512F,AVX512VL]
 77504  //    * VPMOVSXBD m64, ymm{k}{z}     [AVX512F,AVX512VL]
 77505  //
func (self *Program) VPMOVSXBD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVSXBD", 2, Operands { v0, v1 })
    // Each operand pattern below registers one candidate encoder with p.add.
    // AVX/AVX2 forms use the 3-byte VEX prefix: hand-emitted (0xc4 escape,
    // then bytes carrying the inverted register-extension bits and
    // 0x79/0x7d for the 128/256-bit vector length) for register sources,
    // or built by m.vex3 for memory sources. AVX-512 forms use the EVEX
    // prefix: hand-emitted (0x62 escape, P0/P1/P2; P2 low bits 0x48/0x28/
    // 0x08 select 512/256/128-bit length) for register sources, or built
    // by m.evex for memory sources. Every form then emits opcode 0x21 and
    // a ModRM byte with the destination v[1] in the reg field and the
    // source v[0] in r/m. The last argument of m.mrsd is the disp8*N
    // compressed-displacement scale (1 for VEX forms, which do not
    // compress displacements).
    // VPMOVSXBD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0x21)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXBD m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x21)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPMOVSXBD xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)
            m.emit(0x21)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXBD m64, ymm
    if isM64(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x21)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPMOVSXBD xmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x21)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXBD m128, zmm{k}{z}
    if isM128(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x21)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VPMOVSXBD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x21)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXBD xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x21)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXBD m32, xmm{k}{z}
    if isM32(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x21)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VPMOVSXBD m64, ymm{k}{z}
    if isM64(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x21)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // No operand form matched: reject at construction time.
    if p.len == 0 {
        panic("invalid operands for VPMOVSXBD")
    }
    return p
}
 77626  
 77627  // VPMOVSXBQ performs "Move Packed Byte Integers to Quadword Integers with Sign Extension".
 77628  //
 77629  // Mnemonic        : VPMOVSXBQ
 77630  // Supported forms : (10 forms)
 77631  //
 77632  //    * VPMOVSXBQ xmm, xmm          [AVX]
 77633  //    * VPMOVSXBQ m16, xmm          [AVX]
 77634  //    * VPMOVSXBQ xmm, ymm          [AVX2]
 77635  //    * VPMOVSXBQ m32, ymm          [AVX2]
 77636  //    * VPMOVSXBQ xmm, zmm{k}{z}    [AVX512F]
 77637  //    * VPMOVSXBQ m64, zmm{k}{z}    [AVX512F]
 77638  //    * VPMOVSXBQ xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 77639  //    * VPMOVSXBQ xmm, ymm{k}{z}    [AVX512F,AVX512VL]
 77640  //    * VPMOVSXBQ m16, xmm{k}{z}    [AVX512F,AVX512VL]
 77641  //    * VPMOVSXBQ m32, ymm{k}{z}    [AVX512F,AVX512VL]
 77642  //
func (self *Program) VPMOVSXBQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVSXBQ", 2, Operands { v0, v1 })
    // Each operand pattern below registers one candidate encoder with p.add.
    // AVX/AVX2 forms use the 3-byte VEX prefix: hand-emitted (0xc4 escape,
    // then bytes carrying the inverted register-extension bits and
    // 0x79/0x7d for the 128/256-bit vector length) for register sources,
    // or built by m.vex3 for memory sources. AVX-512 forms use the EVEX
    // prefix: hand-emitted (0x62 escape, P0/P1/P2; P2 low bits 0x48/0x28/
    // 0x08 select 512/256/128-bit length) for register sources, or built
    // by m.evex for memory sources. Every form then emits opcode 0x22 and
    // a ModRM byte with the destination v[1] in the reg field and the
    // source v[0] in r/m. The last argument of m.mrsd is the disp8*N
    // compressed-displacement scale (1 for VEX forms, which do not
    // compress displacements).
    // VPMOVSXBQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0x22)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXBQ m16, xmm
    if isM16(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x22)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPMOVSXBQ xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)
            m.emit(0x22)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXBQ m32, ymm
    if isM32(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x22)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPMOVSXBQ xmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x22)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXBQ m64, zmm{k}{z}
    if isM64(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x22)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VPMOVSXBQ xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x22)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXBQ xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x22)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXBQ m16, xmm{k}{z}
    if isM16(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x22)
            m.mrsd(lcode(v[1]), addr(v[0]), 2)
        })
    }
    // VPMOVSXBQ m32, ymm{k}{z}
    if isM32(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x22)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // No operand form matched: reject at construction time.
    if p.len == 0 {
        panic("invalid operands for VPMOVSXBQ")
    }
    return p
}
 77763  
 77764  // VPMOVSXBW performs "Move Packed Byte Integers to Word Integers with Sign Extension".
 77765  //
 77766  // Mnemonic        : VPMOVSXBW
 77767  // Supported forms : (10 forms)
 77768  //
 77769  //    * VPMOVSXBW xmm, xmm           [AVX]
 77770  //    * VPMOVSXBW m64, xmm           [AVX]
 77771  //    * VPMOVSXBW xmm, ymm           [AVX2]
 77772  //    * VPMOVSXBW m128, ymm          [AVX2]
 77773  //    * VPMOVSXBW ymm, zmm{k}{z}     [AVX512BW]
 77774  //    * VPMOVSXBW m256, zmm{k}{z}    [AVX512BW]
 77775  //    * VPMOVSXBW xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
 77776  //    * VPMOVSXBW xmm, ymm{k}{z}     [AVX512BW,AVX512VL]
 77777  //    * VPMOVSXBW m64, xmm{k}{z}     [AVX512BW,AVX512VL]
 77778  //    * VPMOVSXBW m128, ymm{k}{z}    [AVX512BW,AVX512VL]
 77779  //
func (self *Program) VPMOVSXBW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVSXBW", 2, Operands { v0, v1 })
    // Each operand pattern below registers one candidate encoder with p.add.
    // AVX/AVX2 forms use the 3-byte VEX prefix: hand-emitted (0xc4 escape,
    // then bytes carrying the inverted register-extension bits and
    // 0x79/0x7d for the 128/256-bit vector length) for register sources,
    // or built by m.vex3 for memory sources. AVX-512 forms use the EVEX
    // prefix: hand-emitted (0x62 escape, P0/P1/P2; P2 low bits 0x48/0x28/
    // 0x08 select 512/256/128-bit length) for register sources, or built
    // by m.evex for memory sources. Every form then emits opcode 0x20 and
    // a ModRM byte with the destination v[1] in the reg field and the
    // source v[0] in r/m. The last argument of m.mrsd is the disp8*N
    // compressed-displacement scale (1 for VEX forms, which do not
    // compress displacements).
    // VPMOVSXBW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0x20)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXBW m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x20)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPMOVSXBW xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)
            m.emit(0x20)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXBW m128, ymm
    if isM128(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x20)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPMOVSXBW ymm, zmm{k}{z}
    if isEVEXYMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x20)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXBW m256, zmm{k}{z}
    if isM256(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x20)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VPMOVSXBW xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x20)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXBW xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x20)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXBW m64, xmm{k}{z}
    if isM64(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x20)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VPMOVSXBW m128, ymm{k}{z}
    if isM128(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x20)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // No operand form matched: reject at construction time.
    if p.len == 0 {
        panic("invalid operands for VPMOVSXBW")
    }
    return p
}
 77900  
 77901  // VPMOVSXDQ performs "Move Packed Doubleword Integers to Quadword Integers with Sign Extension".
 77902  //
 77903  // Mnemonic        : VPMOVSXDQ
 77904  // Supported forms : (10 forms)
 77905  //
 77906  //    * VPMOVSXDQ xmm, xmm           [AVX]
 77907  //    * VPMOVSXDQ m64, xmm           [AVX]
 77908  //    * VPMOVSXDQ xmm, ymm           [AVX2]
 77909  //    * VPMOVSXDQ m128, ymm          [AVX2]
 77910  //    * VPMOVSXDQ ymm, zmm{k}{z}     [AVX512F]
 77911  //    * VPMOVSXDQ m256, zmm{k}{z}    [AVX512F]
 77912  //    * VPMOVSXDQ xmm, xmm{k}{z}     [AVX512F,AVX512VL]
 77913  //    * VPMOVSXDQ xmm, ymm{k}{z}     [AVX512F,AVX512VL]
 77914  //    * VPMOVSXDQ m64, xmm{k}{z}     [AVX512F,AVX512VL]
 77915  //    * VPMOVSXDQ m128, ymm{k}{z}    [AVX512F,AVX512VL]
 77916  //
func (self *Program) VPMOVSXDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVSXDQ", 2, Operands { v0, v1 })
    // Each operand pattern below registers one candidate encoder with p.add.
    // AVX/AVX2 forms use the 3-byte VEX prefix: hand-emitted (0xc4 escape,
    // then bytes carrying the inverted register-extension bits and
    // 0x79/0x7d for the 128/256-bit vector length) for register sources,
    // or built by m.vex3 for memory sources. AVX-512 forms use the EVEX
    // prefix: hand-emitted (0x62 escape, P0/P1/P2; P2 low bits 0x48/0x28/
    // 0x08 select 512/256/128-bit length) for register sources, or built
    // by m.evex for memory sources. Every form then emits opcode 0x25 and
    // a ModRM byte with the destination v[1] in the reg field and the
    // source v[0] in r/m. The last argument of m.mrsd is the disp8*N
    // compressed-displacement scale (1 for VEX forms, which do not
    // compress displacements).
    // VPMOVSXDQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0x25)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXDQ m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x25)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPMOVSXDQ xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)
            m.emit(0x25)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXDQ m128, ymm
    if isM128(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x25)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPMOVSXDQ ymm, zmm{k}{z}
    if isEVEXYMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x25)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXDQ m256, zmm{k}{z}
    if isM256(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x25)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VPMOVSXDQ xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x25)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXDQ xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x25)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXDQ m64, xmm{k}{z}
    if isM64(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x25)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VPMOVSXDQ m128, ymm{k}{z}
    if isM128(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x25)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // No operand form matched: reject at construction time.
    if p.len == 0 {
        panic("invalid operands for VPMOVSXDQ")
    }
    return p
}
 78037  
 78038  // VPMOVSXWD performs "Move Packed Word Integers to Doubleword Integers with Sign Extension".
 78039  //
 78040  // Mnemonic        : VPMOVSXWD
 78041  // Supported forms : (10 forms)
 78042  //
 78043  //    * VPMOVSXWD xmm, xmm           [AVX]
 78044  //    * VPMOVSXWD m64, xmm           [AVX]
 78045  //    * VPMOVSXWD xmm, ymm           [AVX2]
 78046  //    * VPMOVSXWD m128, ymm          [AVX2]
 78047  //    * VPMOVSXWD ymm, zmm{k}{z}     [AVX512F]
 78048  //    * VPMOVSXWD m256, zmm{k}{z}    [AVX512F]
 78049  //    * VPMOVSXWD xmm, xmm{k}{z}     [AVX512F,AVX512VL]
 78050  //    * VPMOVSXWD xmm, ymm{k}{z}     [AVX512F,AVX512VL]
 78051  //    * VPMOVSXWD m64, xmm{k}{z}     [AVX512F,AVX512VL]
 78052  //    * VPMOVSXWD m128, ymm{k}{z}    [AVX512F,AVX512VL]
 78053  //
func (self *Program) VPMOVSXWD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVSXWD", 2, Operands { v0, v1 })
    // Operands are AT&T-ordered: v0 = source, v1 = destination. Each
    // matching operand form below registers one candidate encoder; the
    // final encoding is chosen later, when the program is assembled.
    // VPMOVSXWD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built 3-byte VEX prefix: 0xC4, then the RXB/map byte
            // (hcode folds each register's high bit into R and B), then
            // the W/vvvv/L/pp byte (0x79: L=0, i.e. 128-bit, pp=66).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0x23)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM, register-direct: dest in reg, src in rm
        })
    }
    // VPMOVSXWD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x23)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)   // VEX form: disp8 is unscaled (scale 1)
        })
    }
    // VPMOVSXWD xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)   // 0x7d: same as 0x79 but L=1, i.e. 256-bit
            m.emit(0x23)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXWD m128, ymm
    if isM128(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x23)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPMOVSXWD ymm, zmm{k}{z}
    if isEVEXYMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built 4-byte EVEX prefix: 0x62, P0 with R/B/R' from the
            // registers' extended high bits, P1 (0x7d: W=0, pp=66), then P2
            // carrying the zeroing flag (bit 7), mask register (low bits)
            // and vector length (0x48 selects 512-bit).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x23)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXWD m256, zmm{k}{z}
    if isM256(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x23)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)   // EVEX compressed disp8, scaled by memory size (32 bytes)
        })
    }
    // VPMOVSXWD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)   // 0x08: 128-bit vector length
            m.emit(0x23)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXWD xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)   // 0x28: 256-bit vector length
            m.emit(0x23)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXWD m64, xmm{k}{z}
    if isM64(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x23)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VPMOVSXWD m128, ymm{k}{z}
    if isM128(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x23)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPMOVSXWD")
    }
    return p
}
 78174  
 78175  // VPMOVSXWQ performs "Move Packed Word Integers to Quadword Integers with Sign Extension".
 78176  //
 78177  // Mnemonic        : VPMOVSXWQ
 78178  // Supported forms : (10 forms)
 78179  //
 78180  //    * VPMOVSXWQ xmm, xmm           [AVX]
 78181  //    * VPMOVSXWQ m32, xmm           [AVX]
 78182  //    * VPMOVSXWQ xmm, ymm           [AVX2]
 78183  //    * VPMOVSXWQ m64, ymm           [AVX2]
 78184  //    * VPMOVSXWQ xmm, zmm{k}{z}     [AVX512F]
 78185  //    * VPMOVSXWQ m128, zmm{k}{z}    [AVX512F]
 78186  //    * VPMOVSXWQ xmm, xmm{k}{z}     [AVX512F,AVX512VL]
 78187  //    * VPMOVSXWQ xmm, ymm{k}{z}     [AVX512F,AVX512VL]
 78188  //    * VPMOVSXWQ m32, xmm{k}{z}     [AVX512F,AVX512VL]
 78189  //    * VPMOVSXWQ m64, ymm{k}{z}     [AVX512F,AVX512VL]
 78190  //
func (self *Program) VPMOVSXWQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVSXWQ", 2, Operands { v0, v1 })
    // Operands are AT&T-ordered: v0 = source, v1 = destination. Each
    // matching operand form registers one candidate encoder; the final
    // encoding is chosen later during assembly. Opcode 0x24 (map 0F38).
    // VPMOVSXWQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built 3-byte VEX prefix (0x79: 128-bit, pp=66);
            // hcode folds each register's high bit into R and B.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0x24)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM, register-direct
        })
    }
    // VPMOVSXWQ m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x24)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)   // VEX form: disp8 unscaled
        })
    }
    // VPMOVSXWQ xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)   // 0x7d: 256-bit vector length
            m.emit(0x24)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXWQ m64, ymm
    if isM64(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x24)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPMOVSXWQ xmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built 4-byte EVEX prefix; last prefix byte carries the
            // zeroing flag (bit 7), mask register (low bits) and vector
            // length (0x48 = 512-bit).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x24)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXWQ m128, zmm{k}{z}
    if isM128(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x24)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)   // EVEX compressed disp8, scaled by memory size
        })
    }
    // VPMOVSXWQ xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)   // 0x08: 128-bit
            m.emit(0x24)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXWQ xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)   // 0x28: 256-bit
            m.emit(0x24)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXWQ m32, xmm{k}{z}
    if isM32(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x24)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VPMOVSXWQ m64, ymm{k}{z}
    if isM64(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x24)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPMOVSXWQ")
    }
    return p
}
 78311  
 78312  // VPMOVUSDB performs "Down Convert Packed Doubleword Values to Byte Values with Unsigned Saturation".
 78313  //
 78314  // Mnemonic        : VPMOVUSDB
 78315  // Supported forms : (6 forms)
 78316  //
 78317  //    * VPMOVUSDB zmm, xmm{k}{z}     [AVX512F]
 78318  //    * VPMOVUSDB zmm, m128{k}{z}    [AVX512F]
 78319  //    * VPMOVUSDB xmm, xmm{k}{z}     [AVX512F,AVX512VL]
 78320  //    * VPMOVUSDB xmm, m32{k}{z}     [AVX512F,AVX512VL]
 78321  //    * VPMOVUSDB ymm, xmm{k}{z}     [AVX512F,AVX512VL]
 78322  //    * VPMOVUSDB ymm, m64{k}{z}     [AVX512F,AVX512VL]
 78323  //
func (self *Program) VPMOVUSDB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVUSDB", 2, Operands { v0, v1 })
    // Down-convert: v0 (wider source register) is encoded in the ModRM reg
    // field and v1 (narrower destination register or memory) in rm — the
    // reverse of the VPMOVSX* family. EVEX-only; opcode 0x12 (map 0F38, F3).
    // VPMOVUSDB zmm, xmm{k}{z}
    if isZMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built 4-byte EVEX prefix; last prefix byte carries the
            // zeroing flag (bit 7), mask register (low bits) and vector
            // length (0x48 = 512-bit source).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))   // ModRM: source in reg, dest in rm
        })
    }
    // VPMOVUSDB zmm, m128{k}{z}
    if isZMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)   // EVEX compressed disp8, scaled by store size
        })
    }
    // VPMOVUSDB xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)   // 0x08: 128-bit
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVUSDB xmm, m32{k}{z}
    if isEVEXXMM(v0) && isM32kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // VPMOVUSDB ymm, xmm{k}{z}
    if isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)   // 0x28: 256-bit
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVUSDB ymm, m64{k}{z}
    if isEVEXYMM(v0) && isM64kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPMOVUSDB")
    }
    return p
}
 78400  
 78401  // VPMOVUSDW performs "Down Convert Packed Doubleword Values to Word Values with Unsigned Saturation".
 78402  //
 78403  // Mnemonic        : VPMOVUSDW
 78404  // Supported forms : (6 forms)
 78405  //
 78406  //    * VPMOVUSDW zmm, ymm{k}{z}     [AVX512F]
 78407  //    * VPMOVUSDW zmm, m256{k}{z}    [AVX512F]
 78408  //    * VPMOVUSDW xmm, xmm{k}{z}     [AVX512F,AVX512VL]
 78409  //    * VPMOVUSDW xmm, m64{k}{z}     [AVX512F,AVX512VL]
 78410  //    * VPMOVUSDW ymm, xmm{k}{z}     [AVX512F,AVX512VL]
 78411  //    * VPMOVUSDW ymm, m128{k}{z}    [AVX512F,AVX512VL]
 78412  //
func (self *Program) VPMOVUSDW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVUSDW", 2, Operands { v0, v1 })
    // Down-convert: v0 (wider source register) goes in the ModRM reg field,
    // v1 (narrower destination register or memory) in rm. EVEX-only;
    // opcode 0x13 (map 0F38, F3 prefix).
    // VPMOVUSDW zmm, ymm{k}{z}
    if isZMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built 4-byte EVEX prefix; last prefix byte carries the
            // zeroing flag, mask register, and vector length (0x48 = 512-bit).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x13)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))   // ModRM: source in reg, dest in rm
        })
    }
    // VPMOVUSDW zmm, m256{k}{z}
    if isZMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x13)
            m.mrsd(lcode(v[0]), addr(v[1]), 32)   // EVEX compressed disp8, scaled by store size
        })
    }
    // VPMOVUSDW xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)   // 0x08: 128-bit
            m.emit(0x13)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVUSDW xmm, m64{k}{z}
    if isEVEXXMM(v0) && isM64kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x13)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VPMOVUSDW ymm, xmm{k}{z}
    if isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)   // 0x28: 256-bit
            m.emit(0x13)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVUSDW ymm, m128{k}{z}
    if isEVEXYMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x13)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPMOVUSDW")
    }
    return p
}
 78489  
 78490  // VPMOVUSQB performs "Down Convert Packed Quadword Values to Byte Values with Unsigned Saturation".
 78491  //
 78492  // Mnemonic        : VPMOVUSQB
 78493  // Supported forms : (6 forms)
 78494  //
 78495  //    * VPMOVUSQB zmm, xmm{k}{z}    [AVX512F]
 78496  //    * VPMOVUSQB zmm, m64{k}{z}    [AVX512F]
 78497  //    * VPMOVUSQB xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 78498  //    * VPMOVUSQB xmm, m16{k}{z}    [AVX512F,AVX512VL]
 78499  //    * VPMOVUSQB ymm, xmm{k}{z}    [AVX512F,AVX512VL]
 78500  //    * VPMOVUSQB ymm, m32{k}{z}    [AVX512F,AVX512VL]
 78501  //
func (self *Program) VPMOVUSQB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVUSQB", 2, Operands { v0, v1 })
    // Down-convert: v0 (wider source register) goes in the ModRM reg field,
    // v1 (narrower destination register or memory) in rm. EVEX-only;
    // opcode 0x12 (map 0F38, F3 prefix).
    // VPMOVUSQB zmm, xmm{k}{z}
    if isZMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built 4-byte EVEX prefix; last prefix byte carries the
            // zeroing flag, mask register, and vector length (0x48 = 512-bit).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x12)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))   // ModRM: source in reg, dest in rm
        })
    }
    // VPMOVUSQB zmm, m64{k}{z}
    if isZMM(v0) && isM64kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x12)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)   // EVEX compressed disp8, scaled by store size
        })
    }
    // VPMOVUSQB xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)   // 0x08: 128-bit
            m.emit(0x12)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVUSQB xmm, m16{k}{z}
    if isEVEXXMM(v0) && isM16kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x12)
            m.mrsd(lcode(v[0]), addr(v[1]), 2)
        })
    }
    // VPMOVUSQB ymm, xmm{k}{z}
    if isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)   // 0x28: 256-bit
            m.emit(0x12)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVUSQB ymm, m32{k}{z}
    if isEVEXYMM(v0) && isM32kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x12)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPMOVUSQB")
    }
    return p
}
 78578  
 78579  // VPMOVUSQD performs "Down Convert Packed Quadword Values to Doubleword Values with Unsigned Saturation".
 78580  //
 78581  // Mnemonic        : VPMOVUSQD
 78582  // Supported forms : (6 forms)
 78583  //
 78584  //    * VPMOVUSQD zmm, ymm{k}{z}     [AVX512F]
 78585  //    * VPMOVUSQD zmm, m256{k}{z}    [AVX512F]
 78586  //    * VPMOVUSQD xmm, xmm{k}{z}     [AVX512F,AVX512VL]
 78587  //    * VPMOVUSQD xmm, m64{k}{z}     [AVX512F,AVX512VL]
 78588  //    * VPMOVUSQD ymm, xmm{k}{z}     [AVX512F,AVX512VL]
 78589  //    * VPMOVUSQD ymm, m128{k}{z}    [AVX512F,AVX512VL]
 78590  //
func (self *Program) VPMOVUSQD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVUSQD", 2, Operands { v0, v1 })
    // Down-convert: v0 (wider source register) goes in the ModRM reg field,
    // v1 (narrower destination register or memory) in rm. EVEX-only;
    // opcode 0x15 (map 0F38, F3 prefix).
    // VPMOVUSQD zmm, ymm{k}{z}
    if isZMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built 4-byte EVEX prefix; last prefix byte carries the
            // zeroing flag, mask register, and vector length (0x48 = 512-bit).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))   // ModRM: source in reg, dest in rm
        })
    }
    // VPMOVUSQD zmm, m256{k}{z}
    if isZMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x15)
            m.mrsd(lcode(v[0]), addr(v[1]), 32)   // EVEX compressed disp8, scaled by store size
        })
    }
    // VPMOVUSQD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)   // 0x08: 128-bit
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVUSQD xmm, m64{k}{z}
    if isEVEXXMM(v0) && isM64kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x15)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VPMOVUSQD ymm, xmm{k}{z}
    if isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)   // 0x28: 256-bit
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVUSQD ymm, m128{k}{z}
    if isEVEXYMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x15)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPMOVUSQD")
    }
    return p
}
 78667  
 78668  // VPMOVUSQW performs "Down Convert Packed Quadword Values to Word Values with Unsigned Saturation".
 78669  //
 78670  // Mnemonic        : VPMOVUSQW
 78671  // Supported forms : (6 forms)
 78672  //
 78673  //    * VPMOVUSQW zmm, xmm{k}{z}     [AVX512F]
 78674  //    * VPMOVUSQW zmm, m128{k}{z}    [AVX512F]
 78675  //    * VPMOVUSQW xmm, xmm{k}{z}     [AVX512F,AVX512VL]
 78676  //    * VPMOVUSQW xmm, m32{k}{z}     [AVX512F,AVX512VL]
 78677  //    * VPMOVUSQW ymm, xmm{k}{z}     [AVX512F,AVX512VL]
 78678  //    * VPMOVUSQW ymm, m64{k}{z}     [AVX512F,AVX512VL]
 78679  //
func (self *Program) VPMOVUSQW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVUSQW", 2, Operands { v0, v1 })
    // Down-convert: v0 (wider source register) goes in the ModRM reg field,
    // v1 (narrower destination register or memory) in rm. EVEX-only;
    // opcode 0x14 (map 0F38, F3 prefix).
    // VPMOVUSQW zmm, xmm{k}{z}
    if isZMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built 4-byte EVEX prefix; last prefix byte carries the
            // zeroing flag, mask register, and vector length (0x48 = 512-bit).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))   // ModRM: source in reg, dest in rm
        })
    }
    // VPMOVUSQW zmm, m128{k}{z}
    if isZMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x14)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)   // EVEX compressed disp8, scaled by store size
        })
    }
    // VPMOVUSQW xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)   // 0x08: 128-bit
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVUSQW xmm, m32{k}{z}
    if isEVEXXMM(v0) && isM32kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x14)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // VPMOVUSQW ymm, xmm{k}{z}
    if isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)   // 0x28: 256-bit
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVUSQW ymm, m64{k}{z}
    if isEVEXYMM(v0) && isM64kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x14)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPMOVUSQW")
    }
    return p
}
 78756  
// VPMOVUSWB performs "Down Convert Packed Word Values to Byte Values with Unsigned Saturation".
//
// Mnemonic        : VPMOVUSWB
// Supported forms : (6 forms)
//
//    * VPMOVUSWB zmm, ymm{k}{z}     [AVX512BW]
//    * VPMOVUSWB zmm, m256{k}{z}    [AVX512BW]
//    * VPMOVUSWB xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMOVUSWB xmm, m64{k}{z}     [AVX512BW,AVX512VL]
//    * VPMOVUSWB ymm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMOVUSWB ymm, m128{k}{z}    [AVX512BW,AVX512VL]
//
// Every operand combination that matches a supported form registers an
// encoder closure on the instruction via p.add; register-destination forms
// hand-assemble the 4-byte EVEX prefix, memory-destination forms delegate
// to the m.evex/m.mrsd helpers. Panics if no form matches.
func (self *Program) VPMOVUSWB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVUSWB", 2, Operands { v0, v1 })
    // VPMOVUSWB zmm, ymm{k}{z}
    if isZMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                              // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4))) // EVEX P0: register-extension bits XORed into the base byte
            m.emit(0x7e)                                                              // EVEX P1: fixed prefix/opcode-map selection bits for this instruction
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                           // EVEX P2: zeroing flag, opmask register, 0x48 = 512-bit vector length
            m.emit(0x10)                                                              // opcode
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))                             // ModRM: source v0 in reg field, destination v1 in rm field
        })
    }
    // VPMOVUSWB zmm, m256{k}{z}
    if isZMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0) // EVEX prefix via helper; 0b10 length = 512-bit source
            m.emit(0x10)                                      // opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 32)               // ModRM/SIB/displacement; 32 matches the 32-byte (m256) operand — presumably the disp8 scale
        })
    }
    // VPMOVUSWB xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)   // 0x08 = 128-bit vector length
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVUSWB xmm, m64{k}{z}
    if isEVEXXMM(v0) && isM64kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0) // 0b00 length = 128-bit source
            m.emit(0x10)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)                // 8-byte (m64) destination
        })
    }
    // VPMOVUSWB ymm, xmm{k}{z}
    if isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)   // 0x28 = 256-bit vector length
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVUSWB ymm, m128{k}{z}
    if isEVEXYMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0) // 0b01 length = 256-bit source
            m.emit(0x10)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)               // 16-byte (m128) destination
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMOVUSWB")
    }
    return p
}
 78845  
// VPMOVW2M performs "Move Signs of Packed Word Integers to Mask Register".
//
// Mnemonic        : VPMOVW2M
// Supported forms : (3 forms)
//
//    * VPMOVW2M zmm, k    [AVX512BW]
//    * VPMOVW2M xmm, k    [AVX512BW,AVX512VL]
//    * VPMOVW2M ymm, k    [AVX512BW,AVX512VL]
//
// All three forms are register-only: each encoder hand-assembles the 4-byte
// EVEX prefix, then opcode 0x29 and a ModRM byte with the destination mask
// register in the reg field. The three branches differ only in the required
// ISA set and the vector-length bits of the third EVEX payload byte.
// Panics if the operands match no form.
func (self *Program) VPMOVW2M(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVW2M", 2, Operands { v0, v1 })
    // VPMOVW2M zmm, k
    if isZMM(v0) && isK(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                              // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // EVEX P0: register-extension bits
            m.emit(0xfe)                                                              // EVEX P1: fixed prefix/opcode-map bits for this instruction
            m.emit(0x48)                                                              // EVEX P2: 512-bit vector length, no masking
            m.emit(0x29)                                                              // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                             // ModRM: mask dest v1 in reg field, vector src v0 in rm field
        })
    }
    // VPMOVW2M xmm, k
    if isEVEXXMM(v0) && isK(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit(0x08)                                      // 128-bit vector length
            m.emit(0x29)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVW2M ymm, k
    if isEVEXYMM(v0) && isK(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit(0x28)                                      // 256-bit vector length
            m.emit(0x29)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMOVW2M")
    }
    return p
}
 78901  
// VPMOVWB performs "Down Convert Packed Word Values to Byte Values with Truncation".
//
// Mnemonic        : VPMOVWB
// Supported forms : (6 forms)
//
//    * VPMOVWB zmm, ymm{k}{z}     [AVX512BW]
//    * VPMOVWB zmm, m256{k}{z}    [AVX512BW]
//    * VPMOVWB xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMOVWB xmm, m64{k}{z}     [AVX512BW,AVX512VL]
//    * VPMOVWB ymm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMOVWB ymm, m128{k}{z}    [AVX512BW,AVX512VL]
//
// Identical encoding scheme to VPMOVUSWB except for opcode 0x30: register
// forms hand-assemble the EVEX prefix and a ModRM byte with the source in
// the reg field; memory forms go through m.evex/m.mrsd. Panics if no form
// matches.
func (self *Program) VPMOVWB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVWB", 2, Operands { v0, v1 })
    // VPMOVWB zmm, ymm{k}{z}
    if isZMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                              // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4))) // EVEX P0: register-extension bits
            m.emit(0x7e)                                                              // EVEX P1: fixed prefix/opcode-map bits
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                           // EVEX P2: zeroing, opmask, 512-bit length
            m.emit(0x30)                                                              // opcode
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))                             // ModRM: source v0 in reg field, destination v1 in rm field
        })
    }
    // VPMOVWB zmm, m256{k}{z}
    if isZMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0) // 0b10 length = 512-bit source
            m.emit(0x30)
            m.mrsd(lcode(v[0]), addr(v[1]), 32)               // 32-byte (m256) destination
        })
    }
    // VPMOVWB xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)   // 128-bit vector length
            m.emit(0x30)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVWB xmm, m64{k}{z}
    if isEVEXXMM(v0) && isM64kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0) // 0b00 length = 128-bit source
            m.emit(0x30)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)                // 8-byte (m64) destination
        })
    }
    // VPMOVWB ymm, xmm{k}{z}
    if isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)   // 256-bit vector length
            m.emit(0x30)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVWB ymm, m128{k}{z}
    if isEVEXYMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0) // 0b01 length = 256-bit source
            m.emit(0x30)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)               // 16-byte (m128) destination
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMOVWB")
    }
    return p
}
 78990  
// VPMOVZXBD performs "Move Packed Byte Integers to Doubleword Integers with Zero Extension".
//
// Mnemonic        : VPMOVZXBD
// Supported forms : (10 forms)
//
//    * VPMOVZXBD xmm, xmm           [AVX]
//    * VPMOVZXBD m32, xmm           [AVX]
//    * VPMOVZXBD xmm, ymm           [AVX2]
//    * VPMOVZXBD m64, ymm           [AVX2]
//    * VPMOVZXBD xmm, zmm{k}{z}     [AVX512F]
//    * VPMOVZXBD m128, zmm{k}{z}    [AVX512F]
//    * VPMOVZXBD xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVZXBD xmm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVZXBD m32, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVZXBD m64, ymm{k}{z}     [AVX512F,AVX512VL]
//
// AVX/AVX2 forms use a VEX prefix (opcode 0x31); AVX-512 forms use an EVEX
// prefix with optional opmask/zeroing. Unlike the VPMOV* down-converts, this
// is a load-style instruction: the ModRM reg field holds the destination
// v1. Panics if no form matches.
func (self *Program) VPMOVZXBD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVZXBD", 2, Operands { v0, v1 })
    // VPMOVZXBD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                                  // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))       // VEX byte 1: register-extension bits XORed into the base byte
            m.emit(0x79)                                                  // VEX byte 2: fixed prefix/length bits (128-bit form)
            m.emit(0x31)                                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                 // ModRM: destination v1 in reg field, source v0 in rm field
        })
    }
    // VPMOVZXBD m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)          // VEX prefix via helper for the memory form
            m.emit(0x31)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                            // scale 1: VEX encoding has no compressed displacement
        })
    }
    // VPMOVZXBD xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)                                                  // VEX byte 2 with 256-bit length bit set
            m.emit(0x31)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXBD m64, ymm
    if isM64(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x31)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPMOVZXBD xmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                              // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // EVEX P0: register-extension bits
            m.emit(0x7d)                                                              // EVEX P1: fixed prefix/opcode-map bits
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                           // EVEX P2: zeroing, opmask, 512-bit length
            m.emit(0x31)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXBD m128, zmm{k}{z}
    if isM128(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0) // 0b10 length = 512-bit destination
            m.emit(0x31)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)               // 16-byte (m128) source
        })
    }
    // VPMOVZXBD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)   // 128-bit vector length
            m.emit(0x31)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXBD xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)   // 256-bit vector length
            m.emit(0x31)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXBD m32, xmm{k}{z}
    if isM32(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0) // 0b00 length = 128-bit destination
            m.emit(0x31)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)                // 4-byte (m32) source
        })
    }
    // VPMOVZXBD m64, ymm{k}{z}
    if isM64(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0) // 0b01 length = 256-bit destination
            m.emit(0x31)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)                // 8-byte (m64) source
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMOVZXBD")
    }
    return p
}
 79127  
// VPMOVZXBQ performs "Move Packed Byte Integers to Quadword Integers with Zero Extension".
//
// Mnemonic        : VPMOVZXBQ
// Supported forms : (10 forms)
//
//    * VPMOVZXBQ xmm, xmm          [AVX]
//    * VPMOVZXBQ m16, xmm          [AVX]
//    * VPMOVZXBQ xmm, ymm          [AVX2]
//    * VPMOVZXBQ m32, ymm          [AVX2]
//    * VPMOVZXBQ xmm, zmm{k}{z}    [AVX512F]
//    * VPMOVZXBQ m64, zmm{k}{z}    [AVX512F]
//    * VPMOVZXBQ xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPMOVZXBQ xmm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPMOVZXBQ m16, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPMOVZXBQ m32, ymm{k}{z}    [AVX512F,AVX512VL]
//
// Same encoding scheme as VPMOVZXBD but with opcode 0x32 and byte→quadword
// widening, so the memory source is a quarter of the destination width
// (m16→xmm, m32→ymm, m64→zmm). The destination v1 sits in the ModRM reg
// field. Panics if no form matches.
func (self *Program) VPMOVZXBQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVZXBQ", 2, Operands { v0, v1 })
    // VPMOVZXBQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                                  // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))       // VEX byte 1: register-extension bits
            m.emit(0x79)                                                  // VEX byte 2: fixed prefix bits (128-bit form)
            m.emit(0x32)                                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                 // ModRM: destination v1 in reg field, source v0 in rm field
        })
    }
    // VPMOVZXBQ m16, xmm
    if isM16(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)          // VEX prefix via helper for the memory form
            m.emit(0x32)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                            // scale 1: VEX encoding has no compressed displacement
        })
    }
    // VPMOVZXBQ xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)                                                  // VEX byte 2 with 256-bit length bit set
            m.emit(0x32)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXBQ m32, ymm
    if isM32(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x32)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPMOVZXBQ xmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                              // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // EVEX P0: register-extension bits
            m.emit(0x7d)                                                              // EVEX P1: fixed prefix/opcode-map bits
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                           // EVEX P2: zeroing, opmask, 512-bit length
            m.emit(0x32)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXBQ m64, zmm{k}{z}
    if isM64(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0) // 0b10 length = 512-bit destination
            m.emit(0x32)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)                // 8-byte (m64) source
        })
    }
    // VPMOVZXBQ xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)   // 128-bit vector length
            m.emit(0x32)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXBQ xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)   // 256-bit vector length
            m.emit(0x32)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXBQ m16, xmm{k}{z}
    if isM16(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0) // 0b00 length = 128-bit destination
            m.emit(0x32)
            m.mrsd(lcode(v[1]), addr(v[0]), 2)                // 2-byte (m16) source
        })
    }
    // VPMOVZXBQ m32, ymm{k}{z}
    if isM32(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0) // 0b01 length = 256-bit destination
            m.emit(0x32)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)                // 4-byte (m32) source
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMOVZXBQ")
    }
    return p
}
 79264  
// VPMOVZXBW performs "Move Packed Byte Integers to Word Integers with Zero Extension".
//
// Mnemonic        : VPMOVZXBW
// Supported forms : (10 forms)
//
//    * VPMOVZXBW xmm, xmm           [AVX]
//    * VPMOVZXBW m64, xmm           [AVX]
//    * VPMOVZXBW xmm, ymm           [AVX2]
//    * VPMOVZXBW m128, ymm          [AVX2]
//    * VPMOVZXBW ymm, zmm{k}{z}     [AVX512BW]
//    * VPMOVZXBW m256, zmm{k}{z}    [AVX512BW]
//    * VPMOVZXBW xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMOVZXBW xmm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMOVZXBW m64, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMOVZXBW m128, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Same encoding scheme as the other VPMOVZX* loads with opcode 0x30; this
// is a 2x (byte→word) widening, so the source is half the destination
// width, and the AVX-512 forms require AVX512BW rather than AVX512F.
// The destination v1 sits in the ModRM reg field. Panics if no form
// matches.
func (self *Program) VPMOVZXBW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVZXBW", 2, Operands { v0, v1 })
    // VPMOVZXBW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                                  // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))       // VEX byte 1: register-extension bits
            m.emit(0x79)                                                  // VEX byte 2: fixed prefix bits (128-bit form)
            m.emit(0x30)                                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                 // ModRM: destination v1 in reg field, source v0 in rm field
        })
    }
    // VPMOVZXBW m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)          // VEX prefix via helper for the memory form
            m.emit(0x30)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                            // scale 1: VEX encoding has no compressed displacement
        })
    }
    // VPMOVZXBW xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)                                                  // VEX byte 2 with 256-bit length bit set
            m.emit(0x30)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXBW m128, ymm
    if isM128(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x30)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPMOVZXBW ymm, zmm{k}{z}
    if isEVEXYMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                              // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // EVEX P0: register-extension bits
            m.emit(0x7d)                                                              // EVEX P1: fixed prefix/opcode-map bits
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                           // EVEX P2: zeroing, opmask, 512-bit length
            m.emit(0x30)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXBW m256, zmm{k}{z}
    if isM256(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0) // 0b10 length = 512-bit destination
            m.emit(0x30)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)               // 32-byte (m256) source
        })
    }
    // VPMOVZXBW xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)   // 128-bit vector length
            m.emit(0x30)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXBW xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)   // 256-bit vector length
            m.emit(0x30)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXBW m64, xmm{k}{z}
    if isM64(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0) // 0b00 length = 128-bit destination
            m.emit(0x30)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)                // 8-byte (m64) source
        })
    }
    // VPMOVZXBW m128, ymm{k}{z}
    if isM128(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0) // 0b01 length = 256-bit destination
            m.emit(0x30)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)               // 16-byte (m128) source
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMOVZXBW")
    }
    return p
}
 79401  
// VPMOVZXDQ performs "Move Packed Doubleword Integers to Quadword Integers with Zero Extension".
//
// Mnemonic        : VPMOVZXDQ
// Supported forms : (10 forms)
//
//    * VPMOVZXDQ xmm, xmm           [AVX]
//    * VPMOVZXDQ m64, xmm           [AVX]
//    * VPMOVZXDQ xmm, ymm           [AVX2]
//    * VPMOVZXDQ m128, ymm          [AVX2]
//    * VPMOVZXDQ ymm, zmm{k}{z}     [AVX512F]
//    * VPMOVZXDQ m256, zmm{k}{z}    [AVX512F]
//    * VPMOVZXDQ xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVZXDQ xmm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVZXDQ m64, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVZXDQ m128, ymm{k}{z}    [AVX512F,AVX512VL]
//
// Same encoding scheme as the other VPMOVZX* loads with opcode 0x35; this
// is a 2x (dword→qword) widening, so the source is half the destination
// width. The destination v1 sits in the ModRM reg field. Panics if no form
// matches.
func (self *Program) VPMOVZXDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVZXDQ", 2, Operands { v0, v1 })
    // VPMOVZXDQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                                  // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))       // VEX byte 1: register-extension bits
            m.emit(0x79)                                                  // VEX byte 2: fixed prefix bits (128-bit form)
            m.emit(0x35)                                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                 // ModRM: destination v1 in reg field, source v0 in rm field
        })
    }
    // VPMOVZXDQ m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)          // VEX prefix via helper for the memory form
            m.emit(0x35)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                            // scale 1: VEX encoding has no compressed displacement
        })
    }
    // VPMOVZXDQ xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)                                                  // VEX byte 2 with 256-bit length bit set
            m.emit(0x35)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXDQ m128, ymm
    if isM128(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x35)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPMOVZXDQ ymm, zmm{k}{z}
    if isEVEXYMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                              // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // EVEX P0: register-extension bits
            m.emit(0x7d)                                                              // EVEX P1: fixed prefix/opcode-map bits
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                           // EVEX P2: zeroing, opmask, 512-bit length
            m.emit(0x35)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXDQ m256, zmm{k}{z}
    if isM256(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0) // 0b10 length = 512-bit destination
            m.emit(0x35)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)               // 32-byte (m256) source
        })
    }
    // VPMOVZXDQ xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)   // 128-bit vector length
            m.emit(0x35)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXDQ xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)   // 256-bit vector length
            m.emit(0x35)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXDQ m64, xmm{k}{z}
    if isM64(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0) // 0b00 length = 128-bit destination
            m.emit(0x35)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)                // 8-byte (m64) source
        })
    }
    // VPMOVZXDQ m128, ymm{k}{z}
    if isM128(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0) // 0b01 length = 256-bit destination
            m.emit(0x35)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)               // 16-byte (m128) source
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMOVZXDQ")
    }
    return p
}
 79538  
 79539  // VPMOVZXWD performs "Move Packed Word Integers to Doubleword Integers with Zero Extension".
 79540  //
 79541  // Mnemonic        : VPMOVZXWD
 79542  // Supported forms : (10 forms)
 79543  //
 79544  //    * VPMOVZXWD xmm, xmm           [AVX]
 79545  //    * VPMOVZXWD m64, xmm           [AVX]
 79546  //    * VPMOVZXWD xmm, ymm           [AVX2]
 79547  //    * VPMOVZXWD m128, ymm          [AVX2]
 79548  //    * VPMOVZXWD ymm, zmm{k}{z}     [AVX512F]
 79549  //    * VPMOVZXWD m256, zmm{k}{z}    [AVX512F]
 79550  //    * VPMOVZXWD xmm, xmm{k}{z}     [AVX512F,AVX512VL]
 79551  //    * VPMOVZXWD xmm, ymm{k}{z}     [AVX512F,AVX512VL]
 79552  //    * VPMOVZXWD m64, xmm{k}{z}     [AVX512F,AVX512VL]
 79553  //    * VPMOVZXWD m128, ymm{k}{z}    [AVX512F,AVX512VL]
 79554  //
func (self *Program) VPMOVZXWD(v0 interface{}, v1 interface{}) *Instruction {
    // Each operand form that matches registers a candidate encoder via p.add;
    // if no form matched, the panic at the end reports the invalid operands.
    // Operands are in AT&T order: v0 is the source, v1 the destination.
    p := self.alloc("VPMOVZXWD", 2, Operands { v0, v1 })
    // VPMOVZXWD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix (0xc4): register-extension bits
            // of both operands are XOR-folded into the second byte. Opcode is
            // 0x33, then a reg-reg ModRM with the destination in the reg field.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0x33)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXWD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory-source form: vex3 builds the prefix from the address,
            // mrsd emits ModRM/SIB/displacement (scale 1 = no disp8 compression).
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x33)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPMOVZXWD xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same as the xmm,xmm form but with the 256-bit VEX.L payload (0x7d).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)
            m.emit(0x33)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXWD m128, ymm
    if isM128(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x33)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPMOVZXWD ymm, zmm{k}{z}
    if isEVEXYMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix (0x62): the second byte folds in the
            // inverted register-extension bits; the fourth carries the opmask
            // register (kcode), the zeroing flag (zcode) and the vector-length
            // bits (0x48 here — the zmm-destination variant; the xmm/ymm forms
            // below use 0x08/0x28 respectively).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x33)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXWD m256, zmm{k}{z}
    if isM256(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // The final mrsd argument is the displacement scale (EVEX disp8*N
            // compression); it equals the memory operand size in bytes (32 = m256).
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x33)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VPMOVZXWD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x33)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXWD xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x33)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXWD m64, xmm{k}{z}
    if isM64(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x33)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VPMOVZXWD m128, ymm{k}{z}
    if isM128(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x33)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // No operand form matched: the caller passed an unsupported combination.
    if p.len == 0 {
        panic("invalid operands for VPMOVZXWD")
    }
    return p
}
 79675  
 79676  // VPMOVZXWQ performs "Move Packed Word Integers to Quadword Integers with Zero Extension".
 79677  //
 79678  // Mnemonic        : VPMOVZXWQ
 79679  // Supported forms : (10 forms)
 79680  //
 79681  //    * VPMOVZXWQ xmm, xmm           [AVX]
 79682  //    * VPMOVZXWQ m32, xmm           [AVX]
 79683  //    * VPMOVZXWQ xmm, ymm           [AVX2]
 79684  //    * VPMOVZXWQ m64, ymm           [AVX2]
 79685  //    * VPMOVZXWQ xmm, zmm{k}{z}     [AVX512F]
 79686  //    * VPMOVZXWQ m128, zmm{k}{z}    [AVX512F]
 79687  //    * VPMOVZXWQ xmm, xmm{k}{z}     [AVX512F,AVX512VL]
 79688  //    * VPMOVZXWQ xmm, ymm{k}{z}     [AVX512F,AVX512VL]
 79689  //    * VPMOVZXWQ m32, xmm{k}{z}     [AVX512F,AVX512VL]
 79690  //    * VPMOVZXWQ m64, ymm{k}{z}     [AVX512F,AVX512VL]
 79691  //
func (self *Program) VPMOVZXWQ(v0 interface{}, v1 interface{}) *Instruction {
    // Each operand form that matches registers a candidate encoder via p.add;
    // if no form matched, the panic at the end reports the invalid operands.
    // Operands are in AT&T order: v0 is the source, v1 the destination. Since
    // each word expands to a quadword, the memory source is a quarter of the
    // destination vector width (m32->xmm, m64->ymm, m128->zmm).
    p := self.alloc("VPMOVZXWQ", 2, Operands { v0, v1 })
    // VPMOVZXWQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix (0xc4), opcode 0x34, then a
            // reg-reg ModRM with the destination in the reg field.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0x34)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXWQ m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory-source form: mrsd emits ModRM/SIB/displacement
            // (scale 1 = no disp8 compression for VEX encodings).
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x34)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPMOVZXWQ xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same as the xmm,xmm form but with the 256-bit VEX.L payload (0x7d).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)
            m.emit(0x34)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXWQ m64, ymm
    if isM64(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x34)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPMOVZXWQ xmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix (0x62): the fourth byte carries the
            // opmask (kcode), zeroing flag (zcode) and vector-length bits
            // (0x48 zmm / 0x28 ymm / 0x08 xmm in the forms below).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x34)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXWQ m128, zmm{k}{z}
    if isM128(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // The final mrsd argument is the displacement scale (EVEX disp8*N
            // compression); it equals the memory operand size in bytes.
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x34)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VPMOVZXWQ xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x34)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXWQ xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x34)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXWQ m32, xmm{k}{z}
    if isM32(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x34)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VPMOVZXWQ m64, ymm{k}{z}
    if isM64(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x34)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // No operand form matched: the caller passed an unsupported combination.
    if p.len == 0 {
        panic("invalid operands for VPMOVZXWQ")
    }
    return p
}
 79812  
 79813  // VPMULDQ performs "Multiply Packed Signed Doubleword Integers and Store Quadword Result".
 79814  //
 79815  // Mnemonic        : VPMULDQ
 79816  // Supported forms : (10 forms)
 79817  //
 79818  //    * VPMULDQ xmm, xmm, xmm                   [AVX]
 79819  //    * VPMULDQ m128, xmm, xmm                  [AVX]
 79820  //    * VPMULDQ ymm, ymm, ymm                   [AVX2]
 79821  //    * VPMULDQ m256, ymm, ymm                  [AVX2]
 79822  //    * VPMULDQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
 79823  //    * VPMULDQ zmm, zmm, zmm{k}{z}             [AVX512F]
 79824  //    * VPMULDQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 79825  //    * VPMULDQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 79826  //    * VPMULDQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 79827  //    * VPMULDQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 79828  //
func (self *Program) VPMULDQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Each operand form that matches registers a candidate encoder via p.add;
    // if no form matched, the panic at the end reports the invalid operands.
    // Operands are in AT&T order: v0/v1 are the sources, v2 the destination.
    // The *M64bcst forms also accept an embedded-broadcast memory operand
    // (bcode(v[0]) sets the EVEX broadcast bit).
    p := self.alloc("VPMULDQ", 3, Operands { v0, v1, v2 })
    // VPMULDQ xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix: the third byte folds the
            // second source (v[1]) into the inverted VEX.vvvv field via
            // hlcode; opcode 0x28, then a reg-reg ModRM byte.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x28)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULDQ m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x28)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMULDQ ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x28)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULDQ m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x28)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMULDQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0x85 (vs 0x05 in W0 instructions) — presumably EVEX.W=1 for the
            // 64-bit element operation; final mrsd argument is the disp8*N
            // compression scale (= memory operand size in bytes).
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x28)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPMULDQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix (0x62): byte 3 folds the second
            // source into vvvv (0xfd base — W=1), byte 4 carries the opmask
            // (kcode), zeroing flag (zcode), the extended-vvvv bit (ecode)
            // and the vector-length bits (0x40 zmm / 0x20 ymm / 0x00 xmm).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x28)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULDQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x28)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPMULDQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x28)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULDQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x28)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPMULDQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x28)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand form matched: the caller passed an unsupported combination.
    if p.len == 0 {
        panic("invalid operands for VPMULDQ")
    }
    return p
}
 79949  
 79950  // VPMULHRSW performs "Packed Multiply Signed Word Integers and Store High Result with Round and Scale".
 79951  //
 79952  // Mnemonic        : VPMULHRSW
 79953  // Supported forms : (10 forms)
 79954  //
 79955  //    * VPMULHRSW xmm, xmm, xmm           [AVX]
 79956  //    * VPMULHRSW m128, xmm, xmm          [AVX]
 79957  //    * VPMULHRSW ymm, ymm, ymm           [AVX2]
 79958  //    * VPMULHRSW m256, ymm, ymm          [AVX2]
 79959  //    * VPMULHRSW zmm, zmm, zmm{k}{z}     [AVX512BW]
 79960  //    * VPMULHRSW m512, zmm, zmm{k}{z}    [AVX512BW]
 79961  //    * VPMULHRSW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
 79962  //    * VPMULHRSW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
 79963  //    * VPMULHRSW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
 79964  //    * VPMULHRSW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
 79965  //
func (self *Program) VPMULHRSW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Each operand form that matches registers a candidate encoder via p.add;
    // if no form matched, the panic at the end reports the invalid operands.
    // Operands are in AT&T order: v0/v1 are the sources, v2 the destination.
    p := self.alloc("VPMULHRSW", 3, Operands { v0, v1, v2 })
    // VPMULHRSW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix (0xc4): the third byte folds
            // the second source (v[1]) into the inverted VEX.vvvv field;
            // opcode 0x0b, then a reg-reg ModRM byte.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x0b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULHRSW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory-source form: mrsd emits ModRM/SIB/displacement
            // (scale 1 = no disp8 compression for VEX encodings).
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x0b)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMULHRSW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit variant: 0x7d base selects VEX.L=1.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x0b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULHRSW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x0b)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMULHRSW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix (0x62): byte 4 carries the opmask
            // (kcode), zeroing flag (zcode), extended-vvvv bit (ecode) and the
            // vector-length bits (0x40 zmm / 0x20 ymm / 0x00 xmm below).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x0b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULHRSW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Final mrsd argument is the EVEX disp8*N compression scale
            // (= memory operand size in bytes).
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x0b)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPMULHRSW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x0b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULHRSW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x0b)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPMULHRSW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x0b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULHRSW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x0b)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No operand form matched: the caller passed an unsupported combination.
    if p.len == 0 {
        panic("invalid operands for VPMULHRSW")
    }
    return p
}
 80086  
 80087  // VPMULHUW performs "Multiply Packed Unsigned Word Integers and Store High Result".
 80088  //
 80089  // Mnemonic        : VPMULHUW
 80090  // Supported forms : (10 forms)
 80091  //
 80092  //    * VPMULHUW xmm, xmm, xmm           [AVX]
 80093  //    * VPMULHUW m128, xmm, xmm          [AVX]
 80094  //    * VPMULHUW ymm, ymm, ymm           [AVX2]
 80095  //    * VPMULHUW m256, ymm, ymm          [AVX2]
 80096  //    * VPMULHUW zmm, zmm, zmm{k}{z}     [AVX512BW]
 80097  //    * VPMULHUW m512, zmm, zmm{k}{z}    [AVX512BW]
 80098  //    * VPMULHUW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
 80099  //    * VPMULHUW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
 80100  //    * VPMULHUW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
 80101  //    * VPMULHUW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
 80102  //
func (self *Program) VPMULHUW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Each operand form that matches registers a candidate encoder via p.add;
    // if no form matched, the panic at the end reports the invalid operands.
    // Operands are in AT&T order: v0/v1 are the sources, v2 the destination.
    // Unlike the 0F38-map instructions nearby, the legacy forms here use the
    // vex2 helper (2-byte-capable VEX prefix) and opcode 0xe4.
    p := self.alloc("VPMULHUW", 3, Operands { v0, v1, v2 })
    // VPMULHUW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // vex2 prefix (first arg 1 = 128-bit variant), reg-reg ModRM with
            // the destination in the reg field.
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xe4)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULHUW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory-source form: mrsd emits ModRM/SIB/displacement
            // (scale 1 = no disp8 compression for VEX encodings).
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xe4)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMULHUW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // vex2 first arg 5 = 256-bit variant.
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xe4)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULHUW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xe4)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMULHUW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix (0x62); 0xf1 base here (vs 0xf2 for
            // the 0F38-map instructions nearby) — presumably the 0F opcode
            // map. Byte 4 carries the opmask (kcode), zeroing flag (zcode)
            // and vector-length bits (0x40 zmm / 0x20 ymm / 0x00 xmm below).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xe4)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULHUW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Final mrsd argument is the EVEX disp8*N compression scale
            // (= memory operand size in bytes).
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe4)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPMULHUW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xe4)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULHUW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe4)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPMULHUW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xe4)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULHUW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe4)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No operand form matched: the caller passed an unsupported combination.
    if p.len == 0 {
        panic("invalid operands for VPMULHUW")
    }
    return p
}
 80219  
// VPMULHW performs "Multiply Packed Signed Word Integers and Store High Result".
//
// Mnemonic        : VPMULHW
// Supported forms : (10 forms)
//
//    * VPMULHW xmm, xmm, xmm           [AVX]
//    * VPMULHW m128, xmm, xmm          [AVX]
//    * VPMULHW ymm, ymm, ymm           [AVX2]
//    * VPMULHW m256, ymm, ymm          [AVX2]
//    * VPMULHW zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPMULHW m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPMULHW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMULHW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPMULHW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMULHW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
func (self *Program) VPMULHW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPMULHW", 3, Operands { v0, v1, v2 })
    // Each operand-form match below registers one candidate encoder via p.add;
    // if no form matches, p.len stays 0 and we panic at the bottom.
    // VPMULHW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1])) // 2-byte VEX prefix
            m.emit(0xe5)                               // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11 (register-direct), reg=v2, rm=v0
        })
    }
    // VPMULHW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xe5)
            m.mrsd(lcode(v[2]), addr(v[0]), 1) // ModRM+SIB+disp for the memory operand
        })
    }
    // VPMULHW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1])) // 5 (vs. 1 above) selects the 256-bit form
            m.emit(0xe5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULHW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xe5)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMULHW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix built inline: 0x62 escape followed by three payload bytes.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40) // 0x40 = 512-bit length (0x00 xmm / 0x20 ymm below)
            m.emit(0xe5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULHW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe5)
            m.mrsd(lcode(v[2]), addr(v[0]), 64) // displacement scale matches the m512 operand size
        })
    }
    // VPMULHW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xe5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULHW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe5)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPMULHW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xe5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULHW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe5)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No form matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VPMULHW")
    }
    return p
}
 80352  
// VPMULLD performs "Multiply Packed Signed Doubleword Integers and Store Low Result".
//
// Mnemonic        : VPMULLD
// Supported forms : (10 forms)
//
//    * VPMULLD xmm, xmm, xmm                   [AVX]
//    * VPMULLD m128, xmm, xmm                  [AVX]
//    * VPMULLD ymm, ymm, ymm                   [AVX2]
//    * VPMULLD m256, ymm, ymm                  [AVX2]
//    * VPMULLD m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPMULLD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPMULLD m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPMULLD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPMULLD m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPMULLD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
func (self *Program) VPMULLD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPMULLD", 3, Operands { v0, v1, v2 })
    // Each operand-form match below registers one candidate encoder via p.add;
    // if none matches, p.len stays 0 and we panic at the bottom.
    // VPMULLD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix built inline (0xc4 escape + 2 payload bytes).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x40)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11 (register-direct), reg=v2, rm=v0
        })
    }
    // VPMULLD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x40)
            m.mrsd(lcode(v[2]), addr(v[0]), 1) // ModRM+SIB+disp for the memory operand
        })
    }
    // VPMULLD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3)) // 0x7d (vs. 0x79 above) selects the 256-bit form
            m.emit(0x40)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULLD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x40)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMULLD m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // bcode(v[0]) carries the embedded-broadcast flag for the m32bcst form.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x40)
            m.mrsd(lcode(v[2]), addr(v[0]), 64) // displacement scale matches the m512 operand size
        })
    }
    // VPMULLD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix built inline (0x62 escape + 3 payload bytes).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40) // 0x40 = 512-bit length (0x00 xmm / 0x20 ymm below)
            m.emit(0x40)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULLD m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x40)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPMULLD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x40)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULLD m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x40)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPMULLD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x40)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VPMULLD")
    }
    return p
}
 80489  
// VPMULLQ performs "Multiply Packed Signed Quadword Integers and Store Low Result".
//
// Mnemonic        : VPMULLQ
// Supported forms : (6 forms)
//
//    * VPMULLQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512DQ]
//    * VPMULLQ zmm, zmm, zmm{k}{z}             [AVX512DQ]
//    * VPMULLQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VPMULLQ xmm, xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VPMULLQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VPMULLQ ymm, ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
func (self *Program) VPMULLQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPMULLQ", 3, Operands { v0, v1, v2 })
    // EVEX-only instruction: every form below is AVX-512. Each match registers
    // one candidate encoder via p.add; if none matches, we panic at the bottom.
    // VPMULLQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // bcode(v[0]) carries the embedded-broadcast flag for the m64bcst form.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x40) // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64) // displacement scale matches the m512 operand size
        })
    }
    // VPMULLQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix built inline (0x62 escape + 3 payload bytes).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40) // 0x40 = 512-bit length (0x00 xmm / 0x20 ymm below)
            m.emit(0x40)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11 (register-direct), reg=v2, rm=v0
        })
    }
    // VPMULLQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x40)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPMULLQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x40)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULLQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x40)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPMULLQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x40)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VPMULLQ")
    }
    return p
}
 80578  
// VPMULLW performs "Multiply Packed Signed Word Integers and Store Low Result".
//
// Mnemonic        : VPMULLW
// Supported forms : (10 forms)
//
//    * VPMULLW xmm, xmm, xmm           [AVX]
//    * VPMULLW m128, xmm, xmm          [AVX]
//    * VPMULLW ymm, ymm, ymm           [AVX2]
//    * VPMULLW m256, ymm, ymm          [AVX2]
//    * VPMULLW zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPMULLW m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPMULLW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMULLW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPMULLW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMULLW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
func (self *Program) VPMULLW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPMULLW", 3, Operands { v0, v1, v2 })
    // Each operand-form match below registers one candidate encoder via p.add;
    // if none matches, p.len stays 0 and we panic at the bottom.
    // VPMULLW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1])) // 2-byte VEX prefix
            m.emit(0xd5)                               // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11 (register-direct), reg=v2, rm=v0
        })
    }
    // VPMULLW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xd5)
            m.mrsd(lcode(v[2]), addr(v[0]), 1) // ModRM+SIB+disp for the memory operand
        })
    }
    // VPMULLW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1])) // 5 (vs. 1 above) selects the 256-bit form
            m.emit(0xd5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULLW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xd5)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMULLW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix built inline (0x62 escape + 3 payload bytes).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40) // 0x40 = 512-bit length (0x00 xmm / 0x20 ymm below)
            m.emit(0xd5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULLW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xd5)
            m.mrsd(lcode(v[2]), addr(v[0]), 64) // displacement scale matches the m512 operand size
        })
    }
    // VPMULLW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xd5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULLW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xd5)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPMULLW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xd5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULLW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xd5)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No form matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VPMULLW")
    }
    return p
}
 80711  
// VPMULTISHIFTQB performs "Select Packed Unaligned Bytes from Quadword Sources".
//
// Mnemonic        : VPMULTISHIFTQB
// Supported forms : (6 forms)
//
//    * VPMULTISHIFTQB m128/m64bcst, xmm, xmm{k}{z}    [AVX512VBMI,AVX512VL]
//    * VPMULTISHIFTQB xmm, xmm, xmm{k}{z}             [AVX512VBMI,AVX512VL]
//    * VPMULTISHIFTQB m256/m64bcst, ymm, ymm{k}{z}    [AVX512VBMI,AVX512VL]
//    * VPMULTISHIFTQB ymm, ymm, ymm{k}{z}             [AVX512VBMI,AVX512VL]
//    * VPMULTISHIFTQB m512/m64bcst, zmm, zmm{k}{z}    [AVX512VBMI]
//    * VPMULTISHIFTQB zmm, zmm, zmm{k}{z}             [AVX512VBMI]
func (self *Program) VPMULTISHIFTQB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPMULTISHIFTQB", 3, Operands { v0, v1, v2 })
    // EVEX-only instruction (AVX512VBMI). Each operand-form match below registers
    // one candidate encoder via p.add; if none matches, we panic at the bottom.
    // VPMULTISHIFTQB m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VBMI | ISA_AVX512VL)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // bcode(v[0]) carries the embedded-broadcast flag for the m64bcst form.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x83) // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 16) // displacement scale matches the m128 operand size
        })
    }
    // VPMULTISHIFTQB xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VBMI | ISA_AVX512VL)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix built inline (0x62 escape + 3 payload bytes).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00) // 0x00 = 128-bit length (0x20 ymm / 0x40 zmm below)
            m.emit(0x83)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11 (register-direct), reg=v2, rm=v0
        })
    }
    // VPMULTISHIFTQB m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VBMI | ISA_AVX512VL)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x83)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPMULTISHIFTQB ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VBMI | ISA_AVX512VL)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x83)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULTISHIFTQB m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x83)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPMULTISHIFTQB zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x83)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VPMULTISHIFTQB")
    }
    return p
}
 80800  
 80801  // VPMULUDQ performs "Multiply Packed Unsigned Doubleword Integers".
 80802  //
 80803  // Mnemonic        : VPMULUDQ
 80804  // Supported forms : (10 forms)
 80805  //
 80806  //    * VPMULUDQ xmm, xmm, xmm                   [AVX]
 80807  //    * VPMULUDQ m128, xmm, xmm                  [AVX]
 80808  //    * VPMULUDQ ymm, ymm, ymm                   [AVX2]
 80809  //    * VPMULUDQ m256, ymm, ymm                  [AVX2]
 80810  //    * VPMULUDQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
 80811  //    * VPMULUDQ zmm, zmm, zmm{k}{z}             [AVX512F]
 80812  //    * VPMULUDQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 80813  //    * VPMULUDQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 80814  //    * VPMULUDQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 80815  //    * VPMULUDQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 80816  //
 80817  func (self *Program) VPMULUDQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
 80818      p := self.alloc("VPMULUDQ", 3, Operands { v0, v1, v2 })
 80819      // VPMULUDQ xmm, xmm, xmm
 80820      if isXMM(v0) && isXMM(v1) && isXMM(v2) {
 80821          self.require(ISA_AVX)
 80822          p.domain = DomainAVX
 80823          p.add(0, func(m *_Encoding, v []interface{}) {
 80824              m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
 80825              m.emit(0xf4)
 80826              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 80827          })
 80828      }
 80829      // VPMULUDQ m128, xmm, xmm
 80830      if isM128(v0) && isXMM(v1) && isXMM(v2) {
 80831          self.require(ISA_AVX)
 80832          p.domain = DomainAVX
 80833          p.add(0, func(m *_Encoding, v []interface{}) {
 80834              m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 80835              m.emit(0xf4)
 80836              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 80837          })
 80838      }
 80839      // VPMULUDQ ymm, ymm, ymm
 80840      if isYMM(v0) && isYMM(v1) && isYMM(v2) {
 80841          self.require(ISA_AVX2)
 80842          p.domain = DomainAVX
 80843          p.add(0, func(m *_Encoding, v []interface{}) {
 80844              m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
 80845              m.emit(0xf4)
 80846              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 80847          })
 80848      }
 80849      // VPMULUDQ m256, ymm, ymm
 80850      if isM256(v0) && isYMM(v1) && isYMM(v2) {
 80851          self.require(ISA_AVX2)
 80852          p.domain = DomainAVX
 80853          p.add(0, func(m *_Encoding, v []interface{}) {
 80854              m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 80855              m.emit(0xf4)
 80856              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 80857          })
 80858      }
 80859      // VPMULUDQ m512/m64bcst, zmm, zmm{k}{z}
 80860      if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
 80861          self.require(ISA_AVX512F)
 80862          p.domain = DomainAVX
 80863          p.add(0, func(m *_Encoding, v []interface{}) {
 80864              m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
 80865              m.emit(0xf4)
 80866              m.mrsd(lcode(v[2]), addr(v[0]), 64)
 80867          })
 80868      }
 80869      // VPMULUDQ zmm, zmm, zmm{k}{z}
 80870      if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
 80871          self.require(ISA_AVX512F)
 80872          p.domain = DomainAVX
 80873          p.add(0, func(m *_Encoding, v []interface{}) {
 80874              m.emit(0x62)
 80875              m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 80876              m.emit(0xfd ^ (hlcode(v[1]) << 3))
 80877              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
 80878              m.emit(0xf4)
 80879              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 80880          })
 80881      }
 80882      // VPMULUDQ m128/m64bcst, xmm, xmm{k}{z}
 80883      if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
 80884          self.require(ISA_AVX512VL | ISA_AVX512F)
 80885          p.domain = DomainAVX
 80886          p.add(0, func(m *_Encoding, v []interface{}) {
 80887              m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
 80888              m.emit(0xf4)
 80889              m.mrsd(lcode(v[2]), addr(v[0]), 16)
 80890          })
 80891      }
 80892      // VPMULUDQ xmm, xmm, xmm{k}{z}
 80893      if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
 80894          self.require(ISA_AVX512VL | ISA_AVX512F)
 80895          p.domain = DomainAVX
 80896          p.add(0, func(m *_Encoding, v []interface{}) {
 80897              m.emit(0x62)
 80898              m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 80899              m.emit(0xfd ^ (hlcode(v[1]) << 3))
 80900              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
 80901              m.emit(0xf4)
 80902              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 80903          })
 80904      }
 80905      // VPMULUDQ m256/m64bcst, ymm, ymm{k}{z}
 80906      if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
 80907          self.require(ISA_AVX512VL | ISA_AVX512F)
 80908          p.domain = DomainAVX
 80909          p.add(0, func(m *_Encoding, v []interface{}) {
 80910              m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
 80911              m.emit(0xf4)
 80912              m.mrsd(lcode(v[2]), addr(v[0]), 32)
 80913          })
 80914      }
 80915      // VPMULUDQ ymm, ymm, ymm{k}{z}
 80916      if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
 80917          self.require(ISA_AVX512VL | ISA_AVX512F)
 80918          p.domain = DomainAVX
 80919          p.add(0, func(m *_Encoding, v []interface{}) {
 80920              m.emit(0x62)
 80921              m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 80922              m.emit(0xfd ^ (hlcode(v[1]) << 3))
 80923              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
 80924              m.emit(0xf4)
 80925              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 80926          })
 80927      }
 80928      if p.len == 0 {
 80929          panic("invalid operands for VPMULUDQ")
 80930      }
 80931      return p
 80932  }
 80933  
// VPOPCNTD performs "Packed Population Count for Doubleword Integers".
//
// Mnemonic        : VPOPCNTD
// Supported forms : (2 forms)
//
//    * VPOPCNTD m512/m32bcst, zmm{k}{z}    [AVX512VPOPCNTDQ]
//    * VPOPCNTD zmm, zmm{k}{z}             [AVX512VPOPCNTDQ]
//
func (self *Program) VPOPCNTD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPOPCNTD", 2, Operands { v0, v1 })
    // VPOPCNTD m512/m32bcst, zmm{k}{z}
    if isM512M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512VPOPCNTDQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: EVEX prefix via helper, opcode 0x55, then ModRM/SIB
            // with a compressed-displacement scale of 64 (presumably the full
            // 512-bit vector width); broadcast bit taken from the operand.
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x55)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VPOPCNTD zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512VPOPCNTDQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: 4-byte EVEX prefix emitted by hand (0x62 escape,
            // then three payload bytes built from register high bits, opmask
            // and zeroing flags), followed by opcode 0x55 and a reg-reg ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x55)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No encoder matched: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VPOPCNTD")
    }
    return p
}
 80972  
// VPOPCNTQ performs "Packed Population Count for Quadword Integers".
//
// Mnemonic        : VPOPCNTQ
// Supported forms : (2 forms)
//
//    * VPOPCNTQ m512/m64bcst, zmm{k}{z}    [AVX512VPOPCNTDQ]
//    * VPOPCNTQ zmm, zmm{k}{z}             [AVX512VPOPCNTDQ]
//
func (self *Program) VPOPCNTQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPOPCNTQ", 2, Operands { v0, v1 })
    // VPOPCNTQ m512/m64bcst, zmm{k}{z}
    if isM512M64bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512VPOPCNTDQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same layout as VPOPCNTD's memory form, but with 0x85 as the
            // prefix-flag argument (vs 0x05) — presumably this sets EVEX.W
            // to select the quadword variant; confirm against m.evex.
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x55)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VPOPCNTQ zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512VPOPCNTDQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-emitted EVEX prefix; third byte is 0xfd here where the
            // doubleword variant (VPOPCNTD) uses 0x7d — the quadword selector.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x55)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No encoder matched: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VPOPCNTQ")
    }
    return p
}
 81011  
// VPOR performs "Packed Bitwise Logical OR".
//
// Mnemonic        : VPOR
// Supported forms : (4 forms)
//
//    * VPOR xmm, xmm, xmm     [AVX]
//    * VPOR m128, xmm, xmm    [AVX]
//    * VPOR ymm, ymm, ymm     [AVX2]
//    * VPOR m256, ymm, ymm    [AVX2]
//
func (self *Program) VPOR(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPOR", 3, Operands { v0, v1, v2 })
    // VPOR xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Two-byte VEX prefix (first arg 1 for the 128-bit form), then
            // opcode 0xEB and a register-register ModRM byte.
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xeb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPOR m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: ModRM/SIB + displacement come from m.mrsd
            // (scale 1 — VEX encodings have no compressed displacement).
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xeb)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPOR ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit form: vex2 first arg is 5 (vs 1 for 128-bit) —
            // presumably setting VEX.L; confirm against m.vex2.
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xeb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPOR m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xeb)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No encoder matched: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VPOR")
    }
    return p
}
 81069  
// VPORD performs "Bitwise Logical OR of Packed Doubleword Integers".
//
// Mnemonic        : VPORD
// Supported forms : (6 forms)
//
//    * VPORD m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPORD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPORD m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPORD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPORD m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPORD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPORD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPORD", 3, Operands { v0, v1, v2 })
    // VPORD m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form: opcode 0xEB, compressed-displacement scale 64
            // for the 512-bit vector (32 and 16 in the narrower forms below).
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xeb)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPORD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-emitted 4-byte EVEX prefix (0x62 escape plus
            // payload bytes mixing in register high bits, the non-destructive
            // source v[1], opmask and zeroing flags), then opcode and ModRM.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xeb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPORD m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xeb)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPORD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // The trailing OR constant (0x00/0x20/0x40 across the three
            // register forms) varies with vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xeb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPORD m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xeb)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPORD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xeb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder matched: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VPORD")
    }
    return p
}
 81158  
// VPORQ performs "Bitwise Logical OR of Packed Quadword Integers".
//
// Mnemonic        : VPORQ
// Supported forms : (6 forms)
//
//    * VPORQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPORQ zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPORQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPORQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPORQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPORQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPORQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPORQ", 3, Operands { v0, v1, v2 })
    // VPORQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Identical structure to VPORD's encoders except for the quadword
            // selector: 0x85 (vs 0x05) in the evex helper and 0xfd (vs 0x7d)
            // in the hand-emitted prefixes — presumably EVEX.W=1.
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xeb)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPORQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xeb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPORQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xeb)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPORQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xeb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPORQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xeb)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPORQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xeb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder matched: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VPORQ")
    }
    return p
}
 81247  
// VPPERM performs "Packed Permute Bytes".
//
// Mnemonic        : VPPERM
// Supported forms : (3 forms)
//
//    * VPPERM xmm, xmm, xmm, xmm     [XOP]
//    * VPPERM m128, xmm, xmm, xmm    [XOP]
//    * VPPERM xmm, m128, xmm, xmm    [XOP]
//
func (self *Program) VPPERM(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPPERM", 4, Operands { v0, v1, v2, v3 })
    // VPPERM xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // XOP encoding: 0x8f escape, two prefix payload bytes, opcode
            // 0xa3, ModRM, then the fourth register in the high nibble of
            // a trailing immediate byte (the is4 operand).
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[2]) << 3))
            m.emit(0xa3)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Alternative encoding of the same form with v[0] and v[1]
            // swapped between ModRM and the immediate; the second payload
            // byte differs (0xf8 vs 0x78 — presumably the W/direction bit).
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[2]) << 3))
            m.emit(0xa3)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VPPERM m128, xmm, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory-selector form: XOP prefix via vex3 helper, memory operand
            // through ModRM/SIB, remaining register in the immediate nibble.
            m.vex3(0x8f, 0b1000, 0x80, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0xa3)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VPPERM xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xa3)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No encoder matched: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VPPERM")
    }
    return p
}
 81307  
// VPROLD performs "Rotate Packed Doubleword Left".
//
// Mnemonic        : VPROLD
// Supported forms : (6 forms)
//
//    * VPROLD imm8, m512/m32bcst, zmm{k}{z}    [AVX512F]
//    * VPROLD imm8, zmm, zmm{k}{z}             [AVX512F]
//    * VPROLD imm8, m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPROLD imm8, m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPROLD imm8, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPROLD imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPROLD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPROLD", 3, Operands { v0, v1, v2 })
    // VPROLD imm8, m512/m32bcst, zmm{k}{z}
    if isImm8(v0) && isM512M32bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Opcode 0x72 with ModRM reg field fixed at 1 (first mrsd arg) —
            // the opcode-extension slot selecting this rotate variant; the
            // destination register travels in EVEX.vvvv (vcode(v[2])).
            // Trailing byte is the imm8 rotate count.
            m.evex(0b01, 0x05, 0b10, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(1, addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPROLD imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: ModRM base 0xc8 = 0xc0 | 1<<3, i.e. the same
            // reg-field extension 1 as the memory form above.
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x72)
            m.emit(0xc8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPROLD imm8, m128/m32bcst, xmm{k}{z}
    if isImm8(v0) && isM128M32bcst(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(1, addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPROLD imm8, m256/m32bcst, ymm{k}{z}
    if isImm8(v0) && isM256M32bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(1, addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPROLD imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x72)
            m.emit(0xc8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPROLD imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x72)
            m.emit(0xc8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoder matched: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VPROLD")
    }
    return p
}
 81402  
// VPROLQ performs "Rotate Packed Quadword Left".
//
// Mnemonic        : VPROLQ
// Supported forms : (6 forms)
//
//    * VPROLQ imm8, m512/m64bcst, zmm{k}{z}    [AVX512F]
//    * VPROLQ imm8, zmm, zmm{k}{z}             [AVX512F]
//    * VPROLQ imm8, m128/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPROLQ imm8, m256/m64bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPROLQ imm8, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPROLQ imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPROLQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPROLQ", 3, Operands { v0, v1, v2 })
    // VPROLQ imm8, m512/m64bcst, zmm{k}{z}
    if isImm8(v0) && isM512M64bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same encoding shape as VPROLD (opcode 0x72, ModRM reg field 1,
            // trailing imm8 rotate count) except for the quadword selector:
            // 0x85 in the evex helper, 0xfd in the hand-emitted prefixes.
            m.evex(0b01, 0x85, 0b10, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(1, addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPROLQ imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x72)
            m.emit(0xc8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPROLQ imm8, m128/m64bcst, xmm{k}{z}
    if isImm8(v0) && isM128M64bcst(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(1, addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPROLQ imm8, m256/m64bcst, ymm{k}{z}
    if isImm8(v0) && isM256M64bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(1, addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPROLQ imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x72)
            m.emit(0xc8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPROLQ imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x72)
            m.emit(0xc8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoder matched: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VPROLQ")
    }
    return p
}
 81497  
// VPROLVD performs "Variable Rotate Packed Doubleword Left".
//
// Mnemonic        : VPROLVD
// Supported forms : (6 forms)
//
//    * VPROLVD m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPROLVD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPROLVD m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPROLVD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPROLVD m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPROLVD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPROLVD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPROLVD", 3, Operands { v0, v1, v2 })
    // VPROLVD m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Variable-count rotate: opcode 0x15, three full operands (no
            // imm8 — the per-element counts come from the first operand).
            // Compressed-displacement scale 64 for the 512-bit form.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x15)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPROLVD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-emitted 4-byte EVEX prefix (0x62 escape,
            // payload mixing register high bits, opmask and zeroing flags),
            // then opcode 0x15 and a reg-reg ModRM byte.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPROLVD m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x15)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPROLVD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPROLVD m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x15)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPROLVD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder matched: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VPROLVD")
    }
    return p
}
 81586  
 81587  // VPROLVQ performs "Variable Rotate Packed Quadword Left".
 81588  //
 81589  // Mnemonic        : VPROLVQ
 81590  // Supported forms : (6 forms)
 81591  //
 81592  //    * VPROLVQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
 81593  //    * VPROLVQ zmm, zmm, zmm{k}{z}             [AVX512F]
 81594  //    * VPROLVQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 81595  //    * VPROLVQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 81596  //    * VPROLVQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 81597  //    * VPROLVQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 81598  //
func (self *Program) VPROLVQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPROLVQ", 3, Operands { v0, v1, v2 })
    // Quadword variant of VPROLVD: identical encoder structure, but the EVEX
    // helper gets 0x85 instead of 0x05 and the hand-rolled forms use 0xfd
    // instead of 0x7d in the third prefix byte (the 0x80 difference is the
    // W bit, selecting 64-bit element size — confirm against the generator).
    // VPROLVQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 512-bit memory form; disp8 scaled by 64 bytes.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x15)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPROLVQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-rolled EVEX prefix (0x62 escape), opcode
            // 0x15, mod=11 ModRM; 0x40 = 512-bit length bits.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPROLVQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit memory form; disp8 scaled by 16 bytes.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x15)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPROLVQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit register form: length bits 0x00.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPROLVQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form; disp8 scaled by 32 bytes.
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x15)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPROLVQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form: length bits 0x20.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPROLVQ")
    }
    return p
}
 81675  
 81676  // VPRORD performs "Rotate Packed Doubleword Right".
 81677  //
 81678  // Mnemonic        : VPRORD
 81679  // Supported forms : (6 forms)
 81680  //
 81681  //    * VPRORD imm8, m512/m32bcst, zmm{k}{z}    [AVX512F]
 81682  //    * VPRORD imm8, zmm, zmm{k}{z}             [AVX512F]
 81683  //    * VPRORD imm8, m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
 81684  //    * VPRORD imm8, m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
 81685  //    * VPRORD imm8, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 81686  //    * VPRORD imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 81687  //
func (self *Program) VPRORD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPRORD", 3, Operands { v0, v1, v2 })
    // Immediate-count rotate: v0 is always an imm8, the rotated source is v1,
    // destination v2. Each matching pattern registers one candidate encoding;
    // unsupported combinations fall through to the panic at the end.
    // VPRORD imm8, m512/m32bcst, zmm{k}{z}
    if isImm8(v0) && isM512M32bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory source: opcode 0x72 with ModRM.reg fixed at 0 (opcode
            // extension — note the 0 passed to mrsd); imm8 is appended last.
            m.evex(0b01, 0x05, 0b10, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(0, addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPRORD imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register source: hand-rolled EVEX prefix (0x62 escape); the
            // ModRM byte 0xc0|lcode(v[1]) leaves the reg field at 0 (opcode
            // extension), and 0x40 selects the 512-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x72)
            m.emit(0xc0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPRORD imm8, m128/m32bcst, xmm{k}{z}
    if isImm8(v0) && isM128M32bcst(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit memory form; disp8 scaled by 16 bytes.
            m.evex(0b01, 0x05, 0b00, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(0, addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPRORD imm8, m256/m32bcst, ymm{k}{z}
    if isImm8(v0) && isM256M32bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form; disp8 scaled by 32 bytes.
            m.evex(0b01, 0x05, 0b01, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(0, addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPRORD imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit register form: length bits 0x00.
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x72)
            m.emit(0xc0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPRORD imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form: length bits 0x20.
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x72)
            m.emit(0xc0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoder matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPRORD")
    }
    return p
}
 81770  
 81771  // VPRORQ performs "Rotate Packed Quadword Right".
 81772  //
 81773  // Mnemonic        : VPRORQ
 81774  // Supported forms : (6 forms)
 81775  //
 81776  //    * VPRORQ imm8, m512/m64bcst, zmm{k}{z}    [AVX512F]
 81777  //    * VPRORQ imm8, zmm, zmm{k}{z}             [AVX512F]
 81778  //    * VPRORQ imm8, m128/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
 81779  //    * VPRORQ imm8, m256/m64bcst, ymm{k}{z}    [AVX512F,AVX512VL]
 81780  //    * VPRORQ imm8, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 81781  //    * VPRORQ imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 81782  //
func (self *Program) VPRORQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPRORQ", 3, Operands { v0, v1, v2 })
    // Quadword variant of VPRORD: same structure, but the EVEX helper gets
    // 0x85 instead of 0x05 and the hand-rolled forms use 0xfd instead of
    // 0x7d (the 0x80 difference is the W bit for 64-bit elements — confirm
    // against the generator).
    // VPRORQ imm8, m512/m64bcst, zmm{k}{z}
    if isImm8(v0) && isM512M64bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory source: ModRM.reg fixed at 0 (opcode extension);
            // disp8 scaled by 64 bytes; imm8 appended last.
            m.evex(0b01, 0x85, 0b10, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(0, addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPRORQ imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register source: hand-rolled EVEX prefix; 0x40 = 512-bit length.
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x72)
            m.emit(0xc0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPRORQ imm8, m128/m64bcst, xmm{k}{z}
    if isImm8(v0) && isM128M64bcst(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit memory form; disp8 scaled by 16 bytes.
            m.evex(0b01, 0x85, 0b00, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(0, addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPRORQ imm8, m256/m64bcst, ymm{k}{z}
    if isImm8(v0) && isM256M64bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form; disp8 scaled by 32 bytes.
            m.evex(0b01, 0x85, 0b01, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(0, addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPRORQ imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit register form: length bits 0x00.
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x72)
            m.emit(0xc0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPRORQ imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form: length bits 0x20.
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x72)
            m.emit(0xc0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoder matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPRORQ")
    }
    return p
}
 81865  
 81866  // VPRORVD performs "Variable Rotate Packed Doubleword Right".
 81867  //
 81868  // Mnemonic        : VPRORVD
 81869  // Supported forms : (6 forms)
 81870  //
 81871  //    * VPRORVD m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
 81872  //    * VPRORVD zmm, zmm, zmm{k}{z}             [AVX512F]
 81873  //    * VPRORVD m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 81874  //    * VPRORVD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 81875  //    * VPRORVD m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 81876  //    * VPRORVD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 81877  //
func (self *Program) VPRORVD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPRORVD", 3, Operands { v0, v1, v2 })
    // Variable-count right rotate: identical encoder structure to VPROLVD
    // but with opcode 0x14 instead of 0x15. Each matching operand pattern
    // registers one candidate encoding.
    // VPRORVD m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 512-bit memory form; disp8 scaled by 64 bytes.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x14)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPRORVD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-rolled EVEX prefix (0x62 escape), opcode
            // 0x14, mod=11 ModRM; 0x40 = 512-bit length bits.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPRORVD m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit memory form; disp8 scaled by 16 bytes.
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x14)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPRORVD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit register form: length bits 0x00.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPRORVD m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form; disp8 scaled by 32 bytes.
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x14)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPRORVD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form: length bits 0x20.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPRORVD")
    }
    return p
}
 81954  
 81955  // VPRORVQ performs "Variable Rotate Packed Quadword Right".
 81956  //
 81957  // Mnemonic        : VPRORVQ
 81958  // Supported forms : (6 forms)
 81959  //
 81960  //    * VPRORVQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
 81961  //    * VPRORVQ zmm, zmm, zmm{k}{z}             [AVX512F]
 81962  //    * VPRORVQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 81963  //    * VPRORVQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 81964  //    * VPRORVQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 81965  //    * VPRORVQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 81966  //
func (self *Program) VPRORVQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPRORVQ", 3, Operands { v0, v1, v2 })
    // Quadword variable-count right rotate: same structure as VPRORVD, with
    // 0x85 / 0xfd in the prefix (W bit set for 64-bit elements — confirm
    // against the generator) and opcode 0x14.
    // VPRORVQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 512-bit memory form; disp8 scaled by 64 bytes.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x14)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPRORVQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-rolled EVEX prefix; 0x40 = 512-bit length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPRORVQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit memory form; disp8 scaled by 16 bytes.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x14)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPRORVQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit register form: length bits 0x00.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPRORVQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form; disp8 scaled by 32 bytes.
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x14)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPRORVQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form: length bits 0x20.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPRORVQ")
    }
    return p
}
 82043  
 82044  // VPROTB performs "Packed Rotate Bytes".
 82045  //
 82046  // Mnemonic        : VPROTB
 82047  // Supported forms : (5 forms)
 82048  //
 82049  //    * VPROTB imm8, xmm, xmm     [XOP]
 82050  //    * VPROTB xmm, xmm, xmm      [XOP]
 82051  //    * VPROTB m128, xmm, xmm     [XOP]
 82052  //    * VPROTB imm8, m128, xmm    [XOP]
 82053  //    * VPROTB xmm, m128, xmm     [XOP]
 82054  //
func (self *Program) VPROTB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPROTB", 3, Operands { v0, v1, v2 })
    // AMD XOP-encoded byte rotate (0x8f escape byte). Each matching operand
    // pattern registers a candidate encoding; no match panics at the end.
    // VPROTB imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Immediate form: hand-rolled 3-byte XOP prefix, opcode 0xc0,
            // mod=11 ModRM, then the imm8 rotate count.
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78)
            m.emit(0xc0)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPROTB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form, opcode 0x90. Two equivalent encodings are
            // registered with the source operands swapped between ModRM.rm
            // and the vvvv field (note the 0x78 vs 0xf8 third prefix byte —
            // presumably the W bit selecting operand order; confirm against
            // AMD XOP encoding docs).
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[0]) << 3))
            m.emit(0x90)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))
            m.emit(0x90)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPROTB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory rotate amount: prefix via vex3 helper; disp scale is 1.
            m.vex3(0x8f, 0b1001, 0x80, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x90)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPROTB imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory source with immediate count: opcode 0xc0, vvvv unused (0).
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[2]), addr(v[1]), 0)
            m.emit(0xc0)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPROTB xmm, m128, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory source, register rotate amount (carried in vvvv).
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x90)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // No encoder matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPROTB")
    }
    return p
}
 82125  
 82126  // VPROTD performs "Packed Rotate Doublewords".
 82127  //
 82128  // Mnemonic        : VPROTD
 82129  // Supported forms : (5 forms)
 82130  //
 82131  //    * VPROTD imm8, xmm, xmm     [XOP]
 82132  //    * VPROTD xmm, xmm, xmm      [XOP]
 82133  //    * VPROTD m128, xmm, xmm     [XOP]
 82134  //    * VPROTD imm8, m128, xmm    [XOP]
 82135  //    * VPROTD xmm, m128, xmm     [XOP]
 82136  //
func (self *Program) VPROTD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPROTD", 3, Operands { v0, v1, v2 })
    // AMD XOP-encoded doubleword rotate (0x8f escape byte); identical shape
    // to VPROTB but with opcodes 0xc2 (immediate) and 0x92 (variable).
    // VPROTD imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Immediate form: hand-rolled 3-byte XOP prefix, opcode 0xc2,
            // mod=11 ModRM, then the imm8 rotate count.
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78)
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPROTD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form, opcode 0x92. Two equivalent encodings with the
            // source operands swapped between ModRM.rm and vvvv (0x78 vs
            // 0xf8 third prefix byte — presumably the W bit; confirm against
            // AMD XOP encoding docs).
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[0]) << 3))
            m.emit(0x92)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))
            m.emit(0x92)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPROTD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory rotate amount: prefix via vex3 helper; disp scale is 1.
            m.vex3(0x8f, 0b1001, 0x80, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x92)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPROTD imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory source with immediate count: opcode 0xc2, vvvv unused (0).
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[2]), addr(v[1]), 0)
            m.emit(0xc2)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPROTD xmm, m128, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory source, register rotate amount (carried in vvvv).
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x92)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // No encoder matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPROTD")
    }
    return p
}
 82207  
// VPROTQ performs "Packed Rotate Quadwords".
//
// Mnemonic        : VPROTQ
// Supported forms : (5 forms)
//
//    * VPROTQ imm8, xmm, xmm     [XOP]
//    * VPROTQ xmm, xmm, xmm      [XOP]
//    * VPROTQ m128, xmm, xmm     [XOP]
//    * VPROTQ imm8, m128, xmm    [XOP]
//    * VPROTQ xmm, m128, xmm     [XOP]
//
// One candidate encoding is added per matching operand form; if no form
// matches, the function panics.
func (self *Program) VPROTQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPROTQ", 3, Operands { v0, v1, v2 })
    // VPROTQ imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte XOP prefix (0x8f escape); register high bits
            // (hcode) are XOR-folded into the second prefix byte.
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78)
            // Opcode, then register-direct ModRM (reg=v[2], rm=v[1]).
            m.emit(0xc3)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            // 8-bit immediate rotate count.
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPROTQ xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // Two equivalent register-register encodings; note the second uses a
        // different base byte (0xf8 vs 0x78) with operand roles swapped —
        // presumably the W-bit-flipped form of XOP opcode 0x93.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[0]) << 3))
            m.emit(0x93)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))
            m.emit(0x93)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPROTQ m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // XOP prefix via helper (0b1001 appears to select opcode map 9),
            // then opcode and the memory operand (scale 1).
            m.vex3(0x8f, 0b1001, 0x80, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x93)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPROTQ imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Immediate form uses map 0b1000 and opcode 0xc3, matching the
            // register imm8 form above.
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[2]), addr(v[1]), 0)
            m.emit(0xc3)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPROTQ xmm, m128, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x93)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPROTQ")
    }
    return p
}
 82289  
// VPROTW performs "Packed Rotate Words".
//
// Mnemonic        : VPROTW
// Supported forms : (5 forms)
//
//    * VPROTW imm8, xmm, xmm     [XOP]
//    * VPROTW xmm, xmm, xmm      [XOP]
//    * VPROTW m128, xmm, xmm     [XOP]
//    * VPROTW imm8, m128, xmm    [XOP]
//    * VPROTW xmm, m128, xmm     [XOP]
//
// Identical in structure to VPROTQ, only the opcodes differ (0xc1 for the
// imm8 forms, 0x91 for the variable forms). One candidate encoding is added
// per matching operand form; if no form matches, the function panics.
func (self *Program) VPROTW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPROTW", 3, Operands { v0, v1, v2 })
    // VPROTW imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte XOP prefix; register high bits folded in.
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78)
            // Opcode, register-direct ModRM, then the 8-bit rotate count.
            m.emit(0xc1)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPROTW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // Two equivalent register-register encodings with operand roles
        // swapped (0x78 vs 0xf8 base — presumably the W-bit-flipped form).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[0]) << 3))
            m.emit(0x91)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))
            m.emit(0x91)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPROTW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x80, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x91)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPROTW imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Immediate form: map 0b1000, opcode 0xc1, trailing imm8.
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[2]), addr(v[1]), 0)
            m.emit(0xc1)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPROTW xmm, m128, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x91)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPROTW")
    }
    return p
}
 82371  
// VPSADBW performs "Compute Sum of Absolute Differences".
//
// Mnemonic        : VPSADBW
// Supported forms : (10 forms)
//
//    * VPSADBW xmm, xmm, xmm     [AVX]
//    * VPSADBW m128, xmm, xmm    [AVX]
//    * VPSADBW ymm, ymm, ymm     [AVX2]
//    * VPSADBW m256, ymm, ymm    [AVX2]
//    * VPSADBW zmm, zmm, zmm     [AVX512BW]
//    * VPSADBW m512, zmm, zmm    [AVX512BW]
//    * VPSADBW xmm, xmm, xmm     [AVX512BW,AVX512VL]
//    * VPSADBW m128, xmm, xmm    [AVX512BW,AVX512VL]
//    * VPSADBW ymm, ymm, ymm     [AVX512BW,AVX512VL]
//    * VPSADBW m256, ymm, ymm    [AVX512BW,AVX512VL]
//
// VEX forms use opcode 0xf6; the AVX-512 forms re-encode the same opcode
// with an EVEX prefix. One candidate encoding is added per matching operand
// form; if no form matches, the function panics.
func (self *Program) VPSADBW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSADBW", 3, Operands { v0, v1, v2 })
    // VPSADBW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix, opcode, register-direct ModRM.
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xf6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSADBW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xf6)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSADBW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // First vex2 argument 5 (vs 1 for xmm) — presumably sets the
            // 256-bit vector-length bit; confirm against the vex2 helper.
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xf6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSADBW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xf6)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSADBW zmm, zmm, zmm
    if isZMM(v0) && isZMM(v1) && isZMM(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (0x62 escape); the final | 0x40
            // appears to set the 512-bit vector-length field.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | 0x40)
            m.emit(0xf6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSADBW m512, zmm, zmm
    if isM512(v0) && isZMM(v1) && isZMM(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX via helper; mrsd scale 64 matches the 512-bit (64-byte)
            // memory operand, consistent with compressed displacement.
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), 0, 0, 0)
            m.emit(0xf6)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPSADBW xmm, xmm, xmm
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX layout as the zmm form, | 0x00 for 128-bit length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | 0x00)
            m.emit(0xf6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSADBW m128, xmm, xmm
    if isM128(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), 0, 0, 0)
            m.emit(0xf6)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSADBW ymm, ymm, ymm
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX layout, | 0x20 for 256-bit length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | 0x20)
            m.emit(0xf6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSADBW m256, ymm, ymm
    if isM256(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), 0, 0, 0)
            m.emit(0xf6)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPSADBW")
    }
    return p
}
 82504  
// VPSCATTERDD performs "Scatter Packed Doubleword Values with Signed Doubleword Indices".
//
// Mnemonic        : VPSCATTERDD
// Supported forms : (3 forms)
//
//    * VPSCATTERDD zmm, vm32z{k}    [AVX512F]
//    * VPSCATTERDD xmm, vm32x{k}    [AVX512F,AVX512VL]
//    * VPSCATTERDD ymm, vm32y{k}    [AVX512F,AVX512VL]
//
// All forms use EVEX opcode 0xa0 with a mandatory opmask on the vector
// memory operand (kcode); mrsd scale 4 matches the dword element size.
// Panics when no form matches.
func (self *Program) VPSCATTERDD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPSCATTERDD", 2, Operands { v0, v1 })
    // VPSCATTERDD zmm, vm32z{k}
    if isZMM(v0) && isVMZk(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa0)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // VPSCATTERDD xmm, vm32x{k}
    if isEVEXXMM(v0) && isVMXk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Third evex argument 0b00 (vs 0b10/0b01) — presumably the
            // vector-length field selecting 128-bit operation.
            m.evex(0b10, 0x05, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa0)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // VPSCATTERDD ymm, vm32y{k}
    if isEVEXYMM(v0) && isVMYk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa0)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPSCATTERDD")
    }
    return p
}
 82551  
// VPSCATTERDQ performs "Scatter Packed Quadword Values with Signed Doubleword Indices".
//
// Mnemonic        : VPSCATTERDQ
// Supported forms : (3 forms)
//
//    * VPSCATTERDQ zmm, vm32y{k}    [AVX512F]
//    * VPSCATTERDQ xmm, vm32x{k}    [AVX512F,AVX512VL]
//    * VPSCATTERDQ ymm, vm32x{k}    [AVX512F,AVX512VL]
//
// Quadword data with dword indices: the index vector is one size class
// smaller than the data vector (zmm data pairs with a ymm index). EVEX
// opcode 0xa0 with prefix byte 0x85 (vs 0x05 in VPSCATTERDD — presumably
// the W bit for 64-bit elements); mrsd scale 8 matches the qword element
// size. Panics when no form matches.
func (self *Program) VPSCATTERDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPSCATTERDQ", 2, Operands { v0, v1 })
    // VPSCATTERDQ zmm, vm32y{k}
    if isZMM(v0) && isVMYk(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa0)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VPSCATTERDQ xmm, vm32x{k}
    if isEVEXXMM(v0) && isVMXk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa0)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VPSCATTERDQ ymm, vm32x{k}
    if isEVEXYMM(v0) && isVMXk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa0)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPSCATTERDQ")
    }
    return p
}
 82598  
// VPSCATTERQD performs "Scatter Packed Doubleword Values with Signed Quadword Indices".
//
// Mnemonic        : VPSCATTERQD
// Supported forms : (3 forms)
//
//    * VPSCATTERQD ymm, vm64z{k}    [AVX512F]
//    * VPSCATTERQD xmm, vm64x{k}    [AVX512F,AVX512VL]
//    * VPSCATTERQD xmm, vm64y{k}    [AVX512F,AVX512VL]
//
// Dword data with qword indices: the data vector is one size class smaller
// than the index vector (ymm data pairs with a zmm index). EVEX opcode 0xa1;
// mrsd scale 4 matches the dword element size. Panics when no form matches.
func (self *Program) VPSCATTERQD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPSCATTERQD", 2, Operands { v0, v1 })
    // VPSCATTERQD ymm, vm64z{k}
    if isEVEXYMM(v0) && isVMZk(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa1)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // VPSCATTERQD xmm, vm64x{k}
    if isEVEXXMM(v0) && isVMXk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa1)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // VPSCATTERQD xmm, vm64y{k}
    if isEVEXXMM(v0) && isVMYk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa1)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPSCATTERQD")
    }
    return p
}
 82645  
// VPSCATTERQQ performs "Scatter Packed Quadword Values with Signed Quadword Indices".
//
// Mnemonic        : VPSCATTERQQ
// Supported forms : (3 forms)
//
//    * VPSCATTERQQ zmm, vm64z{k}    [AVX512F]
//    * VPSCATTERQQ xmm, vm64x{k}    [AVX512F,AVX512VL]
//    * VPSCATTERQQ ymm, vm64y{k}    [AVX512F,AVX512VL]
//
// Qword data with qword indices (same size class for both). EVEX opcode
// 0xa1 with prefix byte 0x85 (W bit for 64-bit elements, presumably); mrsd
// scale 8 matches the qword element size. Panics when no form matches.
func (self *Program) VPSCATTERQQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPSCATTERQQ", 2, Operands { v0, v1 })
    // VPSCATTERQQ zmm, vm64z{k}
    if isZMM(v0) && isVMZk(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa1)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VPSCATTERQQ xmm, vm64x{k}
    if isEVEXXMM(v0) && isVMXk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa1)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VPSCATTERQQ ymm, vm64y{k}
    if isEVEXYMM(v0) && isVMYk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa1)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPSCATTERQQ")
    }
    return p
}
 82692  
// VPSHAB performs "Packed Shift Arithmetic Bytes".
//
// Mnemonic        : VPSHAB
// Supported forms : (3 forms)
//
//    * VPSHAB xmm, xmm, xmm     [XOP]
//    * VPSHAB m128, xmm, xmm    [XOP]
//    * VPSHAB xmm, m128, xmm    [XOP]
//
// XOP opcode 0x98, map 0b1001. One candidate encoding is added per matching
// operand form; if no form matches, the function panics.
func (self *Program) VPSHAB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSHAB", 3, Operands { v0, v1, v2 })
    // VPSHAB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // Two equivalent register-register encodings with operand roles
        // swapped (0x78 vs 0xf8 base — presumably the W-bit-flipped form).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[0]) << 3))
            m.emit(0x98)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))
            m.emit(0x98)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSHAB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x80, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x98)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSHAB xmm, m128, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x98)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPSHAB")
    }
    return p
}
 82748  
// VPSHAD performs "Packed Shift Arithmetic Doublewords".
//
// Mnemonic        : VPSHAD
// Supported forms : (3 forms)
//
//    * VPSHAD xmm, xmm, xmm     [XOP]
//    * VPSHAD m128, xmm, xmm    [XOP]
//    * VPSHAD xmm, m128, xmm    [XOP]
//
// Identical in structure to VPSHAB, with XOP opcode 0x9a. One candidate
// encoding is added per matching operand form; panics when none match.
func (self *Program) VPSHAD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSHAD", 3, Operands { v0, v1, v2 })
    // VPSHAD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // Two equivalent register-register encodings with operand roles
        // swapped (0x78 vs 0xf8 base — presumably the W-bit-flipped form).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[0]) << 3))
            m.emit(0x9a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))
            m.emit(0x9a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSHAD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x80, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x9a)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSHAD xmm, m128, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x9a)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPSHAD")
    }
    return p
}
 82804  
// VPSHAQ performs "Packed Shift Arithmetic Quadwords".
//
// Mnemonic        : VPSHAQ
// Supported forms : (3 forms)
//
//    * VPSHAQ xmm, xmm, xmm     [XOP]
//    * VPSHAQ m128, xmm, xmm    [XOP]
//    * VPSHAQ xmm, m128, xmm    [XOP]
//
// Identical in structure to VPSHAB, with XOP opcode 0x9b. One candidate
// encoding is added per matching operand form; panics when none match.
func (self *Program) VPSHAQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSHAQ", 3, Operands { v0, v1, v2 })
    // VPSHAQ xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // Two equivalent register-register encodings with operand roles
        // swapped (0x78 vs 0xf8 base — presumably the W-bit-flipped form).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[0]) << 3))
            m.emit(0x9b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))
            m.emit(0x9b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSHAQ m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x80, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x9b)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSHAQ xmm, m128, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x9b)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPSHAQ")
    }
    return p
}
 82860  
// VPSHAW performs "Packed Shift Arithmetic Words".
//
// Mnemonic        : VPSHAW
// Supported forms : (3 forms)
//
//    * VPSHAW xmm, xmm, xmm     [XOP]
//    * VPSHAW m128, xmm, xmm    [XOP]
//    * VPSHAW xmm, m128, xmm    [XOP]
//
// Identical in structure to VPSHAB, with XOP opcode 0x99. One candidate
// encoding is added per matching operand form; panics when none match.
func (self *Program) VPSHAW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSHAW", 3, Operands { v0, v1, v2 })
    // VPSHAW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // Two equivalent register-register encodings with operand roles
        // swapped (0x78 vs 0xf8 base — presumably the W-bit-flipped form).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[0]) << 3))
            m.emit(0x99)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))
            m.emit(0x99)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSHAW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x80, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x99)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSHAW xmm, m128, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x99)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPSHAW")
    }
    return p
}
 82916  
// VPSHLB performs "Packed Shift Logical Bytes".
//
// Mnemonic        : VPSHLB
// Supported forms : (3 forms)
//
//    * VPSHLB xmm, xmm, xmm     [XOP]
//    * VPSHLB m128, xmm, xmm    [XOP]
//    * VPSHLB xmm, m128, xmm    [XOP]
//
// Identical in structure to VPSHAB, with XOP opcode 0x94. One candidate
// encoding is added per matching operand form; panics when none match.
func (self *Program) VPSHLB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSHLB", 3, Operands { v0, v1, v2 })
    // VPSHLB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // Two equivalent register-register encodings with operand roles
        // swapped (0x78 vs 0xf8 base — presumably the W-bit-flipped form).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[0]) << 3))
            m.emit(0x94)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))
            m.emit(0x94)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSHLB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x80, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x94)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSHLB xmm, m128, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x94)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPSHLB")
    }
    return p
}
 82972  
 82973  // VPSHLD performs "Packed Shift Logical Doublewords".
 82974  //
 82975  // Mnemonic        : VPSHLD
 82976  // Supported forms : (3 forms)
 82977  //
 82978  //    * VPSHLD xmm, xmm, xmm     [XOP]
 82979  //    * VPSHLD m128, xmm, xmm    [XOP]
 82980  //    * VPSHLD xmm, m128, xmm    [XOP]
 82981  //
func (self *Program) VPSHLD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSHLD", 3, Operands { v0, v1, v2 })
    // VPSHLD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // Hand-rolled 3-byte XOP prefix (0x8f escape), W = 0:
        // v[0] is carried in XOP.vvvv, v[1] in ModRM.rm. Opcode 0x96.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[0]) << 3))
            m.emit(0x96)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
        // Equivalent encoding with W = 1 (0x78 -> 0xf8): the roles of
        // v[0]/v[1] are swapped between XOP.vvvv and ModRM.rm.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))
            m.emit(0x96)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSHLD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // W = 1 (0x80): memory operand v[0] is encoded in ModRM, register v[1] in vvvv.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x80, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x96)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSHLD xmm, m128, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // W = 0 (0x00): memory operand v[1] is encoded in ModRM, register v[0] in vvvv.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x96)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // No supported form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPSHLD")
    }
    return p
}
 83028  
 83029  // VPSHLQ performs "Packed Shift Logical Quadwords".
 83030  //
 83031  // Mnemonic        : VPSHLQ
 83032  // Supported forms : (3 forms)
 83033  //
 83034  //    * VPSHLQ xmm, xmm, xmm     [XOP]
 83035  //    * VPSHLQ m128, xmm, xmm    [XOP]
 83036  //    * VPSHLQ xmm, m128, xmm    [XOP]
 83037  //
func (self *Program) VPSHLQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSHLQ", 3, Operands { v0, v1, v2 })
    // VPSHLQ xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // Hand-rolled 3-byte XOP prefix (0x8f escape), W = 0:
        // v[0] is carried in XOP.vvvv, v[1] in ModRM.rm. Opcode 0x97.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[0]) << 3))
            m.emit(0x97)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
        // Equivalent encoding with W = 1 (0x78 -> 0xf8): the roles of
        // v[0]/v[1] are swapped between XOP.vvvv and ModRM.rm.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))
            m.emit(0x97)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSHLQ m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // W = 1 (0x80): memory operand v[0] is encoded in ModRM, register v[1] in vvvv.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x80, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x97)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSHLQ xmm, m128, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // W = 0 (0x00): memory operand v[1] is encoded in ModRM, register v[0] in vvvv.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x97)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // No supported form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPSHLQ")
    }
    return p
}
 83084  
 83085  // VPSHLW performs "Packed Shift Logical Words".
 83086  //
 83087  // Mnemonic        : VPSHLW
 83088  // Supported forms : (3 forms)
 83089  //
 83090  //    * VPSHLW xmm, xmm, xmm     [XOP]
 83091  //    * VPSHLW m128, xmm, xmm    [XOP]
 83092  //    * VPSHLW xmm, m128, xmm    [XOP]
 83093  //
func (self *Program) VPSHLW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSHLW", 3, Operands { v0, v1, v2 })
    // VPSHLW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // Hand-rolled 3-byte XOP prefix (0x8f escape), W = 0:
        // v[0] is carried in XOP.vvvv, v[1] in ModRM.rm. Opcode 0x95.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[0]) << 3))
            m.emit(0x95)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
        // Equivalent encoding with W = 1 (0x78 -> 0xf8): the roles of
        // v[0]/v[1] are swapped between XOP.vvvv and ModRM.rm.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))
            m.emit(0x95)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSHLW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // W = 1 (0x80): memory operand v[0] is encoded in ModRM, register v[1] in vvvv.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x80, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x95)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSHLW xmm, m128, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // W = 0 (0x00): memory operand v[1] is encoded in ModRM, register v[0] in vvvv.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x95)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // No supported form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPSHLW")
    }
    return p
}
 83140  
 83141  // VPSHUFB performs "Packed Shuffle Bytes".
 83142  //
 83143  // Mnemonic        : VPSHUFB
 83144  // Supported forms : (10 forms)
 83145  //
 83146  //    * VPSHUFB xmm, xmm, xmm           [AVX]
 83147  //    * VPSHUFB m128, xmm, xmm          [AVX]
 83148  //    * VPSHUFB ymm, ymm, ymm           [AVX2]
 83149  //    * VPSHUFB m256, ymm, ymm          [AVX2]
 83150  //    * VPSHUFB zmm, zmm, zmm{k}{z}     [AVX512BW]
 83151  //    * VPSHUFB m512, zmm, zmm{k}{z}    [AVX512BW]
 83152  //    * VPSHUFB xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
 83153  //    * VPSHUFB m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
 83154  //    * VPSHUFB ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
 83155  //    * VPSHUFB m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
 83156  //
func (self *Program) VPSHUFB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSHUFB", 3, Operands { v0, v1, v2 })
    // VPSHUFB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // Hand-rolled 3-byte VEX prefix (0xc4 escape) for the reg-reg form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x00)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSHUFB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x00)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSHUFB ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        // Same VEX encoding as the xmm form; 0x7d (vs 0x79) sets VEX.L for 256-bit.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x00)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSHUFB m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x00)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSHUFB zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        // Hand-rolled 4-byte EVEX prefix (0x62 escape); the final | 0x40
        // selects the 512-bit vector length, z/mask bits come from v[2].
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x00)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSHUFB m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x00)
            // 64 is the disp8 compression factor for the 512-bit memory operand.
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPSHUFB xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        // Same EVEX layout as the zmm form; | 0x00 selects the 128-bit vector length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x00)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSHUFB m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x00)
            // 16 is the disp8 compression factor for the 128-bit memory operand.
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSHUFB ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        // Same EVEX layout as the zmm form; | 0x20 selects the 256-bit vector length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x00)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSHUFB m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x00)
            // 32 is the disp8 compression factor for the 256-bit memory operand.
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No supported form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPSHUFB")
    }
    return p
}
 83277  
 83278  // VPSHUFD performs "Shuffle Packed Doublewords".
 83279  //
 83280  // Mnemonic        : VPSHUFD
 83281  // Supported forms : (10 forms)
 83282  //
 83283  //    * VPSHUFD imm8, xmm, xmm                   [AVX]
 83284  //    * VPSHUFD imm8, m128, xmm                  [AVX]
 83285  //    * VPSHUFD imm8, ymm, ymm                   [AVX2]
 83286  //    * VPSHUFD imm8, m256, ymm                  [AVX2]
 83287  //    * VPSHUFD imm8, m512/m32bcst, zmm{k}{z}    [AVX512F]
 83288  //    * VPSHUFD imm8, zmm, zmm{k}{z}             [AVX512F]
 83289  //    * VPSHUFD imm8, m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
 83290  //    * VPSHUFD imm8, m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
 83291  //    * VPSHUFD imm8, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 83292  //    * VPSHUFD imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 83293  //
func (self *Program) VPSHUFD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSHUFD", 3, Operands { v0, v1, v2 })
    // VPSHUFD imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // 2-byte VEX; VEX.vvvv is unused (0) since the source is in ModRM.rm.
        // The trailing imm8 is the shuffle control.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[1], 0)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFD imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x70)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFD imm8, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        // vex2 flag 5 (vs 1 in the xmm form): the +4 selects the 256-bit length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[1], 0)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFD imm8, m256, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x70)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFD imm8, m512/m32bcst, zmm{k}{z}
    if isImm8(v0) && isM512M32bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // bcode(v[1]) carries the embedded-broadcast bit of the m32bcst operand.
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x70)
            // 64 is the disp8 compression factor for the 512-bit memory operand.
            m.mrsd(lcode(v[2]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFD imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // Hand-rolled 4-byte EVEX prefix (0x62 escape); 0x48 = 512-bit length
        // (0x40) plus the fixed 0x08 bit, with z/mask bits from v[2].
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFD imm8, m128/m32bcst, xmm{k}{z}
    if isImm8(v0) && isM128M32bcst(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x70)
            m.mrsd(lcode(v[2]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFD imm8, m256/m32bcst, ymm{k}{z}
    if isImm8(v0) && isM256M32bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x70)
            m.mrsd(lcode(v[2]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFD imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // Same EVEX layout as the zmm form; 0x08 selects the 128-bit length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x08)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFD imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // Same EVEX layout as the zmm form; 0x28 selects the 256-bit length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x28)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No supported form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPSHUFD")
    }
    return p
}
 83420  
 83421  // VPSHUFHW performs "Shuffle Packed High Words".
 83422  //
 83423  // Mnemonic        : VPSHUFHW
 83424  // Supported forms : (10 forms)
 83425  //
 83426  //    * VPSHUFHW imm8, xmm, xmm           [AVX]
 83427  //    * VPSHUFHW imm8, m128, xmm          [AVX]
 83428  //    * VPSHUFHW imm8, ymm, ymm           [AVX2]
 83429  //    * VPSHUFHW imm8, m256, ymm          [AVX2]
 83430  //    * VPSHUFHW imm8, zmm, zmm{k}{z}     [AVX512BW]
 83431  //    * VPSHUFHW imm8, m512, zmm{k}{z}    [AVX512BW]
 83432  //    * VPSHUFHW imm8, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
 83433  //    * VPSHUFHW imm8, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
 83434  //    * VPSHUFHW imm8, m128, xmm{k}{z}    [AVX512BW,AVX512VL]
 83435  //    * VPSHUFHW imm8, m256, ymm{k}{z}    [AVX512BW,AVX512VL]
 83436  //
func (self *Program) VPSHUFHW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSHUFHW", 3, Operands { v0, v1, v2 })
    // VPSHUFHW imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // 2-byte VEX; VEX.vvvv unused (0). vex2 flag 2 differs from VPSHUFD's 1
        // only in the mandatory-prefix selector. The trailing imm8 is the
        // shuffle control.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[2]), v[1], 0)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFHW imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x70)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFHW imm8, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        // vex2 flag 6 (vs 2 in the xmm form): the +4 selects the 256-bit length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(6, hcode(v[2]), v[1], 0)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFHW imm8, m256, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(6, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x70)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFHW imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        // Hand-rolled 4-byte EVEX prefix (0x62 escape); 0x48 = 512-bit length
        // (0x40) plus the fixed 0x08 bit, with z/mask bits from v[2].
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFHW imm8, m512, zmm{k}{z}
    if isImm8(v0) && isM512(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b10, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x70)
            // 64 is the disp8 compression factor for the 512-bit memory operand.
            m.mrsd(lcode(v[2]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFHW imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        // Same EVEX layout as the zmm form; 0x08 selects the 128-bit length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x08)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFHW imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        // Same EVEX layout as the zmm form; 0x28 selects the 256-bit length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x28)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFHW imm8, m128, xmm{k}{z}
    if isImm8(v0) && isM128(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b00, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x70)
            m.mrsd(lcode(v[2]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFHW imm8, m256, ymm{k}{z}
    if isImm8(v0) && isM256(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b01, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x70)
            m.mrsd(lcode(v[2]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No supported form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPSHUFHW")
    }
    return p
}
 83563  
 83564  // VPSHUFLW performs "Shuffle Packed Low Words".
 83565  //
 83566  // Mnemonic        : VPSHUFLW
 83567  // Supported forms : (10 forms)
 83568  //
 83569  //    * VPSHUFLW imm8, xmm, xmm           [AVX]
 83570  //    * VPSHUFLW imm8, m128, xmm          [AVX]
 83571  //    * VPSHUFLW imm8, ymm, ymm           [AVX2]
 83572  //    * VPSHUFLW imm8, m256, ymm          [AVX2]
 83573  //    * VPSHUFLW imm8, zmm, zmm{k}{z}     [AVX512BW]
 83574  //    * VPSHUFLW imm8, m512, zmm{k}{z}    [AVX512BW]
 83575  //    * VPSHUFLW imm8, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
 83576  //    * VPSHUFLW imm8, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
 83577  //    * VPSHUFLW imm8, m128, xmm{k}{z}    [AVX512BW,AVX512VL]
 83578  //    * VPSHUFLW imm8, m256, ymm{k}{z}    [AVX512BW,AVX512VL]
 83579  //
func (self *Program) VPSHUFLW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSHUFLW", 3, Operands { v0, v1, v2 })
    // VPSHUFLW imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // 2-byte VEX; VEX.vvvv unused (0). vex2 flag 3 differs from
        // VPSHUFD/VPSHUFHW (1/2) only in the mandatory-prefix selector.
        // The trailing imm8 is the shuffle control.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), v[1], 0)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFLW imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x70)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFLW imm8, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        // vex2 flag 7 (vs 3 in the xmm form): the +4 selects the 256-bit length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(7, hcode(v[2]), v[1], 0)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFLW imm8, m256, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(7, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x70)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFLW imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        // Hand-rolled 4-byte EVEX prefix (0x62 escape); 0x48 = 512-bit length
        // (0x40) plus the fixed 0x08 bit, with z/mask bits from v[2].
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7f)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFLW imm8, m512, zmm{k}{z}
    if isImm8(v0) && isM512(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x07, 0b10, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x70)
            // 64 is the disp8 compression factor for the 512-bit memory operand.
            m.mrsd(lcode(v[2]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFLW imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        // Same EVEX layout as the zmm form; 0x08 selects the 128-bit length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7f)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x08)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFLW imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        // Same EVEX layout as the zmm form; 0x28 selects the 256-bit length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7f)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x28)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFLW imm8, m128, xmm{k}{z}
    if isImm8(v0) && isM128(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x07, 0b00, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x70)
            m.mrsd(lcode(v[2]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFLW imm8, m256, ymm{k}{z}
    if isImm8(v0) && isM256(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x07, 0b01, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x70)
            m.mrsd(lcode(v[2]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No supported form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPSHUFLW")
    }
    return p
}
 83706  
// VPSIGNB performs "Packed Sign of Byte Integers".
//
// Mnemonic        : VPSIGNB
// Supported forms : (4 forms)
//
//    * VPSIGNB xmm, xmm, xmm     [AVX]
//    * VPSIGNB m128, xmm, xmm    [AVX]
//    * VPSIGNB ymm, ymm, ymm     [AVX2]
//    * VPSIGNB m256, ymm, ymm    [AVX2]
//
// Part of a machine-generated encoder table (see the "DO NOT EDIT" file
// header); prefer regenerating over hand-editing. Each `if` below matches one
// operand form from the list above, records the required ISA extension via
// self.require, and registers a closure with p.add that emits the raw
// instruction bytes for that form. If no form matches, assembly panics.
func (self *Program) VPSIGNB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Allocate the instruction record for mnemonic "VPSIGNB" and its 3 operands.
    p := self.alloc("VPSIGNB", 3, Operands { v0, v1, v2 })
    // VPSIGNB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix (0xc4) followed by opcode 0x08 and a
            // register-register ModRM byte; register extension bits are
            // folded in via the hcode/hlcode/lcode helpers.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x08)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSIGNB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory-source form: vex3 builds the prefix from the memory
            // operand; mrsd emits ModRM/SIB/displacement for addr(v[0]).
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x08)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSIGNB ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x08)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSIGNB m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x08)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No encoder matched the supplied operand types: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VPSIGNB")
    }
    return p
}
 83768  
// VPSIGND performs "Packed Sign of Doubleword Integers".
//
// Mnemonic        : VPSIGND
// Supported forms : (4 forms)
//
//    * VPSIGND xmm, xmm, xmm     [AVX]
//    * VPSIGND m128, xmm, xmm    [AVX]
//    * VPSIGND ymm, ymm, ymm     [AVX2]
//    * VPSIGND m256, ymm, ymm    [AVX2]
//
// Part of a machine-generated encoder table (see the "DO NOT EDIT" file
// header); prefer regenerating over hand-editing. Structure is identical to
// VPSIGNB except for the opcode byte (0x0a here): each `if` matches one
// operand form, records the ISA requirement, and registers a byte-emitting
// closure. Assembly panics if no form matches.
func (self *Program) VPSIGND(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Allocate the instruction record for mnemonic "VPSIGND" and its 3 operands.
    p := self.alloc("VPSIGND", 3, Operands { v0, v1, v2 })
    // VPSIGND xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix (0xc4), opcode 0x0a, then reg-reg ModRM.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x0a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSIGND m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory-source form: ModRM/SIB/displacement via mrsd.
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x0a)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSIGND ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x0a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSIGND m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x0a)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No encoder matched the supplied operand types: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VPSIGND")
    }
    return p
}
 83830  
// VPSIGNW performs "Packed Sign of Word Integers".
//
// Mnemonic        : VPSIGNW
// Supported forms : (4 forms)
//
//    * VPSIGNW xmm, xmm, xmm     [AVX]
//    * VPSIGNW m128, xmm, xmm    [AVX]
//    * VPSIGNW ymm, ymm, ymm     [AVX2]
//    * VPSIGNW m256, ymm, ymm    [AVX2]
//
// Part of a machine-generated encoder table (see the "DO NOT EDIT" file
// header); prefer regenerating over hand-editing. Structure is identical to
// VPSIGNB/VPSIGND except for the opcode byte (0x09 here): each `if` matches
// one operand form, records the ISA requirement, and registers a
// byte-emitting closure. Assembly panics if no form matches.
func (self *Program) VPSIGNW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Allocate the instruction record for mnemonic "VPSIGNW" and its 3 operands.
    p := self.alloc("VPSIGNW", 3, Operands { v0, v1, v2 })
    // VPSIGNW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix (0xc4), opcode 0x09, then reg-reg ModRM.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x09)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSIGNW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory-source form: ModRM/SIB/displacement via mrsd.
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x09)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSIGNW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x09)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSIGNW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x09)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No encoder matched the supplied operand types: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VPSIGNW")
    }
    return p
}
 83892  
// VPSLLD performs "Shift Packed Doubleword Data Left Logical".
//
// Mnemonic        : VPSLLD
// Supported forms : (18 forms)
//
//    * VPSLLD imm8, xmm, xmm                   [AVX]
//    * VPSLLD xmm, xmm, xmm                    [AVX]
//    * VPSLLD m128, xmm, xmm                   [AVX]
//    * VPSLLD imm8, ymm, ymm                   [AVX2]
//    * VPSLLD xmm, ymm, ymm                    [AVX2]
//    * VPSLLD m128, ymm, ymm                   [AVX2]
//    * VPSLLD imm8, m512/m32bcst, zmm{k}{z}    [AVX512F]
//    * VPSLLD imm8, zmm, zmm{k}{z}             [AVX512F]
//    * VPSLLD xmm, zmm, zmm{k}{z}              [AVX512F]
//    * VPSLLD m128, zmm, zmm{k}{z}             [AVX512F]
//    * VPSLLD imm8, m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPSLLD imm8, m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPSLLD imm8, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPSLLD xmm, xmm, xmm{k}{z}              [AVX512F,AVX512VL]
//    * VPSLLD m128, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPSLLD imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//    * VPSLLD xmm, ymm, ymm{k}{z}              [AVX512F,AVX512VL]
//    * VPSLLD m128, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Part of a machine-generated encoder table (see the "DO NOT EDIT" file
// header); prefer regenerating over hand-editing. Each `if` below matches one
// operand form, records the required ISA extension(s), and registers a
// closure that emits the raw bytes for that form. VEX forms use vex2/raw
// prefix bytes with opcode 0x72 (immediate-shift, /6 via the 0xf0 ModRM
// base) or 0xf2 (register/memory count). EVEX forms (raw 0x62 prefix or
// m.evex) additionally fold in opmask (kcode), zeroing (zcode) and
// broadcast (bcode) bits from the {k}{z}/m32bcst operands. Assembly panics
// if no form matches.
func (self *Program) VPSLLD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Allocate the instruction record for mnemonic "VPSLLD" and its 3 operands.
    p := self.alloc("VPSLLD", 3, Operands { v0, v1, v2 })
    // VPSLLD imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Immediate form: opcode 0x72, shift count appended as imm8.
            m.vex2(1, 0, v[1], hlcode(v[2]))
            m.emit(0x72)
            m.emit(0xf0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register count form: opcode 0xf2, reg-reg ModRM.
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xf2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSLLD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory count form: ModRM/SIB/displacement via mrsd.
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xf2)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSLLD imm8, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, 0, v[1], hlcode(v[2]))
            m.emit(0x72)
            m.emit(0xf0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLD xmm, ymm, ymm
    if isXMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xf2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSLLD m128, ymm, ymm
    if isM128(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xf2)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSLLD imm8, m512/m32bcst, zmm{k}{z}
    if isImm8(v0) && isM512M32bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form: mrsd's reg field is the fixed /6 extension
            // for the immediate-shift opcode; disp scale is 64 (full zmm).
            m.evex(0b01, 0x05, 0b10, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(6, addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLD imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built EVEX prefix (0x62 + 3 payload bytes); the trailing
            // | 0x40 selects the 512-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x72)
            m.emit(0xf0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLD xmm, zmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xf2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSLLD m128, zmm, zmm{k}{z}
    if isM128(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Count comes from memory: disp scale is 16 (one xmm-sized load).
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xf2)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSLLD imm8, m128/m32bcst, xmm{k}{z}
    if isImm8(v0) && isM128M32bcst(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(6, addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLD imm8, m256/m32bcst, ymm{k}{z}
    if isImm8(v0) && isM256M32bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(6, addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLD imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // As the zmm form above but | 0x00: 128-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x72)
            m.emit(0xf0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xf2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSLLD m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xf2)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSLLD imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // As the zmm form above but | 0x20: 256-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x72)
            m.emit(0xf0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLD xmm, ymm, ymm{k}{z}
    if isEVEXXMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xf2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSLLD m128, ymm, ymm{k}{z}
    if isM128(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xf2)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // No encoder matched the supplied operand types: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VPSLLD")
    }
    return p
}
 84130  
// VPSLLDQ performs "Shift Packed Double Quadword Left Logical".
//
// Mnemonic        : VPSLLDQ
// Supported forms : (8 forms)
//
//    * VPSLLDQ imm8, xmm, xmm     [AVX]
//    * VPSLLDQ imm8, ymm, ymm     [AVX2]
//    * VPSLLDQ imm8, zmm, zmm     [AVX512BW]
//    * VPSLLDQ imm8, m512, zmm    [AVX512BW]
//    * VPSLLDQ imm8, xmm, xmm     [AVX512BW,AVX512VL]
//    * VPSLLDQ imm8, m128, xmm    [AVX512BW,AVX512VL]
//    * VPSLLDQ imm8, ymm, ymm     [AVX512BW,AVX512VL]
//    * VPSLLDQ imm8, m256, ymm    [AVX512BW,AVX512VL]
//
// Part of a machine-generated encoder table (see the "DO NOT EDIT" file
// header); prefer regenerating over hand-editing. All forms take an imm8
// byte count and use opcode 0x73 with the /7 extension (the 0xf8 ModRM
// base, or mrsd's reg=7 for memory operands). Unlike VPSLLD, the AVX-512
// forms carry no opmask/zeroing/broadcast bits (kcode/zcode/bcode are all
// zero or absent). Assembly panics if no form matches.
func (self *Program) VPSLLDQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Allocate the instruction record for mnemonic "VPSLLDQ" and its 3 operands.
    p := self.alloc("VPSLLDQ", 3, Operands { v0, v1, v2 })
    // VPSLLDQ imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX form: opcode 0x73, /7 ModRM (0xf8), shift count as imm8.
            m.vex2(1, 0, v[1], hlcode(v[2]))
            m.emit(0x73)
            m.emit(0xf8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLDQ imm8, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, 0, v[1], hlcode(v[2]))
            m.emit(0x73)
            m.emit(0xf8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLDQ imm8, zmm, zmm
    if isImm8(v0) && isZMM(v1) && isZMM(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built EVEX prefix (0x62 + 3 payload bytes); the trailing
            // | 0x40 selects the 512-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | 0x40)
            m.emit(0x73)
            m.emit(0xf8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLDQ imm8, m512, zmm
    if isImm8(v0) && isM512(v1) && isZMM(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: reg field 7 in mrsd is the opcode extension;
            // disp scale 64 matches the 512-bit operand.
            m.evex(0b01, 0x05, 0b10, 0, addr(v[1]), vcode(v[2]), 0, 0, 0)
            m.emit(0x73)
            m.mrsd(7, addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLDQ imm8, xmm, xmm
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // As the zmm form above but | 0x00: 128-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | 0x00)
            m.emit(0x73)
            m.emit(0xf8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLDQ imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, 0, addr(v[1]), vcode(v[2]), 0, 0, 0)
            m.emit(0x73)
            m.mrsd(7, addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLDQ imm8, ymm, ymm
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // As the zmm form above but | 0x20: 256-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | 0x20)
            m.emit(0x73)
            m.emit(0xf8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLDQ imm8, m256, ymm
    if isImm8(v0) && isM256(v1) && isEVEXYMM(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, 0, addr(v[1]), vcode(v[2]), 0, 0, 0)
            m.emit(0x73)
            m.mrsd(7, addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoder matched the supplied operand types: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VPSLLDQ")
    }
    return p
}
 84249  
 84250  // VPSLLQ performs "Shift Packed Quadword Data Left Logical".
 84251  //
 84252  // Mnemonic        : VPSLLQ
 84253  // Supported forms : (18 forms)
 84254  //
 84255  //    * VPSLLQ imm8, xmm, xmm                   [AVX]
 84256  //    * VPSLLQ xmm, xmm, xmm                    [AVX]
 84257  //    * VPSLLQ m128, xmm, xmm                   [AVX]
 84258  //    * VPSLLQ imm8, ymm, ymm                   [AVX2]
 84259  //    * VPSLLQ xmm, ymm, ymm                    [AVX2]
 84260  //    * VPSLLQ m128, ymm, ymm                   [AVX2]
 84261  //    * VPSLLQ imm8, m512/m64bcst, zmm{k}{z}    [AVX512F]
 84262  //    * VPSLLQ imm8, zmm, zmm{k}{z}             [AVX512F]
 84263  //    * VPSLLQ xmm, zmm, zmm{k}{z}              [AVX512F]
 84264  //    * VPSLLQ m128, zmm, zmm{k}{z}             [AVX512F]
 84265  //    * VPSLLQ imm8, m128/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
 84266  //    * VPSLLQ imm8, m256/m64bcst, ymm{k}{z}    [AVX512F,AVX512VL]
 84267  //    * VPSLLQ imm8, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 84268  //    * VPSLLQ xmm, xmm, xmm{k}{z}              [AVX512F,AVX512VL]
 84269  //    * VPSLLQ m128, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 84270  //    * VPSLLQ imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 84271  //    * VPSLLQ xmm, ymm, ymm{k}{z}              [AVX512F,AVX512VL]
 84272  //    * VPSLLQ m128, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 84273  //
 84274  func (self *Program) VPSLLQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
 84275      p := self.alloc("VPSLLQ", 3, Operands { v0, v1, v2 })
 84276      // VPSLLQ imm8, xmm, xmm
 84277      if isImm8(v0) && isXMM(v1) && isXMM(v2) {
 84278          self.require(ISA_AVX)
 84279          p.domain = DomainAVX
 84280          p.add(0, func(m *_Encoding, v []interface{}) {
 84281              m.vex2(1, 0, v[1], hlcode(v[2]))
 84282              m.emit(0x73)
 84283              m.emit(0xf0 | lcode(v[1]))
 84284              m.imm1(toImmAny(v[0]))
 84285          })
 84286      }
 84287      // VPSLLQ xmm, xmm, xmm
 84288      if isXMM(v0) && isXMM(v1) && isXMM(v2) {
 84289          self.require(ISA_AVX)
 84290          p.domain = DomainAVX
 84291          p.add(0, func(m *_Encoding, v []interface{}) {
 84292              m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
 84293              m.emit(0xf3)
 84294              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 84295          })
 84296      }
 84297      // VPSLLQ m128, xmm, xmm
 84298      if isM128(v0) && isXMM(v1) && isXMM(v2) {
 84299          self.require(ISA_AVX)
 84300          p.domain = DomainAVX
 84301          p.add(0, func(m *_Encoding, v []interface{}) {
 84302              m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 84303              m.emit(0xf3)
 84304              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 84305          })
 84306      }
 84307      // VPSLLQ imm8, ymm, ymm
 84308      if isImm8(v0) && isYMM(v1) && isYMM(v2) {
 84309          self.require(ISA_AVX2)
 84310          p.domain = DomainAVX
 84311          p.add(0, func(m *_Encoding, v []interface{}) {
 84312              m.vex2(5, 0, v[1], hlcode(v[2]))
 84313              m.emit(0x73)
 84314              m.emit(0xf0 | lcode(v[1]))
 84315              m.imm1(toImmAny(v[0]))
 84316          })
 84317      }
 84318      // VPSLLQ xmm, ymm, ymm
 84319      if isXMM(v0) && isYMM(v1) && isYMM(v2) {
 84320          self.require(ISA_AVX2)
 84321          p.domain = DomainAVX
 84322          p.add(0, func(m *_Encoding, v []interface{}) {
 84323              m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
 84324              m.emit(0xf3)
 84325              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 84326          })
 84327      }
 84328      // VPSLLQ m128, ymm, ymm
 84329      if isM128(v0) && isYMM(v1) && isYMM(v2) {
 84330          self.require(ISA_AVX2)
 84331          p.domain = DomainAVX
 84332          p.add(0, func(m *_Encoding, v []interface{}) {
 84333              m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 84334              m.emit(0xf3)
 84335              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 84336          })
 84337      }
 84338      // VPSLLQ imm8, m512/m64bcst, zmm{k}{z}
 84339      if isImm8(v0) && isM512M64bcst(v1) && isZMMkz(v2) {
 84340          self.require(ISA_AVX512F)
 84341          p.domain = DomainAVX
 84342          p.add(0, func(m *_Encoding, v []interface{}) {
 84343              m.evex(0b01, 0x85, 0b10, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
 84344              m.emit(0x73)
 84345              m.mrsd(6, addr(v[1]), 64)
 84346              m.imm1(toImmAny(v[0]))
 84347          })
 84348      }
 84349      // VPSLLQ imm8, zmm, zmm{k}{z}
 84350      if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
 84351          self.require(ISA_AVX512F)
 84352          p.domain = DomainAVX
 84353          p.add(0, func(m *_Encoding, v []interface{}) {
 84354              m.emit(0x62)
 84355              m.emit(0xf1 ^ (ehcode(v[1]) << 5))
 84356              m.emit(0xfd ^ (hlcode(v[2]) << 3))
 84357              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x40)
 84358              m.emit(0x73)
 84359              m.emit(0xf0 | lcode(v[1]))
 84360              m.imm1(toImmAny(v[0]))
 84361          })
 84362      }
 84363      // VPSLLQ xmm, zmm, zmm{k}{z}
 84364      if isEVEXXMM(v0) && isZMM(v1) && isZMMkz(v2) {
 84365          self.require(ISA_AVX512F)
 84366          p.domain = DomainAVX
 84367          p.add(0, func(m *_Encoding, v []interface{}) {
 84368              m.emit(0x62)
 84369              m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 84370              m.emit(0xfd ^ (hlcode(v[1]) << 3))
 84371              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
 84372              m.emit(0xf3)
 84373              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 84374          })
 84375      }
 84376      // VPSLLQ m128, zmm, zmm{k}{z}
 84377      if isM128(v0) && isZMM(v1) && isZMMkz(v2) {
 84378          self.require(ISA_AVX512F)
 84379          p.domain = DomainAVX
 84380          p.add(0, func(m *_Encoding, v []interface{}) {
 84381              m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
 84382              m.emit(0xf3)
 84383              m.mrsd(lcode(v[2]), addr(v[0]), 16)
 84384          })
 84385      }
 84386      // VPSLLQ imm8, m128/m64bcst, xmm{k}{z}
 84387      if isImm8(v0) && isM128M64bcst(v1) && isXMMkz(v2) {
 84388          self.require(ISA_AVX512VL | ISA_AVX512F)
 84389          p.domain = DomainAVX
 84390          p.add(0, func(m *_Encoding, v []interface{}) {
 84391              m.evex(0b01, 0x85, 0b00, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
 84392              m.emit(0x73)
 84393              m.mrsd(6, addr(v[1]), 16)
 84394              m.imm1(toImmAny(v[0]))
 84395          })
 84396      }
 84397      // VPSLLQ imm8, m256/m64bcst, ymm{k}{z}
 84398      if isImm8(v0) && isM256M64bcst(v1) && isYMMkz(v2) {
 84399          self.require(ISA_AVX512VL | ISA_AVX512F)
 84400          p.domain = DomainAVX
 84401          p.add(0, func(m *_Encoding, v []interface{}) {
 84402              m.evex(0b01, 0x85, 0b01, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
 84403              m.emit(0x73)
 84404              m.mrsd(6, addr(v[1]), 32)
 84405              m.imm1(toImmAny(v[0]))
 84406          })
 84407      }
 84408      // VPSLLQ imm8, xmm, xmm{k}{z}
 84409      if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
 84410          self.require(ISA_AVX512VL | ISA_AVX512F)
 84411          p.domain = DomainAVX
 84412          p.add(0, func(m *_Encoding, v []interface{}) {
 84413              m.emit(0x62)
 84414              m.emit(0xf1 ^ (ehcode(v[1]) << 5))
 84415              m.emit(0xfd ^ (hlcode(v[2]) << 3))
 84416              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x00)
 84417              m.emit(0x73)
 84418              m.emit(0xf0 | lcode(v[1]))
 84419              m.imm1(toImmAny(v[0]))
 84420          })
 84421      }
 84422      // VPSLLQ xmm, xmm, xmm{k}{z}
 84423      if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
 84424          self.require(ISA_AVX512VL | ISA_AVX512F)
 84425          p.domain = DomainAVX
 84426          p.add(0, func(m *_Encoding, v []interface{}) {
 84427              m.emit(0x62)
 84428              m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 84429              m.emit(0xfd ^ (hlcode(v[1]) << 3))
 84430              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
 84431              m.emit(0xf3)
 84432              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 84433          })
 84434      }
 84435      // VPSLLQ m128, xmm, xmm{k}{z}
 84436      if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
 84437          self.require(ISA_AVX512VL | ISA_AVX512F)
 84438          p.domain = DomainAVX
 84439          p.add(0, func(m *_Encoding, v []interface{}) {
 84440              m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
 84441              m.emit(0xf3)
 84442              m.mrsd(lcode(v[2]), addr(v[0]), 16)
 84443          })
 84444      }
 84445      // VPSLLQ imm8, ymm, ymm{k}{z}
 84446      if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
 84447          self.require(ISA_AVX512VL | ISA_AVX512F)
 84448          p.domain = DomainAVX
 84449          p.add(0, func(m *_Encoding, v []interface{}) {
 84450              m.emit(0x62)
 84451              m.emit(0xf1 ^ (ehcode(v[1]) << 5))
 84452              m.emit(0xfd ^ (hlcode(v[2]) << 3))
 84453              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x20)
 84454              m.emit(0x73)
 84455              m.emit(0xf0 | lcode(v[1]))
 84456              m.imm1(toImmAny(v[0]))
 84457          })
 84458      }
 84459      // VPSLLQ xmm, ymm, ymm{k}{z}
 84460      if isEVEXXMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
 84461          self.require(ISA_AVX512VL | ISA_AVX512F)
 84462          p.domain = DomainAVX
 84463          p.add(0, func(m *_Encoding, v []interface{}) {
 84464              m.emit(0x62)
 84465              m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 84466              m.emit(0xfd ^ (hlcode(v[1]) << 3))
 84467              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
 84468              m.emit(0xf3)
 84469              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 84470          })
 84471      }
 84472      // VPSLLQ m128, ymm, ymm{k}{z}
 84473      if isM128(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
 84474          self.require(ISA_AVX512VL | ISA_AVX512F)
 84475          p.domain = DomainAVX
 84476          p.add(0, func(m *_Encoding, v []interface{}) {
 84477              m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
 84478              m.emit(0xf3)
 84479              m.mrsd(lcode(v[2]), addr(v[0]), 16)
 84480          })
 84481      }
 84482      if p.len == 0 {
 84483          panic("invalid operands for VPSLLQ")
 84484      }
 84485      return p
 84486  }
 84487  
// VPSLLVD performs "Variable Shift Packed Doubleword Data Left Logical".
//
// Mnemonic        : VPSLLVD
// Supported forms : (10 forms)
//
//    * VPSLLVD xmm, xmm, xmm                   [AVX2]
//    * VPSLLVD m128, xmm, xmm                  [AVX2]
//    * VPSLLVD ymm, ymm, ymm                   [AVX2]
//    * VPSLLVD m256, ymm, ymm                  [AVX2]
//    * VPSLLVD m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPSLLVD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPSLLVD m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPSLLVD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPSLLVD m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPSLLVD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Operands follow AT&T ordering (source first, destination last). Each form
// whose operand-type predicates match registers one candidate encoder via
// p.add; if no form matches, the function panics at the bottom.
func (self *Program) VPSLLVD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSLLVD", 3, Operands { v0, v1, v2 })
    // VPSLLVD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // hand-rolled 3-byte VEX prefix (0xc4), then opcode 0x47 and a
            // register-direct ModRM byte (0xc0 | reg<<3 | rm)
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x47)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSLLVD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x47)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSLLVD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x47)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSLLVD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x47)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSLLVD m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // trailing 64 is the memory-operand scaling unit for this form
            // (presumably the EVEX disp8*N compression factor — confirm in
            // _Encoding.mrsd); 16/32/64 track xmm/ymm/zmm widths below
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x47)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPSLLVD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // hand-rolled 4-byte EVEX prefix (0x62) for the register-direct form
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x47)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSLLVD m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x47)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSLLVD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x47)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSLLVD m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x47)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPSLLVD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x47)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // no operand form matched: the caller passed an unsupported combination
    if p.len == 0 {
        panic("invalid operands for VPSLLVD")
    }
    return p
}
 84624  
// VPSLLVQ performs "Variable Shift Packed Quadword Data Left Logical".
//
// Mnemonic        : VPSLLVQ
// Supported forms : (10 forms)
//
//    * VPSLLVQ xmm, xmm, xmm                   [AVX2]
//    * VPSLLVQ m128, xmm, xmm                  [AVX2]
//    * VPSLLVQ ymm, ymm, ymm                   [AVX2]
//    * VPSLLVQ m256, ymm, ymm                  [AVX2]
//    * VPSLLVQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPSLLVQ zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPSLLVQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPSLLVQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPSLLVQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPSLLVQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Quadword variant of VPSLLVD: same opcode (0x47) and form structure, but
// with the prefix bytes/flags for 64-bit element width (e.g. 0xf9/0xfd and
// evex prefix argument 0x85 here vs 0x79/0x7d and 0x05 in VPSLLVD).
// Each matching form registers a candidate encoder via p.add; the function
// panics at the bottom when no form matches.
func (self *Program) VPSLLVQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSLLVQ", 3, Operands { v0, v1, v2 })
    // VPSLLVQ xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // hand-rolled 3-byte VEX prefix, opcode, register-direct ModRM
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0x47)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSLLVQ m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x47)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSLLVQ ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit(0x47)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSLLVQ m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x47)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSLLVQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // trailing 64/32/16 below is the memory-operand scaling unit,
            // tracking zmm/ymm/xmm operand width (presumably EVEX disp8*N
            // compression — confirm in _Encoding.mrsd)
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x47)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPSLLVQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // hand-rolled 4-byte EVEX prefix (0x62) for the register-direct form
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x47)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSLLVQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x47)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSLLVQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x47)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSLLVQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x47)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPSLLVQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x47)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // no operand form matched: the caller passed an unsupported combination
    if p.len == 0 {
        panic("invalid operands for VPSLLVQ")
    }
    return p
}
 84761  
// VPSLLVW performs "Variable Shift Packed Word Data Left Logical".
//
// Mnemonic        : VPSLLVW
// Supported forms : (6 forms)
//
//    * VPSLLVW zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPSLLVW m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPSLLVW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSLLVW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPSLLVW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSLLVW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Word variant of the variable-shift family. Unlike VPSLLVD/VPSLLVQ there
// are no VEX (pre-AVX-512) forms and no broadcast memory forms: every form
// requires ISA_AVX512BW (plus ISA_AVX512VL for the xmm/ymm widths), and the
// opcode byte is 0x12 rather than 0x47. Each matching form registers a
// candidate encoder via p.add; the function panics below if none match.
func (self *Program) VPSLLVW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSLLVW", 3, Operands { v0, v1, v2 })
    // VPSLLVW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // hand-rolled 4-byte EVEX prefix (0x62), opcode, register-direct ModRM
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x12)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSLLVW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // final evex argument is 0 (no broadcast form for word elements);
            // 64/32/16 in mrsd is the memory scaling unit per operand width
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x12)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPSLLVW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x12)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSLLVW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x12)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSLLVW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x12)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSLLVW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x12)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // no operand form matched: the caller passed an unsupported combination
    if p.len == 0 {
        panic("invalid operands for VPSLLVW")
    }
    return p
}
 84850  
// VPSLLW performs "Shift Packed Word Data Left Logical".
//
// Mnemonic        : VPSLLW
// Supported forms : (18 forms)
//
//    * VPSLLW imm8, xmm, xmm           [AVX]
//    * VPSLLW xmm, xmm, xmm            [AVX]
//    * VPSLLW m128, xmm, xmm           [AVX]
//    * VPSLLW imm8, ymm, ymm           [AVX2]
//    * VPSLLW xmm, ymm, ymm            [AVX2]
//    * VPSLLW m128, ymm, ymm           [AVX2]
//    * VPSLLW imm8, zmm, zmm{k}{z}     [AVX512BW]
//    * VPSLLW xmm, zmm, zmm{k}{z}      [AVX512BW]
//    * VPSLLW m128, zmm, zmm{k}{z}     [AVX512BW]
//    * VPSLLW imm8, m512, zmm{k}{z}    [AVX512BW]
//    * VPSLLW imm8, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSLLW xmm, xmm, xmm{k}{z}      [AVX512BW,AVX512VL]
//    * VPSLLW m128, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSLLW imm8, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSLLW xmm, ymm, ymm{k}{z}      [AVX512BW,AVX512VL]
//    * VPSLLW m128, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSLLW imm8, m128, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPSLLW imm8, m256, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Two opcode families are used below: 0x71 with a /6 ModRM extension for the
// imm8 shift-count forms (immediate appended via m.imm1), and 0xf1 for the
// xmm/m128 shift-count forms. Each matching operand combination registers a
// candidate encoder via p.add; the function panics below when none match.
func (self *Program) VPSLLW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSLLW", 3, Operands { v0, v1, v2 })
    // VPSLLW imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix; 0xf0|reg is the register-direct ModRM with
            // the /6 opcode extension for the immediate form
            m.vex2(1, 0, v[1], hlcode(v[2]))
            m.emit(0x71)
            m.emit(0xf0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xf1)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSLLW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xf1)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSLLW imm8, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, 0, v[1], hlcode(v[2]))
            m.emit(0x71)
            m.emit(0xf0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLW xmm, ymm, ymm
    if isXMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xf1)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSLLW m128, ymm, ymm
    if isM128(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xf1)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSLLW imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // hand-rolled 4-byte EVEX prefix (0x62); 0x40/0x20/0x00 in the
            // fourth byte selects zmm/ymm/xmm vector length below
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x71)
            m.emit(0xf0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLW xmm, zmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xf1)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSLLW m128, zmm, zmm{k}{z}
    if isM128(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // shift count is m128 even for zmm destinations, so the memory
            // scaling unit stays 16 here (presumably EVEX disp8*N — confirm
            // in _Encoding.mrsd)
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xf1)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSLLW imm8, m512, zmm{k}{z}
    if isImm8(v0) && isM512(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // memory-source immediate form: /6 extension goes in the reg field
            m.evex(0b01, 0x05, 0b10, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x71)
            m.mrsd(6, addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLW imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x71)
            m.emit(0xf0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xf1)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSLLW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xf1)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSLLW imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x71)
            m.emit(0xf0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLW xmm, ymm, ymm{k}{z}
    if isEVEXXMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xf1)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSLLW m128, ymm, ymm{k}{z}
    if isM128(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xf1)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSLLW imm8, m128, xmm{k}{z}
    if isImm8(v0) && isM128(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x71)
            m.mrsd(6, addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLW imm8, m256, ymm{k}{z}
    if isImm8(v0) && isM256(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x71)
            m.mrsd(6, addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // no operand form matched: the caller passed an unsupported combination
    if p.len == 0 {
        panic("invalid operands for VPSLLW")
    }
    return p
}
 85088  
// VPSRAD performs "Shift Packed Doubleword Data Right Arithmetic".
//
// Mnemonic        : VPSRAD
// Supported forms : (18 forms)
//
//    * VPSRAD imm8, xmm, xmm                   [AVX]
//    * VPSRAD xmm, xmm, xmm                    [AVX]
//    * VPSRAD m128, xmm, xmm                   [AVX]
//    * VPSRAD imm8, ymm, ymm                   [AVX2]
//    * VPSRAD xmm, ymm, ymm                    [AVX2]
//    * VPSRAD m128, ymm, ymm                   [AVX2]
//    * VPSRAD imm8, m512/m32bcst, zmm{k}{z}    [AVX512F]
//    * VPSRAD imm8, zmm, zmm{k}{z}             [AVX512F]
//    * VPSRAD xmm, zmm, zmm{k}{z}              [AVX512F]
//    * VPSRAD m128, zmm, zmm{k}{z}             [AVX512F]
//    * VPSRAD imm8, m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPSRAD imm8, m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPSRAD imm8, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPSRAD xmm, xmm, xmm{k}{z}              [AVX512F,AVX512VL]
//    * VPSRAD m128, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPSRAD imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//    * VPSRAD xmm, ymm, ymm{k}{z}              [AVX512F,AVX512VL]
//    * VPSRAD m128, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPSRAD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSRAD", 3, Operands { v0, v1, v2 })
    // Each matcher below checks one operand form, records its ISA requirement,
    // and registers a candidate encoder via p.add(); if no form matched at all,
    // the p.len check at the end panics.
    // VPSRAD imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, 0, v[1], hlcode(v[2]))
            m.emit(0x72)
            // ModRM with reg field 4 (0xe0 = 0xc0 | 4<<3): the /4 opcode
            // extension selects the arithmetic right shift.
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xe2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xe2)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSRAD imm8, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, 0, v[1], hlcode(v[2]))
            m.emit(0x72)
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAD xmm, ymm, ymm
    if isXMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xe2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAD m128, ymm, ymm
    if isM128(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xe2)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSRAD imm8, m512/m32bcst, zmm{k}{z}
    if isImm8(v0) && isM512M32bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            // Last mrsd argument (64 here) is presumably the EVEX disp8*N
            // compression factor for the memory operand — TODO confirm
            // against the mrsd helper.
            m.mrsd(4, addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAD imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-only EVEX forms emit the prefix byte-by-byte: the 0x62
            // escape followed by the three EVEX payload bytes. Memory forms
            // use m.evex/m.mrsd instead.
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            // 0x40 here selects the 512-bit vector length; compare 0x20 in the
            // ymm forms and 0x00 in the xmm forms below.
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x72)
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAD xmm, zmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xe2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAD m128, zmm, zmm{k}{z}
    if isM128(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe2)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRAD imm8, m128/m32bcst, xmm{k}{z}
    if isImm8(v0) && isM128M32bcst(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(4, addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAD imm8, m256/m32bcst, ymm{k}{z}
    if isImm8(v0) && isM256M32bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(4, addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAD imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x72)
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xe2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAD m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe2)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRAD imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x72)
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAD xmm, ymm, ymm{k}{z}
    if isEVEXXMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xe2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAD m128, ymm, ymm{k}{z}
    if isM128(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe2)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPSRAD")
    }
    return p
}
 85326  
// VPSRAQ performs "Shift Packed Quadword Data Right Arithmetic".
//
// Mnemonic        : VPSRAQ
// Supported forms : (12 forms)
//
//    * VPSRAQ imm8, m512/m64bcst, zmm{k}{z}    [AVX512F]
//    * VPSRAQ imm8, zmm, zmm{k}{z}             [AVX512F]
//    * VPSRAQ xmm, zmm, zmm{k}{z}              [AVX512F]
//    * VPSRAQ m128, zmm, zmm{k}{z}             [AVX512F]
//    * VPSRAQ imm8, m128/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPSRAQ imm8, m256/m64bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPSRAQ imm8, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPSRAQ xmm, xmm, xmm{k}{z}              [AVX512F,AVX512VL]
//    * VPSRAQ m128, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPSRAQ imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//    * VPSRAQ xmm, ymm, ymm{k}{z}              [AVX512F,AVX512VL]
//    * VPSRAQ m128, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPSRAQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSRAQ", 3, Operands { v0, v1, v2 })
    // Each matcher below checks one operand form, records its ISA requirement,
    // and registers a candidate encoder via p.add(); if no form matched at all,
    // the p.len check at the end panics. All forms here are EVEX-only
    // (AVX-512): there is no VEX encoding of VPSRAQ.
    // VPSRAQ imm8, m512/m64bcst, zmm{k}{z}
    if isImm8(v0) && isM512M64bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0x85 flag value (vs 0x05 in the doubleword shifts) presumably
            // carries EVEX.W=1 for 64-bit elements — TODO confirm against the
            // evex helper.
            m.evex(0b01, 0x85, 0b10, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            // ModRM.reg = 4: the /4 opcode extension selects the arithmetic
            // right shift; 64 is the memory-operand scale passed to mrsd.
            m.mrsd(4, addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAQ imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-only forms emit the EVEX prefix byte-by-byte (0x62
            // escape plus three payload bytes); note 0xfd here where the
            // doubleword variants use 0x7d (W bit set).
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            // 0x40 selects the 512-bit vector length; 0x20/0x00 below are the
            // ymm/xmm equivalents.
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x72)
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAQ xmm, zmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xe2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAQ m128, zmm, zmm{k}{z}
    if isM128(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe2)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRAQ imm8, m128/m64bcst, xmm{k}{z}
    if isImm8(v0) && isM128M64bcst(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(4, addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAQ imm8, m256/m64bcst, ymm{k}{z}
    if isImm8(v0) && isM256M64bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(4, addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAQ imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x72)
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xe2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAQ m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe2)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRAQ imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x72)
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAQ xmm, ymm, ymm{k}{z}
    if isEVEXXMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xe2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAQ m128, ymm, ymm{k}{z}
    if isM128(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe2)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPSRAQ")
    }
    return p
}
 85496  
// VPSRAVD performs "Variable Shift Packed Doubleword Data Right Arithmetic".
//
// Mnemonic        : VPSRAVD
// Supported forms : (10 forms)
//
//    * VPSRAVD xmm, xmm, xmm                   [AVX2]
//    * VPSRAVD m128, xmm, xmm                  [AVX2]
//    * VPSRAVD ymm, ymm, ymm                   [AVX2]
//    * VPSRAVD m256, ymm, ymm                  [AVX2]
//    * VPSRAVD m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPSRAVD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPSRAVD m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPSRAVD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPSRAVD m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPSRAVD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPSRAVD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSRAVD", 3, Operands { v0, v1, v2 })
    // Each matcher below checks one operand form, records its ISA requirement,
    // and registers a candidate encoder via p.add(); if no form matched at all,
    // the p.len check at the end panics. Unlike the immediate-count shifts,
    // every form here uses opcode 0x46 with a per-element shift-count operand.
    // VPSRAVD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-only VEX forms emit the three-byte 0xc4 prefix
            // directly; memory forms below go through m.vex3/m.mrsd.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x46)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAVD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x46)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSRAVD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x46)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAVD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x46)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSRAVD m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x46)
            // 64 is the memory-operand scale passed to mrsd (presumably the
            // EVEX disp8*N compression factor — TODO confirm against mrsd).
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPSRAVD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix emitted byte-by-byte for register-only forms; 0x40
            // selects the 512-bit vector length (0x20/0x00 below for ymm/xmm).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x46)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAVD m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x46)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRAVD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x46)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAVD m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x46)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPSRAVD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x46)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPSRAVD")
    }
    return p
}
 85633  
// VPSRAVQ performs "Variable Shift Packed Quadword Data Right Arithmetic".
//
// Mnemonic        : VPSRAVQ
// Supported forms : (6 forms)
//
//    * VPSRAVQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPSRAVQ zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPSRAVQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPSRAVQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPSRAVQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPSRAVQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPSRAVQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSRAVQ", 3, Operands { v0, v1, v2 })
    // Each matcher below checks one operand form, records its ISA requirement,
    // and registers a candidate encoder via p.add(); if no form matched at all,
    // the p.len check at the end panics. All forms are EVEX-only (AVX-512);
    // the 0x85 flag (vs 0x05 in the doubleword variant) and the 0xfd prefix
    // byte (vs 0x7d) presumably carry EVEX.W=1 for 64-bit elements — TODO
    // confirm against the evex helper.
    // VPSRAVQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x46)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPSRAVQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-only forms emit the EVEX prefix byte-by-byte; 0x40
            // selects the 512-bit vector length (0x20/0x00 below for ymm/xmm).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x46)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAVQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x46)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRAVQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x46)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAVQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x46)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPSRAVQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x46)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPSRAVQ")
    }
    return p
}
 85722  
// VPSRAVW performs "Variable Shift Packed Word Data Right Arithmetic".
//
// Mnemonic        : VPSRAVW
// Supported forms : (6 forms)
//
//    * VPSRAVW zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPSRAVW m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPSRAVW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSRAVW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPSRAVW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSRAVW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
func (self *Program) VPSRAVW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSRAVW", 3, Operands { v0, v1, v2 })
    // Generated encoder: each operand-pattern guard below registers one
    // encoding closure on p via p.add. Register-register forms write the
    // 4-byte EVEX prefix (0x62 escape byte) inline; memory forms delegate
    // to m.evex/m.mrsd, where the trailing mrsd constant (16/32/64) equals
    // the memory operand width in bytes — presumably the EVEX disp8*N
    // compression scale. All six forms share opcode byte 0x11.
    // VPSRAVW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            // 0x40 in the last prefix byte selects the 512-bit vector length.
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAVW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x11)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPSRAVW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            // 0x00 here selects the 128-bit vector length.
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAVW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x11)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRAVW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            // 0x20 here selects the 256-bit vector length.
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAVW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x11)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No guard matched: the operand combination is not a valid VPSRAVW form.
    if p.len == 0 {
        panic("invalid operands for VPSRAVW")
    }
    return p
}
 85811  
// VPSRAW performs "Shift Packed Word Data Right Arithmetic".
//
// Mnemonic        : VPSRAW
// Supported forms : (18 forms)
//
//    * VPSRAW imm8, xmm, xmm           [AVX]
//    * VPSRAW xmm, xmm, xmm            [AVX]
//    * VPSRAW m128, xmm, xmm           [AVX]
//    * VPSRAW imm8, ymm, ymm           [AVX2]
//    * VPSRAW xmm, ymm, ymm            [AVX2]
//    * VPSRAW m128, ymm, ymm           [AVX2]
//    * VPSRAW imm8, zmm, zmm{k}{z}     [AVX512BW]
//    * VPSRAW xmm, zmm, zmm{k}{z}      [AVX512BW]
//    * VPSRAW m128, zmm, zmm{k}{z}     [AVX512BW]
//    * VPSRAW imm8, m512, zmm{k}{z}    [AVX512BW]
//    * VPSRAW imm8, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSRAW xmm, xmm, xmm{k}{z}      [AVX512BW,AVX512VL]
//    * VPSRAW m128, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSRAW imm8, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSRAW xmm, ymm, ymm{k}{z}      [AVX512BW,AVX512VL]
//    * VPSRAW m128, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSRAW imm8, m128, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPSRAW imm8, m256, ymm{k}{z}    [AVX512BW,AVX512VL]
//
func (self *Program) VPSRAW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSRAW", 3, Operands { v0, v1, v2 })
    // Generated encoder: each operand-pattern guard registers one encoding
    // closure on p. AVX/AVX2 forms use the 2-byte VEX prefix (m.vex2);
    // AVX-512 forms use EVEX, either emitted byte-by-byte (0x62 escape) for
    // register operands or through m.evex/m.mrsd for memory operands. The
    // imm8 forms use opcode 0x71 with the shift count appended; the
    // xmm-count and memory-count forms use opcode 0xe1. The trailing mrsd
    // constant is the memory operand width in bytes — presumably the EVEX
    // disp8*N compression scale (note m128 operands keep width 16 even for
    // ymm/zmm destinations).
    // VPSRAW imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, 0, v[1], hlcode(v[2]))
            m.emit(0x71)
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xe1)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xe1)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSRAW imm8, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, 0, v[1], hlcode(v[2]))
            m.emit(0x71)
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAW xmm, ymm, ymm
    if isXMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xe1)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAW m128, ymm, ymm
    if isM128(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xe1)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSRAW imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x71)
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAW xmm, zmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xe1)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAW m128, zmm, zmm{k}{z}
    if isM128(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe1)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRAW imm8, m512, zmm{k}{z}
    if isImm8(v0) && isM512(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x71)
            // The literal 4 is the /r opcode-extension field for the
            // immediate-count form (the shift amount follows as imm8).
            m.mrsd(4, addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAW imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x71)
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xe1)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe1)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRAW imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x71)
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAW xmm, ymm, ymm{k}{z}
    if isEVEXXMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xe1)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAW m128, ymm, ymm{k}{z}
    if isM128(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe1)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRAW imm8, m128, xmm{k}{z}
    if isImm8(v0) && isM128(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x71)
            m.mrsd(4, addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAW imm8, m256, ymm{k}{z}
    if isImm8(v0) && isM256(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x71)
            m.mrsd(4, addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No guard matched: the operand combination is not a valid VPSRAW form.
    if p.len == 0 {
        panic("invalid operands for VPSRAW")
    }
    return p
}
 86049  
// VPSRLD performs "Shift Packed Doubleword Data Right Logical".
//
// Mnemonic        : VPSRLD
// Supported forms : (18 forms)
//
//    * VPSRLD imm8, xmm, xmm                   [AVX]
//    * VPSRLD xmm, xmm, xmm                    [AVX]
//    * VPSRLD m128, xmm, xmm                   [AVX]
//    * VPSRLD imm8, ymm, ymm                   [AVX2]
//    * VPSRLD xmm, ymm, ymm                    [AVX2]
//    * VPSRLD m128, ymm, ymm                   [AVX2]
//    * VPSRLD imm8, m512/m32bcst, zmm{k}{z}    [AVX512F]
//    * VPSRLD imm8, zmm, zmm{k}{z}             [AVX512F]
//    * VPSRLD xmm, zmm, zmm{k}{z}              [AVX512F]
//    * VPSRLD m128, zmm, zmm{k}{z}             [AVX512F]
//    * VPSRLD imm8, m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPSRLD imm8, m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPSRLD imm8, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPSRLD xmm, xmm, xmm{k}{z}              [AVX512F,AVX512VL]
//    * VPSRLD m128, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPSRLD imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//    * VPSRLD xmm, ymm, ymm{k}{z}              [AVX512F,AVX512VL]
//    * VPSRLD m128, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPSRLD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSRLD", 3, Operands { v0, v1, v2 })
    // Generated encoder: each operand-pattern guard registers one encoding
    // closure on p. AVX/AVX2 forms use the 2-byte VEX prefix (m.vex2);
    // AVX-512 forms use EVEX, emitted byte-by-byte (0x62 escape) for
    // register operands or via m.evex/m.mrsd for memory operands. The imm8
    // forms use opcode 0x72; the xmm-count and memory-count forms use
    // opcode 0xd2. Broadcast-capable memory forms pass bcode(v[1]) to
    // m.evex so the m32bcst variant is honored.
    // VPSRLD imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, 0, v[1], hlcode(v[2]))
            m.emit(0x72)
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xd2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xd2)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSRLD imm8, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, 0, v[1], hlcode(v[2]))
            m.emit(0x72)
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLD xmm, ymm, ymm
    if isXMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xd2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLD m128, ymm, ymm
    if isM128(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xd2)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSRLD imm8, m512/m32bcst, zmm{k}{z}
    if isImm8(v0) && isM512M32bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            // The literal 2 is the /r opcode-extension field for the
            // immediate-count form (the shift amount follows as imm8).
            m.mrsd(2, addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLD imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x72)
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLD xmm, zmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xd2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLD m128, zmm, zmm{k}{z}
    if isM128(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xd2)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRLD imm8, m128/m32bcst, xmm{k}{z}
    if isImm8(v0) && isM128M32bcst(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(2, addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLD imm8, m256/m32bcst, ymm{k}{z}
    if isImm8(v0) && isM256M32bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(2, addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLD imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x72)
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xd2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLD m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xd2)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRLD imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x72)
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLD xmm, ymm, ymm{k}{z}
    if isEVEXXMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xd2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLD m128, ymm, ymm{k}{z}
    if isM128(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xd2)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // No guard matched: the operand combination is not a valid VPSRLD form.
    if p.len == 0 {
        panic("invalid operands for VPSRLD")
    }
    return p
}
 86287  
// VPSRLDQ performs "Shift Packed Double Quadword Right Logical".
//
// Mnemonic        : VPSRLDQ
// Supported forms : (8 forms)
//
//    * VPSRLDQ imm8, xmm, xmm     [AVX]
//    * VPSRLDQ imm8, ymm, ymm     [AVX2]
//    * VPSRLDQ imm8, zmm, zmm     [AVX512BW]
//    * VPSRLDQ imm8, m512, zmm    [AVX512BW]
//    * VPSRLDQ imm8, xmm, xmm     [AVX512BW,AVX512VL]
//    * VPSRLDQ imm8, m128, xmm    [AVX512BW,AVX512VL]
//    * VPSRLDQ imm8, ymm, ymm     [AVX512BW,AVX512VL]
//    * VPSRLDQ imm8, m256, ymm    [AVX512BW,AVX512VL]
//
func (self *Program) VPSRLDQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSRLDQ", 3, Operands { v0, v1, v2 })
    // Generated encoder: each operand-pattern guard registers one encoding
    // closure on p. All forms take an imm8 byte count and share opcode
    // 0x73. Unlike the other shift encoders in this file, VPSRLDQ has no
    // {k}{z} variants, so the AVX-512 forms pass 0 for the mask and
    // zeroing fields (kcode/zcode are absent from the prefix byte and the
    // m.evex calls).
    // VPSRLDQ imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, 0, v[1], hlcode(v[2]))
            m.emit(0x73)
            m.emit(0xd8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLDQ imm8, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, 0, v[1], hlcode(v[2]))
            m.emit(0x73)
            m.emit(0xd8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLDQ imm8, zmm, zmm
    if isImm8(v0) && isZMM(v1) && isZMM(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | 0x40)
            m.emit(0x73)
            m.emit(0xd8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLDQ imm8, m512, zmm
    if isImm8(v0) && isM512(v1) && isZMM(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, 0, addr(v[1]), vcode(v[2]), 0, 0, 0)
            m.emit(0x73)
            // The literal 3 is the /r opcode-extension field for the
            // byte-shift form; the trailing constant is the memory operand
            // width in bytes.
            m.mrsd(3, addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLDQ imm8, xmm, xmm
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | 0x00)
            m.emit(0x73)
            m.emit(0xd8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLDQ imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, 0, addr(v[1]), vcode(v[2]), 0, 0, 0)
            m.emit(0x73)
            m.mrsd(3, addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLDQ imm8, ymm, ymm
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | 0x20)
            m.emit(0x73)
            m.emit(0xd8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLDQ imm8, m256, ymm
    if isImm8(v0) && isM256(v1) && isEVEXYMM(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, 0, addr(v[1]), vcode(v[2]), 0, 0, 0)
            m.emit(0x73)
            m.mrsd(3, addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No guard matched: the operand combination is not a valid VPSRLDQ form.
    if p.len == 0 {
        panic("invalid operands for VPSRLDQ")
    }
    return p
}
 86406  
 86407  // VPSRLQ performs "Shift Packed Quadword Data Right Logical".
 86408  //
 86409  // Mnemonic        : VPSRLQ
 86410  // Supported forms : (18 forms)
 86411  //
 86412  //    * VPSRLQ imm8, xmm, xmm                   [AVX]
 86413  //    * VPSRLQ xmm, xmm, xmm                    [AVX]
 86414  //    * VPSRLQ m128, xmm, xmm                   [AVX]
 86415  //    * VPSRLQ imm8, ymm, ymm                   [AVX2]
 86416  //    * VPSRLQ xmm, ymm, ymm                    [AVX2]
 86417  //    * VPSRLQ m128, ymm, ymm                   [AVX2]
 86418  //    * VPSRLQ imm8, m512/m64bcst, zmm{k}{z}    [AVX512F]
 86419  //    * VPSRLQ imm8, zmm, zmm{k}{z}             [AVX512F]
 86420  //    * VPSRLQ xmm, zmm, zmm{k}{z}              [AVX512F]
 86421  //    * VPSRLQ m128, zmm, zmm{k}{z}             [AVX512F]
 86422  //    * VPSRLQ imm8, m128/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
 86423  //    * VPSRLQ imm8, m256/m64bcst, ymm{k}{z}    [AVX512F,AVX512VL]
 86424  //    * VPSRLQ imm8, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 86425  //    * VPSRLQ xmm, xmm, xmm{k}{z}              [AVX512F,AVX512VL]
 86426  //    * VPSRLQ m128, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 86427  //    * VPSRLQ imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 86428  //    * VPSRLQ xmm, ymm, ymm{k}{z}              [AVX512F,AVX512VL]
 86429  //    * VPSRLQ m128, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 86430  //
func (self *Program) VPSRLQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSRLQ", 3, Operands { v0, v1, v2 })
    // Each "if" below matches one operand form from the table in the doc
    // comment above; a match registers an encoder closure via p.add, so a
    // single call can record several candidate encodings for the same form.
    // VPSRLQ imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix; opcode 0x73 uses the ModRM reg field as an
            // opcode extension (0xd0 => mod=11, reg=/2), imm8 is the count.
            m.vex2(1, 0, v[1], hlcode(v[2]))
            m.emit(0x73)
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLQ xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: opcode 0xd3, ModRM 11|reg|rm packs dst and src.
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xd3)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLQ m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // mrsd emits ModRM/SIB/displacement for the memory operand;
            // trailing 1 = no disp8 scaling (plain VEX encoding).
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xd3)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSRLQ imm8, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, 0, v[1], hlcode(v[2]))
            m.emit(0x73)
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLQ xmm, ymm, ymm
    if isXMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xd3)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLQ m128, ymm, ymm
    if isM128(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xd3)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSRLQ imm8, m512/m64bcst, zmm{k}{z}
    if isImm8(v0) && isM512M64bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // m.evex builds the 4-byte EVEX prefix for a memory operand;
            // the final mrsd argument (64 here) is the disp8 scale used for
            // EVEX compressed displacement, matching the operand width.
            m.evex(0b01, 0x85, 0b10, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x73)
            m.mrsd(2, addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLQ imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62 escape byte, then three payload
            // bytes folding in register-extension bits (ehcode/ecode), vvvv
            // (hlcode), opmask (kcode), zeroing (zcode), and vector length —
            // the trailing 0x40/0x20/0x00 selects 512/256/128-bit forms.
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x73)
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLQ xmm, zmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xd3)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLQ m128, zmm, zmm{k}{z}
    if isM128(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Shift-count memory operand is always m128, so the disp8 scale
            // stays 16 regardless of destination vector length.
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xd3)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRLQ imm8, m128/m64bcst, xmm{k}{z}
    if isImm8(v0) && isM128M64bcst(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x73)
            m.mrsd(2, addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLQ imm8, m256/m64bcst, ymm{k}{z}
    if isImm8(v0) && isM256M64bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x73)
            m.mrsd(2, addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLQ imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x73)
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xd3)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLQ m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xd3)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRLQ imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x73)
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLQ xmm, ymm, ymm{k}{z}
    if isEVEXXMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xd3)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLQ m128, ymm, ymm{k}{z}
    if isM128(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xd3)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPSRLQ")
    }
    return p
}
 86644  
 86645  // VPSRLVD performs "Variable Shift Packed Doubleword Data Right Logical".
 86646  //
 86647  // Mnemonic        : VPSRLVD
 86648  // Supported forms : (10 forms)
 86649  //
 86650  //    * VPSRLVD xmm, xmm, xmm                   [AVX2]
 86651  //    * VPSRLVD m128, xmm, xmm                  [AVX2]
 86652  //    * VPSRLVD ymm, ymm, ymm                   [AVX2]
 86653  //    * VPSRLVD m256, ymm, ymm                  [AVX2]
 86654  //    * VPSRLVD m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
 86655  //    * VPSRLVD zmm, zmm, zmm{k}{z}             [AVX512F]
 86656  //    * VPSRLVD m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 86657  //    * VPSRLVD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 86658  //    * VPSRLVD m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 86659  //    * VPSRLVD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 86660  //
func (self *Program) VPSRLVD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSRLVD", 3, Operands { v0, v1, v2 })
    // One "if" per supported operand form; each match registers an encoder
    // closure via p.add that emits the exact instruction bytes.
    // VPSRLVD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix (0xc4 escape), then opcode 0x45
            // and ModRM 11|reg|rm for the register-register form.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x45)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLVD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: vex3 helper builds the prefix, mrsd emits
            // ModRM/SIB/displacement (scale 1 = no disp8 compression).
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x45)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSRLVD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x45)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLVD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x45)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSRLVD m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form: last evex argument is the broadcast bit,
            // last mrsd argument is the disp8 compressed-displacement scale.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x45)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPSRLVD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix (0x62 escape); the trailing
            // 0x40/0x20/0x00 in the fourth byte selects 512/256/128-bit.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x45)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLVD m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x45)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRLVD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x45)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLVD m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x45)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPSRLVD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x45)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPSRLVD")
    }
    return p
}
 86781  
 86782  // VPSRLVQ performs "Variable Shift Packed Quadword Data Right Logical".
 86783  //
 86784  // Mnemonic        : VPSRLVQ
 86785  // Supported forms : (10 forms)
 86786  //
 86787  //    * VPSRLVQ xmm, xmm, xmm                   [AVX2]
 86788  //    * VPSRLVQ m128, xmm, xmm                  [AVX2]
 86789  //    * VPSRLVQ ymm, ymm, ymm                   [AVX2]
 86790  //    * VPSRLVQ m256, ymm, ymm                  [AVX2]
 86791  //    * VPSRLVQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
 86792  //    * VPSRLVQ zmm, zmm, zmm{k}{z}             [AVX512F]
 86793  //    * VPSRLVQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 86794  //    * VPSRLVQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 86795  //    * VPSRLVQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 86796  //    * VPSRLVQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 86797  //
func (self *Program) VPSRLVQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSRLVQ", 3, Operands { v0, v1, v2 })
    // Quadword variant of VPSRLVD: same opcode byte (0x45) but with the
    // VEX/EVEX W bit set (compare 0xf9/0xfd and 0x81/0x85 here against
    // 0x79/0x7d and 0x01/0x05 in VPSRLVD). One "if" per operand form.
    // VPSRLVQ xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix (0xc4 escape), opcode 0x45,
            // ModRM 11|reg|rm for the register-register form.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0x45)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLVQ m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x45)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSRLVQ ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit(0x45)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLVQ m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x45)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSRLVQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form: bcode is the broadcast bit, the final mrsd
            // argument is the disp8 compressed-displacement scale.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x45)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPSRLVQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix; trailing 0x40/0x20/0x00 in the
            // fourth byte selects the 512/256/128-bit vector length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x45)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLVQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x45)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRLVQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x45)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLVQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x45)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPSRLVQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x45)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPSRLVQ")
    }
    return p
}
 86918  
 86919  // VPSRLVW performs "Variable Shift Packed Word Data Right Logical".
 86920  //
 86921  // Mnemonic        : VPSRLVW
 86922  // Supported forms : (6 forms)
 86923  //
 86924  //    * VPSRLVW zmm, zmm, zmm{k}{z}     [AVX512BW]
 86925  //    * VPSRLVW m512, zmm, zmm{k}{z}    [AVX512BW]
 86926  //    * VPSRLVW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
 86927  //    * VPSRLVW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
 86928  //    * VPSRLVW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
 86929  //    * VPSRLVW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
 86930  //
func (self *Program) VPSRLVW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSRLVW", 3, Operands { v0, v1, v2 })
    // Word variant: EVEX-only (AVX-512BW), opcode byte 0x10. One "if" per
    // operand form; each match registers an encoder closure via p.add.
    // VPSRLVW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62 escape plus three payload bytes
            // folding in register-extension bits, vvvv, opmask and zeroing;
            // trailing 0x40/0x20/0x00 selects 512/256/128-bit length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLVW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: no broadcast (final evex argument 0); the last
            // mrsd argument is the disp8 compressed-displacement scale,
            // matching the full memory-operand width (64/16/32 bytes).
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPSRLVW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLVW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRLVW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLVW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPSRLVW")
    }
    return p
}
 87007  
 87008  // VPSRLW performs "Shift Packed Word Data Right Logical".
 87009  //
 87010  // Mnemonic        : VPSRLW
 87011  // Supported forms : (18 forms)
 87012  //
 87013  //    * VPSRLW imm8, xmm, xmm           [AVX]
 87014  //    * VPSRLW xmm, xmm, xmm            [AVX]
 87015  //    * VPSRLW m128, xmm, xmm           [AVX]
 87016  //    * VPSRLW imm8, ymm, ymm           [AVX2]
 87017  //    * VPSRLW xmm, ymm, ymm            [AVX2]
 87018  //    * VPSRLW m128, ymm, ymm           [AVX2]
 87019  //    * VPSRLW imm8, zmm, zmm{k}{z}     [AVX512BW]
 87020  //    * VPSRLW xmm, zmm, zmm{k}{z}      [AVX512BW]
 87021  //    * VPSRLW m128, zmm, zmm{k}{z}     [AVX512BW]
 87022  //    * VPSRLW imm8, m512, zmm{k}{z}    [AVX512BW]
 87023  //    * VPSRLW imm8, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
 87024  //    * VPSRLW xmm, xmm, xmm{k}{z}      [AVX512BW,AVX512VL]
 87025  //    * VPSRLW m128, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
 87026  //    * VPSRLW imm8, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
 87027  //    * VPSRLW xmm, ymm, ymm{k}{z}      [AVX512BW,AVX512VL]
 87028  //    * VPSRLW m128, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
 87029  //    * VPSRLW imm8, m128, xmm{k}{z}    [AVX512BW,AVX512VL]
 87030  //    * VPSRLW imm8, m256, ymm{k}{z}    [AVX512BW,AVX512VL]
 87031  //
func (self *Program) VPSRLW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSRLW", 3, Operands { v0, v1, v2 })
    // Each operand-form check below registers one or more candidate encoders
    // via p.add; several forms may match the same operand set.
    // VPSRLW imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, 0, v[1], hlcode(v[2]))
            m.emit(0x71)
            // /2 form: the immediate-shift variant encodes the opcode
            // extension in the reg field (0xd0 base), source in rm.
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xd1)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xd1)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSRLW imm8, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, 0, v[1], hlcode(v[2]))
            m.emit(0x71)
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLW xmm, ymm, ymm
    if isXMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xd1)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLW m128, ymm, ymm
    if isM128(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xd1)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSRLW imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix emitted byte-by-byte (0x62 escape plus three payload
            // bytes); the XOR/OR bit math folds in register-extension bits
            // (ehcode/ecode/hlcode), the opmask (kcode) and zeroing (zcode).
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x71)
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLW xmm, zmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xd1)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLW m128, zmm, zmm{k}{z}
    if isM128(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xd1)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRLW imm8, m512, zmm{k}{z}
    if isImm8(v0) && isM512(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x71)
            m.mrsd(2, addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLW imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x71)
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xd1)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xd1)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRLW imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x71)
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLW xmm, ymm, ymm{k}{z}
    if isEVEXXMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xd1)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLW m128, ymm, ymm{k}{z}
    if isM128(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xd1)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRLW imm8, m128, xmm{k}{z}
    if isImm8(v0) && isM128(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x71)
            m.mrsd(2, addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLW imm8, m256, ymm{k}{z}
    if isImm8(v0) && isM256(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x71)
            m.mrsd(2, addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No supported operand form matched.
    if p.len == 0 {
        panic("invalid operands for VPSRLW")
    }
    return p
}
 87245  
 87246  // VPSUBB performs "Subtract Packed Byte Integers".
 87247  //
 87248  // Mnemonic        : VPSUBB
 87249  // Supported forms : (10 forms)
 87250  //
 87251  //    * VPSUBB xmm, xmm, xmm           [AVX]
 87252  //    * VPSUBB m128, xmm, xmm          [AVX]
 87253  //    * VPSUBB ymm, ymm, ymm           [AVX2]
 87254  //    * VPSUBB m256, ymm, ymm          [AVX2]
 87255  //    * VPSUBB zmm, zmm, zmm{k}{z}     [AVX512BW]
 87256  //    * VPSUBB m512, zmm, zmm{k}{z}    [AVX512BW]
 87257  //    * VPSUBB xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
 87258  //    * VPSUBB m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
 87259  //    * VPSUBB ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
 87260  //    * VPSUBB m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
 87261  //
func (self *Program) VPSUBB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSUBB", 3, Operands { v0, v1, v2 })
    // Each operand-form check below registers a candidate encoder via p.add;
    // several forms may match the same operand set.
    // VPSUBB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xf8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xf8)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSUBB ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xf8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBB m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xf8)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSUBB zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix emitted byte-by-byte (0x62 escape plus three payload
            // bytes); the XOR/OR bit math folds in register-extension bits,
            // the opmask (kcode) and zeroing (zcode).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xf8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBB m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xf8)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPSUBB xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xf8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBB m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xf8)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSUBB ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xf8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBB m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xf8)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No supported operand form matched.
    if p.len == 0 {
        panic("invalid operands for VPSUBB")
    }
    return p
}
 87378  
 87379  // VPSUBD performs "Subtract Packed Doubleword Integers".
 87380  //
 87381  // Mnemonic        : VPSUBD
 87382  // Supported forms : (10 forms)
 87383  //
 87384  //    * VPSUBD xmm, xmm, xmm                   [AVX]
 87385  //    * VPSUBD m128, xmm, xmm                  [AVX]
 87386  //    * VPSUBD ymm, ymm, ymm                   [AVX2]
 87387  //    * VPSUBD m256, ymm, ymm                  [AVX2]
 87388  //    * VPSUBD m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
 87389  //    * VPSUBD zmm, zmm, zmm{k}{z}             [AVX512F]
 87390  //    * VPSUBD m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 87391  //    * VPSUBD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 87392  //    * VPSUBD m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 87393  //    * VPSUBD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 87394  //
func (self *Program) VPSUBD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSUBD", 3, Operands { v0, v1, v2 })
    // Each operand-form check below registers a candidate encoder via p.add;
    // several forms may match the same operand set.
    // VPSUBD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xfa)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xfa)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSUBD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xfa)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xfa)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSUBD m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // bcode(v[0]) carries the EVEX broadcast bit for the
            // mem/broadcast operand form.
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xfa)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPSUBD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix emitted byte-by-byte (0x62 escape plus three payload
            // bytes); the XOR/OR bit math folds in register-extension bits,
            // the opmask (kcode) and zeroing (zcode).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xfa)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBD m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xfa)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSUBD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xfa)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBD m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xfa)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPSUBD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xfa)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No supported operand form matched.
    if p.len == 0 {
        panic("invalid operands for VPSUBD")
    }
    return p
}
 87511  
 87512  // VPSUBQ performs "Subtract Packed Quadword Integers".
 87513  //
 87514  // Mnemonic        : VPSUBQ
 87515  // Supported forms : (10 forms)
 87516  //
 87517  //    * VPSUBQ xmm, xmm, xmm                   [AVX]
 87518  //    * VPSUBQ m128, xmm, xmm                  [AVX]
 87519  //    * VPSUBQ ymm, ymm, ymm                   [AVX2]
 87520  //    * VPSUBQ m256, ymm, ymm                  [AVX2]
 87521  //    * VPSUBQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
 87522  //    * VPSUBQ zmm, zmm, zmm{k}{z}             [AVX512F]
 87523  //    * VPSUBQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 87524  //    * VPSUBQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 87525  //    * VPSUBQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 87526  //    * VPSUBQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 87527  //
func (self *Program) VPSUBQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSUBQ", 3, Operands { v0, v1, v2 })
    // Each operand-form check below registers a candidate encoder via p.add;
    // several forms may match the same operand set.
    // VPSUBQ xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xfb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBQ m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xfb)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSUBQ ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xfb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBQ m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xfb)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSUBQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // NOTE(review): the 0x85 evex flag here (vs 0x05 in the dword
            // VPSUBD forms) presumably sets EVEX.W for 64-bit elements;
            // bcode(v[0]) carries the broadcast bit — confirm against m.evex.
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xfb)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPSUBQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix emitted byte-by-byte (0x62 escape plus three payload
            // bytes); the XOR/OR bit math folds in register-extension bits,
            // the opmask (kcode) and zeroing (zcode).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xfb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xfb)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSUBQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xfb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xfb)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPSUBQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xfb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No supported operand form matched.
    if p.len == 0 {
        panic("invalid operands for VPSUBQ")
    }
    return p
}
 87644  
 87645  // VPSUBSB performs "Subtract Packed Signed Byte Integers with Signed Saturation".
 87646  //
 87647  // Mnemonic        : VPSUBSB
 87648  // Supported forms : (10 forms)
 87649  //
 87650  //    * VPSUBSB xmm, xmm, xmm           [AVX]
 87651  //    * VPSUBSB m128, xmm, xmm          [AVX]
 87652  //    * VPSUBSB ymm, ymm, ymm           [AVX2]
 87653  //    * VPSUBSB m256, ymm, ymm          [AVX2]
 87654  //    * VPSUBSB zmm, zmm, zmm{k}{z}     [AVX512BW]
 87655  //    * VPSUBSB m512, zmm, zmm{k}{z}    [AVX512BW]
 87656  //    * VPSUBSB xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
 87657  //    * VPSUBSB m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
 87658  //    * VPSUBSB ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
 87659  //    * VPSUBSB m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
 87660  //
func (self *Program) VPSUBSB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSUBSB", 3, Operands { v0, v1, v2 })
    // Each operand-form check below registers a candidate encoder via p.add;
    // several forms may match the same operand set.
    // VPSUBSB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xe8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBSB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xe8)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSUBSB ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xe8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBSB m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xe8)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSUBSB zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix emitted byte-by-byte (0x62 escape plus three payload
            // bytes); the XOR/OR bit math folds in register-extension bits,
            // the opmask (kcode) and zeroing (zcode).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xe8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBSB m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe8)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPSUBSB xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xe8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBSB m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe8)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSUBSB ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xe8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBSB m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe8)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No supported operand form matched.
    if p.len == 0 {
        panic("invalid operands for VPSUBSB")
    }
    return p
}
 87777  
// VPSUBSW performs "Subtract Packed Signed Word Integers with Signed Saturation".
//
// Mnemonic        : VPSUBSW
// Supported forms : (10 forms)
//
//    * VPSUBSW xmm, xmm, xmm           [AVX]
//    * VPSUBSW m128, xmm, xmm          [AVX]
//    * VPSUBSW ymm, ymm, ymm           [AVX2]
//    * VPSUBSW m256, ymm, ymm          [AVX2]
//    * VPSUBSW zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPSUBSW m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPSUBSW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSUBSW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPSUBSW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSUBSW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Operands are in AT&T order (source first, destination last). Every form that
// matches the operand types registers one candidate encoding; if none match,
// the call panics. All forms emit opcode byte 0xE9.
func (self *Program) VPSUBSW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSUBSW", 3, Operands { v0, v1, v2 })
    // VPSUBSW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))    // 2-byte VEX prefix; v[1] is the non-destructive second source
            m.emit(0xe9)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: register-direct, reg=dst v[2], rm=src v[0]
        })
    }
    // VPSUBSW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 2-byte VEX prefix for a memory source
            m.emit(0xe9)                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM+SIB+disp; scale 1 = no disp8 compression (VEX)
        })
    }
    // VPSUBSW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))    // 2-byte VEX prefix, 256-bit variant (first arg 5 vs 1 above)
            m.emit(0xe9)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: register-direct, reg=dst, rm=src
        })
    }
    // VPSUBSW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 256-bit VEX prefix for a memory source
            m.emit(0xe9)                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM+SIB+disp, uncompressed displacement
        })
    }
    // VPSUBSW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted R/X/B/R' register-extension bits over base 0xf1
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: inverted vvvv (second source) over base 0x7d
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)  // P2: z flag, V', opmask aaa; 0x40 = 512-bit vector length
            m.emit(0xe9)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: register-direct, reg=dst, rm=src
        })
    }
    // VPSUBSW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // EVEX prefix via helper; 0b10 = 512-bit length, trailing 0 = no broadcast
            m.emit(0xe9)                                                                                 // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)                                                          // ModRM+SIB+disp; disp8 compressed by 64 (full zmm operand)
        })
    }
    // VPSUBSW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted register-extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: inverted vvvv (second source)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)  // P2: z, V', opmask; 0x00 = 128-bit vector length
            m.emit(0xe9)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: register-direct
        })
    }
    // VPSUBSW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // EVEX prefix; 0b00 = 128-bit length
            m.emit(0xe9)                                                                                 // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 16)                                                          // disp8 compressed by 16 (full xmm operand)
        })
    }
    // VPSUBSW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted register-extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: inverted vvvv (second source)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)  // P2: z, V', opmask; 0x20 = 256-bit vector length
            m.emit(0xe9)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: register-direct
        })
    }
    // VPSUBSW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // EVEX prefix; 0b01 = 256-bit length
            m.emit(0xe9)                                                                                 // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 32)                                                          // disp8 compressed by 32 (full ymm operand)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPSUBSW")
    }
    return p
}
 87910  
// VPSUBUSB performs "Subtract Packed Unsigned Byte Integers with Unsigned Saturation".
//
// Mnemonic        : VPSUBUSB
// Supported forms : (10 forms)
//
//    * VPSUBUSB xmm, xmm, xmm           [AVX]
//    * VPSUBUSB m128, xmm, xmm          [AVX]
//    * VPSUBUSB ymm, ymm, ymm           [AVX2]
//    * VPSUBUSB m256, ymm, ymm          [AVX2]
//    * VPSUBUSB zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPSUBUSB m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPSUBUSB xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSUBUSB m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPSUBUSB ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSUBUSB m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Operands are in AT&T order (source first, destination last). Every form that
// matches the operand types registers one candidate encoding; if none match,
// the call panics. All forms emit opcode byte 0xD8.
func (self *Program) VPSUBUSB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSUBUSB", 3, Operands { v0, v1, v2 })
    // VPSUBUSB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))    // 2-byte VEX prefix, 128-bit variant
            m.emit(0xd8)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: register-direct, reg=dst v[2], rm=src v[0]
        })
    }
    // VPSUBUSB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 2-byte VEX prefix for a memory source
            m.emit(0xd8)                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM+SIB+disp; scale 1 = no disp8 compression (VEX)
        })
    }
    // VPSUBUSB ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))    // 2-byte VEX prefix, 256-bit variant
            m.emit(0xd8)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: register-direct
        })
    }
    // VPSUBUSB m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 256-bit VEX prefix for a memory source
            m.emit(0xd8)                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM+SIB+disp, uncompressed displacement
        })
    }
    // VPSUBUSB zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted R/X/B/R' register-extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: inverted vvvv (second source)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)  // P2: z flag, V', opmask aaa; 0x40 = 512-bit vector length
            m.emit(0xd8)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: register-direct
        })
    }
    // VPSUBUSB m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // EVEX prefix via helper; 0b10 = 512-bit length, trailing 0 = no broadcast
            m.emit(0xd8)                                                                                 // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)                                                          // disp8 compressed by 64 (full zmm operand)
        })
    }
    // VPSUBUSB xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted register-extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: inverted vvvv (second source)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)  // P2: z, V', opmask; 0x00 = 128-bit vector length
            m.emit(0xd8)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: register-direct
        })
    }
    // VPSUBUSB m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // EVEX prefix; 0b00 = 128-bit length
            m.emit(0xd8)                                                                                 // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 16)                                                          // disp8 compressed by 16 (full xmm operand)
        })
    }
    // VPSUBUSB ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted register-extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: inverted vvvv (second source)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)  // P2: z, V', opmask; 0x20 = 256-bit vector length
            m.emit(0xd8)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: register-direct
        })
    }
    // VPSUBUSB m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // EVEX prefix; 0b01 = 256-bit length
            m.emit(0xd8)                                                                                 // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 32)                                                          // disp8 compressed by 32 (full ymm operand)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPSUBUSB")
    }
    return p
}
 88043  
// VPSUBUSW performs "Subtract Packed Unsigned Word Integers with Unsigned Saturation".
//
// Mnemonic        : VPSUBUSW
// Supported forms : (10 forms)
//
//    * VPSUBUSW xmm, xmm, xmm           [AVX]
//    * VPSUBUSW m128, xmm, xmm          [AVX]
//    * VPSUBUSW ymm, ymm, ymm           [AVX2]
//    * VPSUBUSW m256, ymm, ymm          [AVX2]
//    * VPSUBUSW zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPSUBUSW m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPSUBUSW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSUBUSW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPSUBUSW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSUBUSW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Operands are in AT&T order (source first, destination last). Every form that
// matches the operand types registers one candidate encoding; if none match,
// the call panics. All forms emit opcode byte 0xD9.
func (self *Program) VPSUBUSW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSUBUSW", 3, Operands { v0, v1, v2 })
    // VPSUBUSW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))    // 2-byte VEX prefix, 128-bit variant
            m.emit(0xd9)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: register-direct, reg=dst v[2], rm=src v[0]
        })
    }
    // VPSUBUSW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 2-byte VEX prefix for a memory source
            m.emit(0xd9)                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM+SIB+disp; scale 1 = no disp8 compression (VEX)
        })
    }
    // VPSUBUSW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))    // 2-byte VEX prefix, 256-bit variant
            m.emit(0xd9)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: register-direct
        })
    }
    // VPSUBUSW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 256-bit VEX prefix for a memory source
            m.emit(0xd9)                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM+SIB+disp, uncompressed displacement
        })
    }
    // VPSUBUSW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted R/X/B/R' register-extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: inverted vvvv (second source)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)  // P2: z flag, V', opmask aaa; 0x40 = 512-bit vector length
            m.emit(0xd9)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: register-direct
        })
    }
    // VPSUBUSW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // EVEX prefix via helper; 0b10 = 512-bit length, trailing 0 = no broadcast
            m.emit(0xd9)                                                                                 // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)                                                          // disp8 compressed by 64 (full zmm operand)
        })
    }
    // VPSUBUSW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted register-extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: inverted vvvv (second source)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)  // P2: z, V', opmask; 0x00 = 128-bit vector length
            m.emit(0xd9)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: register-direct
        })
    }
    // VPSUBUSW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // EVEX prefix; 0b00 = 128-bit length
            m.emit(0xd9)                                                                                 // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 16)                                                          // disp8 compressed by 16 (full xmm operand)
        })
    }
    // VPSUBUSW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted register-extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: inverted vvvv (second source)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)  // P2: z, V', opmask; 0x20 = 256-bit vector length
            m.emit(0xd9)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: register-direct
        })
    }
    // VPSUBUSW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // EVEX prefix; 0b01 = 256-bit length
            m.emit(0xd9)                                                                                 // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 32)                                                          // disp8 compressed by 32 (full ymm operand)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPSUBUSW")
    }
    return p
}
 88176  
// VPSUBW performs "Subtract Packed Word Integers".
//
// Mnemonic        : VPSUBW
// Supported forms : (10 forms)
//
//    * VPSUBW xmm, xmm, xmm           [AVX]
//    * VPSUBW m128, xmm, xmm          [AVX]
//    * VPSUBW ymm, ymm, ymm           [AVX2]
//    * VPSUBW m256, ymm, ymm          [AVX2]
//    * VPSUBW zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPSUBW m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPSUBW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSUBW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPSUBW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSUBW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Operands are in AT&T order (source first, destination last). Every form that
// matches the operand types registers one candidate encoding; if none match,
// the call panics. All forms emit opcode byte 0xF9.
func (self *Program) VPSUBW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSUBW", 3, Operands { v0, v1, v2 })
    // VPSUBW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))    // 2-byte VEX prefix, 128-bit variant
            m.emit(0xf9)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: register-direct, reg=dst v[2], rm=src v[0]
        })
    }
    // VPSUBW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 2-byte VEX prefix for a memory source
            m.emit(0xf9)                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM+SIB+disp; scale 1 = no disp8 compression (VEX)
        })
    }
    // VPSUBW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))    // 2-byte VEX prefix, 256-bit variant
            m.emit(0xf9)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: register-direct
        })
    }
    // VPSUBW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 256-bit VEX prefix for a memory source
            m.emit(0xf9)                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM+SIB+disp, uncompressed displacement
        })
    }
    // VPSUBW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted R/X/B/R' register-extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: inverted vvvv (second source)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)  // P2: z flag, V', opmask aaa; 0x40 = 512-bit vector length
            m.emit(0xf9)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: register-direct
        })
    }
    // VPSUBW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // EVEX prefix via helper; 0b10 = 512-bit length, trailing 0 = no broadcast
            m.emit(0xf9)                                                                                 // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)                                                          // disp8 compressed by 64 (full zmm operand)
        })
    }
    // VPSUBW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted register-extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: inverted vvvv (second source)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)  // P2: z, V', opmask; 0x00 = 128-bit vector length
            m.emit(0xf9)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: register-direct
        })
    }
    // VPSUBW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // EVEX prefix; 0b00 = 128-bit length
            m.emit(0xf9)                                                                                 // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 16)                                                          // disp8 compressed by 16 (full xmm operand)
        })
    }
    // VPSUBW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted register-extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: inverted vvvv (second source)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)  // P2: z, V', opmask; 0x20 = 256-bit vector length
            m.emit(0xf9)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: register-direct
        })
    }
    // VPSUBW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // EVEX prefix; 0b01 = 256-bit length
            m.emit(0xf9)                                                                                 // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 32)                                                          // disp8 compressed by 32 (full ymm operand)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPSUBW")
    }
    return p
}
 88309  
// VPTERNLOGD performs "Bitwise Ternary Logical Operation on Doubleword Values".
//
// Mnemonic        : VPTERNLOGD
// Supported forms : (6 forms)
//
//    * VPTERNLOGD imm8, m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPTERNLOGD imm8, zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPTERNLOGD imm8, m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPTERNLOGD imm8, xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPTERNLOGD imm8, m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPTERNLOGD imm8, ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Operands are in AT&T order; v0 is the imm8 selector (the 8-bit truth table
// of the ternary logic function), v3 is the destination. Every form that
// matches registers one candidate encoding; if none match, the call panics.
// All forms emit opcode byte 0x25 followed by the immediate.
func (self *Program) VPTERNLOGD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPTERNLOGD", 4, Operands { v0, v1, v2, v3 })
    // VPTERNLOGD imm8, m512/m32bcst, zmm, zmm{k}{z}
    if isImm8(v0) && isM512M32bcst(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1])) // EVEX prefix; 0b10 = 512-bit length, bcode = embedded-broadcast bit from the memory operand
            m.emit(0x25)                                                                                           // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 64)                                                                    // ModRM+SIB+disp; disp8 compressed by 64 (full zmm operand)
            m.imm1(toImmAny(v[0]))                                                                                 // imm8: ternary-logic truth table
        })
    }
    // VPTERNLOGD imm8, zmm, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4))) // P0: inverted R/X/B/R' register-extension bits (base 0xf3: 0F3A opcode map)
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                                             // P1: inverted vvvv (second source v[2])
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)  // P2: z flag, V', opmask aaa; 0x40 = 512-bit vector length
            m.emit(0x25)                                                                   // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                  // ModRM: register-direct, reg=dst v[3], rm=src v[1]
            m.imm1(toImmAny(v[0]))                                                         // imm8: ternary-logic truth table
        })
    }
    // VPTERNLOGD imm8, m128/m32bcst, xmm, xmm{k}{z}
    if isImm8(v0) && isM128M32bcst(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1])) // EVEX prefix; 0b00 = 128-bit length, bcode = broadcast bit
            m.emit(0x25)                                                                                           // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 16)                                                                    // disp8 compressed by 16 (full xmm operand)
            m.imm1(toImmAny(v[0]))                                                                                 // imm8: ternary-logic truth table
        })
    }
    // VPTERNLOGD imm8, xmm, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4))) // P0: inverted register-extension bits
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                                             // P1: inverted vvvv (second source v[2])
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)  // P2: z, V', opmask; 0x00 = 128-bit vector length
            m.emit(0x25)                                                                   // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                  // ModRM: register-direct
            m.imm1(toImmAny(v[0]))                                                         // imm8: ternary-logic truth table
        })
    }
    // VPTERNLOGD imm8, m256/m32bcst, ymm, ymm{k}{z}
    if isImm8(v0) && isM256M32bcst(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1])) // EVEX prefix; 0b01 = 256-bit length, bcode = broadcast bit
            m.emit(0x25)                                                                                           // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 32)                                                                    // disp8 compressed by 32 (full ymm operand)
            m.imm1(toImmAny(v[0]))                                                                                 // imm8: ternary-logic truth table
        })
    }
    // VPTERNLOGD imm8, ymm, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4))) // P0: inverted register-extension bits
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                                             // P1: inverted vvvv (second source v[2])
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)  // P2: z, V', opmask; 0x20 = 256-bit vector length
            m.emit(0x25)                                                                   // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                  // ModRM: register-direct
            m.imm1(toImmAny(v[0]))                                                         // imm8: ternary-logic truth table
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPTERNLOGD")
    }
    return p
}
 88404  
// VPTERNLOGQ performs "Bitwise Ternary Logical Operation on Quadword Values".
//
// Each matching operand form below appends one candidate encoding to the
// instruction; if no form matches, the function panics.
//
// Mnemonic        : VPTERNLOGQ
// Supported forms : (6 forms)
//
//    * VPTERNLOGQ imm8, m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPTERNLOGQ imm8, zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPTERNLOGQ imm8, m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPTERNLOGQ imm8, xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPTERNLOGQ imm8, m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPTERNLOGQ imm8, ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPTERNLOGQ(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPTERNLOGQ", 4, Operands { v0, v1, v2, v3 })
    // VPTERNLOGQ imm8, m512/m64bcst, zmm, zmm{k}{z}
    if isImm8(v0) && isM512M64bcst(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: EVEX prefix via helper, then opcode 0x25, ModRM
            // (disp8 scaled by 64), and the imm8 truth-table selector.
            m.evex(0b11, 0x85, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x25)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPTERNLOGQ imm8, zmm, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: 4-byte EVEX prefix assembled inline from the
            // operand codes, then opcode 0x25, ModRM, and the imm8 selector.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x25)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPTERNLOGQ imm8, m128/m64bcst, xmm, xmm{k}{z}
    if isImm8(v0) && isM128M64bcst(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form (128-bit): disp8 scaled by 16.
            m.evex(0b11, 0x85, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x25)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPTERNLOGQ imm8, xmm, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form (128-bit): inline EVEX prefix; length bits 0x00.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0x25)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPTERNLOGQ imm8, m256/m64bcst, ymm, ymm{k}{z}
    if isImm8(v0) && isM256M64bcst(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form (256-bit): disp8 scaled by 32.
            m.evex(0b11, 0x85, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x25)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPTERNLOGQ imm8, ymm, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form (256-bit): inline EVEX prefix; length bits 0x20.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x25)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPTERNLOGQ")
    }
    return p
}
 88499  
// VPTEST performs "Packed Logical Compare".
//
// VEX-encoded (not EVEX): there is no opmask/zeroing variant. Each matching
// operand form appends one candidate encoding; if none match, it panics.
//
// Mnemonic        : VPTEST
// Supported forms : (4 forms)
//
//    * VPTEST xmm, xmm     [AVX]
//    * VPTEST m128, xmm    [AVX]
//    * VPTEST ymm, ymm     [AVX]
//    * VPTEST m256, ymm    [AVX]
//
func (self *Program) VPTEST(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPTEST", 2, Operands { v0, v1 })
    // VPTEST xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: 3-byte VEX prefix (0xc4) emitted inline,
            // then opcode 0x17 and ModRM.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0x17)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPTEST m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: VEX prefix via helper, then opcode and ModRM/SIB.
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x17)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPTEST ymm, ymm
    if isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form (256-bit): third VEX byte 0x7d selects VEX.L=1.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)
            m.emit(0x17)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPTEST m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form (256-bit): VEX prefix via helper.
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x17)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPTEST")
    }
    return p
}
 88561  
// VPTESTMB performs "Logical AND of Packed Byte Integer Values and Set Mask".
//
// The destination is an opmask register (k{k}); byte granularity means no
// embedded broadcast forms. Each matching operand form appends one candidate
// encoding; if none match, the function panics.
//
// Mnemonic        : VPTESTMB
// Supported forms : (6 forms)
//
//    * VPTESTMB zmm, zmm, k{k}     [AVX512BW]
//    * VPTESTMB m512, zmm, k{k}    [AVX512BW]
//    * VPTESTMB xmm, xmm, k{k}     [AVX512BW,AVX512VL]
//    * VPTESTMB m128, xmm, k{k}    [AVX512BW,AVX512VL]
//    * VPTESTMB ymm, ymm, k{k}     [AVX512BW,AVX512VL]
//    * VPTESTMB m256, ymm, k{k}    [AVX512BW,AVX512VL]
//
func (self *Program) VPTESTMB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPTESTMB", 3, Operands { v0, v1, v2 })
    // VPTESTMB zmm, zmm, k{k}
    if isZMM(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form (512-bit): inline EVEX prefix, opcode 0x26, ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTMB m512, zmm, k{k}
    if isM512(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form (512-bit): EVEX via helper; disp8 scaled by 64.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
            m.emit(0x26)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPTESTMB xmm, xmm, k{k}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form (128-bit): length bits 0x00.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTMB m128, xmm, k{k}
    if isM128(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form (128-bit): disp8 scaled by 16.
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
            m.emit(0x26)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPTESTMB ymm, ymm, k{k}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form (256-bit): length bits 0x20.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTMB m256, ymm, k{k}
    if isM256(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form (256-bit): disp8 scaled by 32.
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
            m.emit(0x26)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPTESTMB")
    }
    return p
}
 88650  
// VPTESTMD performs "Logical AND of Packed Doubleword Integer Values and Set Mask".
//
// The destination is an opmask register (k{k}); memory forms support 32-bit
// embedded broadcast (bcode). Each matching operand form appends one
// candidate encoding; if none match, the function panics.
//
// Mnemonic        : VPTESTMD
// Supported forms : (6 forms)
//
//    * VPTESTMD m512/m32bcst, zmm, k{k}    [AVX512F]
//    * VPTESTMD zmm, zmm, k{k}             [AVX512F]
//    * VPTESTMD m128/m32bcst, xmm, k{k}    [AVX512F,AVX512VL]
//    * VPTESTMD xmm, xmm, k{k}             [AVX512F,AVX512VL]
//    * VPTESTMD m256/m32bcst, ymm, k{k}    [AVX512F,AVX512VL]
//    * VPTESTMD ymm, ymm, k{k}             [AVX512F,AVX512VL]
//
func (self *Program) VPTESTMD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPTESTMD", 3, Operands { v0, v1, v2 })
    // VPTESTMD m512/m32bcst, zmm, k{k}
    if isM512M32bcst(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form (512-bit): broadcast bit from bcode; disp8 scaled by 64.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
            m.emit(0x27)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPTESTMD zmm, zmm, k{k}
    if isZMM(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form (512-bit): inline EVEX prefix, opcode 0x27, ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x27)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTMD m128/m32bcst, xmm, k{k}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form (128-bit): disp8 scaled by 16.
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
            m.emit(0x27)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPTESTMD xmm, xmm, k{k}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form (128-bit): length bits 0x00.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x27)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTMD m256/m32bcst, ymm, k{k}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form (256-bit): disp8 scaled by 32.
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
            m.emit(0x27)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPTESTMD ymm, ymm, k{k}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form (256-bit): length bits 0x20.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x27)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPTESTMD")
    }
    return p
}
 88739  
// VPTESTMQ performs "Logical AND of Packed Quadword Integer Values and Set Mask".
//
// Quadword variant of VPTESTMD (EVEX.W set — note the 0xfd/0x85 prefix bytes
// versus 0x7d/0x05 in the doubleword encoder); memory forms support 64-bit
// embedded broadcast. Each matching operand form appends one candidate
// encoding; if none match, the function panics.
//
// Mnemonic        : VPTESTMQ
// Supported forms : (6 forms)
//
//    * VPTESTMQ m512/m64bcst, zmm, k{k}    [AVX512F]
//    * VPTESTMQ zmm, zmm, k{k}             [AVX512F]
//    * VPTESTMQ m128/m64bcst, xmm, k{k}    [AVX512F,AVX512VL]
//    * VPTESTMQ xmm, xmm, k{k}             [AVX512F,AVX512VL]
//    * VPTESTMQ m256/m64bcst, ymm, k{k}    [AVX512F,AVX512VL]
//    * VPTESTMQ ymm, ymm, k{k}             [AVX512F,AVX512VL]
//
func (self *Program) VPTESTMQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPTESTMQ", 3, Operands { v0, v1, v2 })
    // VPTESTMQ m512/m64bcst, zmm, k{k}
    if isM512M64bcst(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form (512-bit): broadcast bit from bcode; disp8 scaled by 64.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
            m.emit(0x27)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPTESTMQ zmm, zmm, k{k}
    if isZMM(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form (512-bit): inline EVEX prefix, opcode 0x27, ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x27)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTMQ m128/m64bcst, xmm, k{k}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form (128-bit): disp8 scaled by 16.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
            m.emit(0x27)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPTESTMQ xmm, xmm, k{k}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form (128-bit): length bits 0x00.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x27)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTMQ m256/m64bcst, ymm, k{k}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form (256-bit): disp8 scaled by 32.
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
            m.emit(0x27)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPTESTMQ ymm, ymm, k{k}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form (256-bit): length bits 0x20.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x27)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPTESTMQ")
    }
    return p
}
 88828  
// VPTESTMW performs "Logical AND of Packed Word Integer Values and Set Mask".
//
// Word variant of VPTESTMB (EVEX.W set — 0xfd/0x85 prefix bytes versus
// 0x7d/0x05 in the byte encoder); no embedded broadcast forms. Each matching
// operand form appends one candidate encoding; if none match, it panics.
//
// Mnemonic        : VPTESTMW
// Supported forms : (6 forms)
//
//    * VPTESTMW zmm, zmm, k{k}     [AVX512BW]
//    * VPTESTMW m512, zmm, k{k}    [AVX512BW]
//    * VPTESTMW xmm, xmm, k{k}     [AVX512BW,AVX512VL]
//    * VPTESTMW m128, xmm, k{k}    [AVX512BW,AVX512VL]
//    * VPTESTMW ymm, ymm, k{k}     [AVX512BW,AVX512VL]
//    * VPTESTMW m256, ymm, k{k}    [AVX512BW,AVX512VL]
//
func (self *Program) VPTESTMW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPTESTMW", 3, Operands { v0, v1, v2 })
    // VPTESTMW zmm, zmm, k{k}
    if isZMM(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form (512-bit): inline EVEX prefix, opcode 0x26, ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTMW m512, zmm, k{k}
    if isM512(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form (512-bit): EVEX via helper; disp8 scaled by 64.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
            m.emit(0x26)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPTESTMW xmm, xmm, k{k}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form (128-bit): length bits 0x00.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTMW m128, xmm, k{k}
    if isM128(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form (128-bit): disp8 scaled by 16.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
            m.emit(0x26)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPTESTMW ymm, ymm, k{k}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form (256-bit): length bits 0x20.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTMW m256, ymm, k{k}
    if isM256(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form (256-bit): disp8 scaled by 32.
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
            m.emit(0x26)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPTESTMW")
    }
    return p
}
 88917  
// VPTESTNMB performs "Logical NAND of Packed Byte Integer Values and Set Mask".
//
// Complement of VPTESTMB — same opcode 0x26 but a different EVEX prefix byte
// (0x7e / pp=0x06 versus 0x7d / pp=0x05). Each matching operand form appends
// one candidate encoding; if none match, the function panics.
//
// Mnemonic        : VPTESTNMB
// Supported forms : (6 forms)
//
//    * VPTESTNMB zmm, zmm, k{k}     [AVX512BW,AVX512F]
//    * VPTESTNMB m512, zmm, k{k}    [AVX512BW,AVX512F]
//    * VPTESTNMB xmm, xmm, k{k}     [AVX512BW,AVX512VL]
//    * VPTESTNMB m128, xmm, k{k}    [AVX512BW,AVX512VL]
//    * VPTESTNMB ymm, ymm, k{k}     [AVX512BW,AVX512VL]
//    * VPTESTNMB m256, ymm, k{k}    [AVX512BW,AVX512VL]
//
func (self *Program) VPTESTNMB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPTESTNMB", 3, Operands { v0, v1, v2 })
    // VPTESTNMB zmm, zmm, k{k}
    if isZMM(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512F | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form (512-bit): inline EVEX prefix, opcode 0x26, ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTNMB m512, zmm, k{k}
    if isM512(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512F | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form (512-bit): EVEX via helper; disp8 scaled by 64.
            m.evex(0b10, 0x06, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
            m.emit(0x26)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPTESTNMB xmm, xmm, k{k}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form (128-bit): length bits 0x00.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTNMB m128, xmm, k{k}
    if isM128(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form (128-bit): disp8 scaled by 16.
            m.evex(0b10, 0x06, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
            m.emit(0x26)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPTESTNMB ymm, ymm, k{k}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form (256-bit): length bits 0x20.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTNMB m256, ymm, k{k}
    if isM256(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form (256-bit): disp8 scaled by 32.
            m.evex(0b10, 0x06, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
            m.emit(0x26)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPTESTNMB")
    }
    return p
}
 89006  
// VPTESTNMD performs "Logical NAND of Packed Doubleword Integer Values and Set Mask".
//
// Complement of VPTESTMD — same opcode 0x27 but a different EVEX prefix byte
// (0x7e / pp=0x06 versus 0x7d / pp=0x05); memory forms support 32-bit
// embedded broadcast (bcode). Each matching operand form appends one
// candidate encoding; if none match, the function panics.
//
// Mnemonic        : VPTESTNMD
// Supported forms : (6 forms)
//
//    * VPTESTNMD m512/m32bcst, zmm, k{k}    [AVX512F]
//    * VPTESTNMD zmm, zmm, k{k}             [AVX512F]
//    * VPTESTNMD m128/m32bcst, xmm, k{k}    [AVX512F,AVX512VL]
//    * VPTESTNMD xmm, xmm, k{k}             [AVX512F,AVX512VL]
//    * VPTESTNMD m256/m32bcst, ymm, k{k}    [AVX512F,AVX512VL]
//    * VPTESTNMD ymm, ymm, k{k}             [AVX512F,AVX512VL]
//
func (self *Program) VPTESTNMD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPTESTNMD", 3, Operands { v0, v1, v2 })
    // VPTESTNMD m512/m32bcst, zmm, k{k}
    if isM512M32bcst(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form (512-bit): broadcast bit from bcode; disp8 scaled by 64.
            m.evex(0b10, 0x06, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
            m.emit(0x27)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPTESTNMD zmm, zmm, k{k}
    if isZMM(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form (512-bit): inline EVEX prefix, opcode 0x27, ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x27)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTNMD m128/m32bcst, xmm, k{k}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form (128-bit): disp8 scaled by 16.
            m.evex(0b10, 0x06, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
            m.emit(0x27)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPTESTNMD xmm, xmm, k{k}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form (128-bit): length bits 0x00.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x27)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTNMD m256/m32bcst, ymm, k{k}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form (256-bit): disp8 scaled by 32.
            m.evex(0b10, 0x06, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
            m.emit(0x27)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPTESTNMD ymm, ymm, k{k}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form (256-bit): length bits 0x20.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x27)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPTESTNMD")
    }
    return p
}
 89095  
// VPTESTNMQ performs "Logical NAND of Packed Quadword Integer Values and Set Mask".
//
// For every 64-bit lane it tests (first source AND second source) and sets the
// corresponding bit of the destination mask register k when the result is zero,
// optionally gated through a write-mask {k}.
//
// Mnemonic        : VPTESTNMQ
// Supported forms : (6 forms)
//
//    * VPTESTNMQ m512/m64bcst, zmm, k{k}    [AVX512F]
//    * VPTESTNMQ zmm, zmm, k{k}             [AVX512F]
//    * VPTESTNMQ m128/m64bcst, xmm, k{k}    [AVX512F,AVX512VL]
//    * VPTESTNMQ xmm, xmm, k{k}             [AVX512F,AVX512VL]
//    * VPTESTNMQ m256/m64bcst, ymm, k{k}    [AVX512F,AVX512VL]
//    * VPTESTNMQ ymm, ymm, k{k}             [AVX512F,AVX512VL]
//
func (self *Program) VPTESTNMQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Allocate the instruction, then register one candidate encoder for every
    // operand form that matches (v0, v1, v2); the closures run later, at
    // encode time, with v holding the operands in source order.
    p := self.alloc("VPTESTNMQ", 3, Operands { v0, v1, v2 })
    // VPTESTNMQ m512/m64bcst, zmm, k{k} -- EVEX.512.F3.0F38.W1 27 /r
    if isM512M64bcst(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: evex() packs the whole 4-byte EVEX prefix
            // (vector length 0b10 = 512-bit, broadcast bit taken from the
            // memory operand, write-mask from the k register).
            m.evex(0b10, 0x86, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
            m.emit(0x27)
            // ModRM/SIB/displacement; disp8 is compressed with scale N = 64.
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPTESTNMQ zmm, zmm, k{k}
    if isZMM(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: the EVEX prefix is written out byte by byte.
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: R/B/R' register-extension bits over map 0F38
            m.emit(0xfe ^ (hlcode(v[1]) << 3))                                             // P1: W=1, inverted vvvv <- second source, pp = F3
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)                       // P2: V' <- second source, aaa = write-mask, L'L = 512-bit
            m.emit(0x27)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: mod=11, reg = k destination, r/m = first source
        })
    }
    // VPTESTNMQ m128/m64bcst, xmm, k{k} -- as the 512-bit memory form with length 0b00, disp8 scale 16
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x86, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
            m.emit(0x27)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPTESTNMQ xmm, xmm, k{k} -- same prefix layout as the zmm form, L'L = 128-bit (0x00)
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfe ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x27)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTNMQ m256/m64bcst, ymm, k{k} -- 256-bit length (0b01), disp8 scale 32
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x86, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
            m.emit(0x27)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPTESTNMQ ymm, ymm, k{k} -- same prefix layout as the zmm form, L'L = 256-bit (0x20)
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfe ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x27)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operands are not encodable as VPTESTNMQ.
    if p.len == 0 {
        panic("invalid operands for VPTESTNMQ")
    }
    return p
}
 89184  
// VPTESTNMW performs "Logical NAND of Packed Word Integer Values and Set Mask".
//
// For every 16-bit lane it tests (first source AND second source) and sets the
// corresponding bit of the destination mask register k when the result is zero,
// optionally gated through a write-mask {k}.
//
// Mnemonic        : VPTESTNMW
// Supported forms : (6 forms)
//
//    * VPTESTNMW zmm, zmm, k{k}     [AVX512BW,AVX512F]
//    * VPTESTNMW m512, zmm, k{k}    [AVX512BW,AVX512F]
//    * VPTESTNMW xmm, xmm, k{k}     [AVX512BW,AVX512VL]
//    * VPTESTNMW m128, xmm, k{k}    [AVX512BW,AVX512VL]
//    * VPTESTNMW ymm, ymm, k{k}     [AVX512BW,AVX512VL]
//    * VPTESTNMW m256, ymm, k{k}    [AVX512BW,AVX512VL]
//
func (self *Program) VPTESTNMW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Allocate the instruction, then register one candidate encoder for every
    // operand form that matches (v0, v1, v2); the closures run later, at
    // encode time, with v holding the operands in source order.
    p := self.alloc("VPTESTNMW", 3, Operands { v0, v1, v2 })
    // VPTESTNMW zmm, zmm, k{k} -- EVEX.512.F3.0F38.W1 26 /r
    if isZMM(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512F | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: the EVEX prefix is written out byte by byte.
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: R/B/R' register-extension bits over map 0F38
            m.emit(0xfe ^ (hlcode(v[1]) << 3))                                             // P1: W=1, inverted vvvv <- second source, pp = F3
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)                       // P2: V' <- second source, aaa = write-mask, L'L = 512-bit
            m.emit(0x26)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: mod=11, reg = k destination, r/m = first source
        })
    }
    // VPTESTNMW m512, zmm, k{k}
    if isM512(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512F | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form; the word-granularity variant has no embedded
            // broadcast, so the trailing broadcast argument is hard-wired to 0.
            m.evex(0b10, 0x86, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
            m.emit(0x26)
            // ModRM/SIB/displacement; disp8 is compressed with scale N = 64.
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPTESTNMW xmm, xmm, k{k} -- same prefix layout as the zmm form, L'L = 128-bit (0x00)
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfe ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTNMW m128, xmm, k{k} -- 128-bit length (0b00), disp8 scale 16, no broadcast
    if isM128(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x86, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
            m.emit(0x26)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPTESTNMW ymm, ymm, k{k} -- same prefix layout as the zmm form, L'L = 256-bit (0x20)
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfe ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTNMW m256, ymm, k{k} -- 256-bit length (0b01), disp8 scale 32, no broadcast
    if isM256(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x86, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
            m.emit(0x26)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No form matched: the operands are not encodable as VPTESTNMW.
    if p.len == 0 {
        panic("invalid operands for VPTESTNMW")
    }
    return p
}
 89273  
// VPUNPCKHBW performs "Unpack and Interleave High-Order Bytes into Words".
//
// Interleaves the bytes of the high half of each 128-bit lane of the two
// sources into the destination; the AVX-512 forms additionally support
// merging/zeroing through a write-mask {k}{z}.
//
// Mnemonic        : VPUNPCKHBW
// Supported forms : (10 forms)
//
//    * VPUNPCKHBW xmm, xmm, xmm           [AVX]
//    * VPUNPCKHBW m128, xmm, xmm          [AVX]
//    * VPUNPCKHBW ymm, ymm, ymm           [AVX2]
//    * VPUNPCKHBW m256, ymm, ymm          [AVX2]
//    * VPUNPCKHBW zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPUNPCKHBW m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPUNPCKHBW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPUNPCKHBW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPUNPCKHBW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPUNPCKHBW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
func (self *Program) VPUNPCKHBW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Allocate the instruction, then register one candidate encoder for every
    // operand form that matches (v0, v1, v2); the closures run later, at
    // encode time, with v holding the operands in source order.
    p := self.alloc("VPUNPCKHBW", 3, Operands { v0, v1, v2 })
    // VPUNPCKHBW xmm, xmm, xmm -- VEX-encoded (opcode 0x68); the leading table
    // selector of vex2() picks the 66.0F prefix/length combination (1 = 128-bit,
    // 5 = 256-bit below).
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x68)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg = destination, r/m = first source
        })
    }
    // VPUNPCKHBW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x68)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)             // VEX memory form: displacement not compressed (scale 1)
        })
    }
    // VPUNPCKHBW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x68)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKHBW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x68)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPUNPCKHBW zmm, zmm, zmm{k}{z} -- EVEX-encoded register form
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                         // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))       // P0: R/B/R' register-extension bits over map 0F
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                                   // P1: W=0, inverted vvvv <- second source, pp = 66
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)        // P2: z = zeroing bit, aaa = write-mask, L'L = 512-bit
            m.emit(0x68)                                                                         // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                        // ModRM: mod=11, reg = destination, r/m = first source
        })
    }
    // VPUNPCKHBW m512, zmm, zmm{k}{z} -- EVEX memory form; byte granularity has
    // no broadcast, so the trailing broadcast argument is hard-wired to 0.
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x68)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)            // disp8 compressed with scale N = 64
        })
    }
    // VPUNPCKHBW xmm, xmm, xmm{k}{z} -- same prefix layout as the zmm form, L'L = 128-bit (0x00)
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x68)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKHBW m128, xmm, xmm{k}{z} -- 128-bit length (0b00), disp8 scale 16
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x68)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPUNPCKHBW ymm, ymm, ymm{k}{z} -- same prefix layout as the zmm form, L'L = 256-bit (0x20)
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x68)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKHBW m256, ymm, ymm{k}{z} -- 256-bit length (0b01), disp8 scale 32
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x68)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No form matched: the operands are not encodable as VPUNPCKHBW.
    if p.len == 0 {
        panic("invalid operands for VPUNPCKHBW")
    }
    return p
}
 89406  
// VPUNPCKHDQ performs "Unpack and Interleave High-Order Doublewords into Quadwords".
//
// Interleaves the doublewords of the high half of each 128-bit lane of the
// two sources into the destination; the AVX-512 forms additionally support
// a 32-bit memory broadcast and merging/zeroing through a write-mask {k}{z}.
//
// Mnemonic        : VPUNPCKHDQ
// Supported forms : (10 forms)
//
//    * VPUNPCKHDQ xmm, xmm, xmm                   [AVX]
//    * VPUNPCKHDQ m128, xmm, xmm                  [AVX]
//    * VPUNPCKHDQ ymm, ymm, ymm                   [AVX2]
//    * VPUNPCKHDQ m256, ymm, ymm                  [AVX2]
//    * VPUNPCKHDQ m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPUNPCKHDQ zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPUNPCKHDQ m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPUNPCKHDQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPUNPCKHDQ m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPUNPCKHDQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPUNPCKHDQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Allocate the instruction, then register one candidate encoder for every
    // operand form that matches (v0, v1, v2); the closures run later, at
    // encode time, with v holding the operands in source order.
    p := self.alloc("VPUNPCKHDQ", 3, Operands { v0, v1, v2 })
    // VPUNPCKHDQ xmm, xmm, xmm -- VEX-encoded (opcode 0x6a); the leading table
    // selector of vex2() picks the 66.0F prefix/length combination (1 = 128-bit,
    // 5 = 256-bit below).
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x6a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg = destination, r/m = first source
        })
    }
    // VPUNPCKHDQ m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x6a)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)             // VEX memory form: displacement not compressed (scale 1)
        })
    }
    // VPUNPCKHDQ ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x6a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKHDQ m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x6a)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPUNPCKHDQ m512/m32bcst, zmm, zmm{k}{z} -- EVEX memory form with optional
    // 32-bit broadcast (bcode of the memory operand).
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x6a)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)            // disp8 compressed with scale N = 64
        })
    }
    // VPUNPCKHDQ zmm, zmm, zmm{k}{z} -- EVEX-encoded register form
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                         // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))       // P0: R/B/R' register-extension bits over map 0F
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                                   // P1: W=0, inverted vvvv <- second source, pp = 66
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)        // P2: z = zeroing bit, aaa = write-mask, L'L = 512-bit
            m.emit(0x6a)                                                                         // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                        // ModRM: mod=11, reg = destination, r/m = first source
        })
    }
    // VPUNPCKHDQ m128/m32bcst, xmm, xmm{k}{z} -- 128-bit length (0b00), disp8 scale 16
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x6a)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPUNPCKHDQ xmm, xmm, xmm{k}{z} -- same prefix layout as the zmm form, L'L = 128-bit (0x00)
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x6a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKHDQ m256/m32bcst, ymm, ymm{k}{z} -- 256-bit length (0b01), disp8 scale 32
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x6a)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPUNPCKHDQ ymm, ymm, ymm{k}{z} -- same prefix layout as the zmm form, L'L = 256-bit (0x20)
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x6a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operands are not encodable as VPUNPCKHDQ.
    if p.len == 0 {
        panic("invalid operands for VPUNPCKHDQ")
    }
    return p
}
 89539  
// VPUNPCKHQDQ performs "Unpack and Interleave High-Order Quadwords into Double Quadwords".
//
// Interleaves the quadwords of the high half of each 128-bit lane of the two
// sources into the destination; the AVX-512 forms additionally support a
// 64-bit memory broadcast and merging/zeroing through a write-mask {k}{z}.
//
// Mnemonic        : VPUNPCKHQDQ
// Supported forms : (10 forms)
//
//    * VPUNPCKHQDQ xmm, xmm, xmm                   [AVX]
//    * VPUNPCKHQDQ m128, xmm, xmm                  [AVX]
//    * VPUNPCKHQDQ ymm, ymm, ymm                   [AVX2]
//    * VPUNPCKHQDQ m256, ymm, ymm                  [AVX2]
//    * VPUNPCKHQDQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPUNPCKHQDQ zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPUNPCKHQDQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPUNPCKHQDQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPUNPCKHQDQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPUNPCKHQDQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPUNPCKHQDQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Allocate the instruction, then register one candidate encoder for every
    // operand form that matches (v0, v1, v2); the closures run later, at
    // encode time, with v holding the operands in source order.
    p := self.alloc("VPUNPCKHQDQ", 3, Operands { v0, v1, v2 })
    // VPUNPCKHQDQ xmm, xmm, xmm -- VEX-encoded (opcode 0x6d); the leading table
    // selector of vex2() picks the 66.0F prefix/length combination (1 = 128-bit,
    // 5 = 256-bit below).
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x6d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg = destination, r/m = first source
        })
    }
    // VPUNPCKHQDQ m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x6d)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)             // VEX memory form: displacement not compressed (scale 1)
        })
    }
    // VPUNPCKHQDQ ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x6d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKHQDQ m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x6d)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPUNPCKHQDQ m512/m64bcst, zmm, zmm{k}{z} -- EVEX memory form with optional
    // 64-bit broadcast (bcode of the memory operand); 0x85 selects the W=1 variant.
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x6d)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)            // disp8 compressed with scale N = 64
        })
    }
    // VPUNPCKHQDQ zmm, zmm, zmm{k}{z} -- EVEX-encoded register form
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                         // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))       // P0: R/B/R' register-extension bits over map 0F
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                                   // P1: W=1, inverted vvvv <- second source, pp = 66
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)        // P2: z = zeroing bit, aaa = write-mask, L'L = 512-bit
            m.emit(0x6d)                                                                         // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                        // ModRM: mod=11, reg = destination, r/m = first source
        })
    }
    // VPUNPCKHQDQ m128/m64bcst, xmm, xmm{k}{z} -- 128-bit length (0b00), disp8 scale 16
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x6d)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPUNPCKHQDQ xmm, xmm, xmm{k}{z} -- same prefix layout as the zmm form, L'L = 128-bit (0x00)
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x6d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKHQDQ m256/m64bcst, ymm, ymm{k}{z} -- 256-bit length (0b01), disp8 scale 32
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x6d)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPUNPCKHQDQ ymm, ymm, ymm{k}{z} -- same prefix layout as the zmm form, L'L = 256-bit (0x20)
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x6d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operands are not encodable as VPUNPCKHQDQ.
    if p.len == 0 {
        panic("invalid operands for VPUNPCKHQDQ")
    }
    return p
}
 89672  
 89673  // VPUNPCKHWD performs "Unpack and Interleave High-Order Words into Doublewords".
 89674  //
 89675  // Mnemonic        : VPUNPCKHWD
 89676  // Supported forms : (10 forms)
 89677  //
 89678  //    * VPUNPCKHWD xmm, xmm, xmm           [AVX]
 89679  //    * VPUNPCKHWD m128, xmm, xmm          [AVX]
 89680  //    * VPUNPCKHWD ymm, ymm, ymm           [AVX2]
 89681  //    * VPUNPCKHWD m256, ymm, ymm          [AVX2]
 89682  //    * VPUNPCKHWD zmm, zmm, zmm{k}{z}     [AVX512BW]
 89683  //    * VPUNPCKHWD m512, zmm, zmm{k}{z}    [AVX512BW]
 89684  //    * VPUNPCKHWD xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
 89685  //    * VPUNPCKHWD m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
 89686  //    * VPUNPCKHWD ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
 89687  //    * VPUNPCKHWD m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
 89688  //
 89689  func (self *Program) VPUNPCKHWD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
 89690      p := self.alloc("VPUNPCKHWD", 3, Operands { v0, v1, v2 })
 89691      // VPUNPCKHWD xmm, xmm, xmm
 89692      if isXMM(v0) && isXMM(v1) && isXMM(v2) {
 89693          self.require(ISA_AVX)
 89694          p.domain = DomainAVX
 89695          p.add(0, func(m *_Encoding, v []interface{}) {
 89696              m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
 89697              m.emit(0x69)
 89698              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 89699          })
 89700      }
 89701      // VPUNPCKHWD m128, xmm, xmm
 89702      if isM128(v0) && isXMM(v1) && isXMM(v2) {
 89703          self.require(ISA_AVX)
 89704          p.domain = DomainAVX
 89705          p.add(0, func(m *_Encoding, v []interface{}) {
 89706              m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 89707              m.emit(0x69)
 89708              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 89709          })
 89710      }
 89711      // VPUNPCKHWD ymm, ymm, ymm
 89712      if isYMM(v0) && isYMM(v1) && isYMM(v2) {
 89713          self.require(ISA_AVX2)
 89714          p.domain = DomainAVX
 89715          p.add(0, func(m *_Encoding, v []interface{}) {
 89716              m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
 89717              m.emit(0x69)
 89718              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 89719          })
 89720      }
 89721      // VPUNPCKHWD m256, ymm, ymm
 89722      if isM256(v0) && isYMM(v1) && isYMM(v2) {
 89723          self.require(ISA_AVX2)
 89724          p.domain = DomainAVX
 89725          p.add(0, func(m *_Encoding, v []interface{}) {
 89726              m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 89727              m.emit(0x69)
 89728              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 89729          })
 89730      }
 89731      // VPUNPCKHWD zmm, zmm, zmm{k}{z}
 89732      if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
 89733          self.require(ISA_AVX512BW)
 89734          p.domain = DomainAVX
 89735          p.add(0, func(m *_Encoding, v []interface{}) {
 89736              m.emit(0x62)
 89737              m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 89738              m.emit(0x7d ^ (hlcode(v[1]) << 3))
 89739              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
 89740              m.emit(0x69)
 89741              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 89742          })
 89743      }
 89744      // VPUNPCKHWD m512, zmm, zmm{k}{z}
 89745      if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
 89746          self.require(ISA_AVX512BW)
 89747          p.domain = DomainAVX
 89748          p.add(0, func(m *_Encoding, v []interface{}) {
 89749              m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
 89750              m.emit(0x69)
 89751              m.mrsd(lcode(v[2]), addr(v[0]), 64)
 89752          })
 89753      }
 89754      // VPUNPCKHWD xmm, xmm, xmm{k}{z}
 89755      if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
 89756          self.require(ISA_AVX512VL | ISA_AVX512BW)
 89757          p.domain = DomainAVX
 89758          p.add(0, func(m *_Encoding, v []interface{}) {
 89759              m.emit(0x62)
 89760              m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 89761              m.emit(0x7d ^ (hlcode(v[1]) << 3))
 89762              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
 89763              m.emit(0x69)
 89764              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 89765          })
 89766      }
 89767      // VPUNPCKHWD m128, xmm, xmm{k}{z}
 89768      if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
 89769          self.require(ISA_AVX512VL | ISA_AVX512BW)
 89770          p.domain = DomainAVX
 89771          p.add(0, func(m *_Encoding, v []interface{}) {
 89772              m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
 89773              m.emit(0x69)
 89774              m.mrsd(lcode(v[2]), addr(v[0]), 16)
 89775          })
 89776      }
 89777      // VPUNPCKHWD ymm, ymm, ymm{k}{z}
 89778      if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
 89779          self.require(ISA_AVX512VL | ISA_AVX512BW)
 89780          p.domain = DomainAVX
 89781          p.add(0, func(m *_Encoding, v []interface{}) {
 89782              m.emit(0x62)
 89783              m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 89784              m.emit(0x7d ^ (hlcode(v[1]) << 3))
 89785              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
 89786              m.emit(0x69)
 89787              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 89788          })
 89789      }
 89790      // VPUNPCKHWD m256, ymm, ymm{k}{z}
 89791      if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
 89792          self.require(ISA_AVX512VL | ISA_AVX512BW)
 89793          p.domain = DomainAVX
 89794          p.add(0, func(m *_Encoding, v []interface{}) {
 89795              m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
 89796              m.emit(0x69)
 89797              m.mrsd(lcode(v[2]), addr(v[0]), 32)
 89798          })
 89799      }
 89800      if p.len == 0 {
 89801          panic("invalid operands for VPUNPCKHWD")
 89802      }
 89803      return p
 89804  }
 89805  
// VPUNPCKLBW performs "Unpack and Interleave Low-Order Bytes into Words".
//
// Mnemonic        : VPUNPCKLBW
// Supported forms : (10 forms)
//
//    * VPUNPCKLBW xmm, xmm, xmm           [AVX]
//    * VPUNPCKLBW m128, xmm, xmm          [AVX]
//    * VPUNPCKLBW ymm, ymm, ymm           [AVX2]
//    * VPUNPCKLBW m256, ymm, ymm          [AVX2]
//    * VPUNPCKLBW zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPUNPCKLBW m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPUNPCKLBW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPUNPCKLBW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPUNPCKLBW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPUNPCKLBW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Operand order follows AT&T convention: v0 is the source (reg/mem),
// v1 the second source register, v2 the destination. Each if-block below
// matches one supported operand combination and registers an encoder for
// it; note that a plain XMM/YMM register can satisfy both the VEX and the
// EVEX predicates, so the registration order of p.add calls is significant.
func (self *Program) VPUNPCKLBW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPUNPCKLBW", 3, Operands { v0, v1, v2 })
    // VPUNPCKLBW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))    // 2-byte VEX prefix
            m.emit(0x60)                                  // opcode: PUNPCKLBW
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11 (register-direct), reg=dst, rm=src
        })
    }
    // VPUNPCKLBW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x60)
            m.mrsd(lcode(v[2]), addr(v[0]), 1) // memory-form ModRM/SIB, displacement scale 1
        })
    }
    // VPUNPCKLBW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1])) // 5 selects the 256-bit (VEX.L=1) variant
            m.emit(0x60)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKLBW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x60)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPUNPCKLBW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix emitted manually; 0x40 in the last byte sets
            // the 512-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x60)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKLBW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x60)
            m.mrsd(lcode(v[2]), addr(v[0]), 64) // disp8 scaled by 64 (full 512-bit memory operand)
        })
    }
    // VPUNPCKLBW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00) // 0x00 = 128-bit length
            m.emit(0x60)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKLBW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x60)
            m.mrsd(lcode(v[2]), addr(v[0]), 16) // disp8 scaled by 16 (128-bit memory operand)
        })
    }
    // VPUNPCKLBW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20) // 0x20 = 256-bit length
            m.emit(0x60)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKLBW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x61)
            m.mrsd(lcode(v[2]), addr(v[0]), 32) // disp8 scaled by 32 (256-bit memory operand)
        })
    }
    // No operand pattern matched: the call site passed an unsupported combination.
    if p.len == 0 {
        panic("invalid operands for VPUNPCKLBW")
    }
    return p
}
 89938  
// VPUNPCKLDQ performs "Unpack and Interleave Low-Order Doublewords into Quadwords".
//
// Mnemonic        : VPUNPCKLDQ
// Supported forms : (10 forms)
//
//    * VPUNPCKLDQ xmm, xmm, xmm                   [AVX]
//    * VPUNPCKLDQ m128, xmm, xmm                  [AVX]
//    * VPUNPCKLDQ ymm, ymm, ymm                   [AVX2]
//    * VPUNPCKLDQ m256, ymm, ymm                  [AVX2]
//    * VPUNPCKLDQ m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPUNPCKLDQ zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPUNPCKLDQ m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPUNPCKLDQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPUNPCKLDQ m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPUNPCKLDQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Operand order follows AT&T convention: v0 is the source (reg/mem, with
// optional 32-bit broadcast for the EVEX forms), v1 the second source
// register, v2 the destination. Unlike the byte/word variants this
// instruction lives under AVX512F and its memory forms support embedded
// broadcast (bcode(v[0]) is forwarded to the EVEX encoder). A plain
// XMM/YMM register can satisfy both the VEX and EVEX predicates, so the
// registration order of p.add calls is significant.
func (self *Program) VPUNPCKLDQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPUNPCKLDQ", 3, Operands { v0, v1, v2 })
    // VPUNPCKLDQ xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))    // 2-byte VEX prefix
            m.emit(0x62)                                  // opcode: PUNPCKLDQ
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11 (register-direct), reg=dst, rm=src
        })
    }
    // VPUNPCKLDQ m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x62)
            m.mrsd(lcode(v[2]), addr(v[0]), 1) // memory-form ModRM/SIB, displacement scale 1
        })
    }
    // VPUNPCKLDQ ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1])) // 5 selects the 256-bit (VEX.L=1) variant
            m.emit(0x62)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKLDQ m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x62)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPUNPCKLDQ m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // bcode(v[0]) carries the broadcast bit for the m32bcst form.
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x62)
            m.mrsd(lcode(v[2]), addr(v[0]), 64) // disp8 scaled by 64 (full 512-bit memory operand)
        })
    }
    // VPUNPCKLDQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix emitted manually; 0x40 in the last byte sets
            // the 512-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x62)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKLDQ m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x62)
            m.mrsd(lcode(v[2]), addr(v[0]), 16) // disp8 scaled by 16 (128-bit memory operand)
        })
    }
    // VPUNPCKLDQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00) // 0x00 = 128-bit length
            m.emit(0x62)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKLDQ m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x62)
            m.mrsd(lcode(v[2]), addr(v[0]), 32) // disp8 scaled by 32 (256-bit memory operand)
        })
    }
    // VPUNPCKLDQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20) // 0x20 = 256-bit length
            m.emit(0x62)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand pattern matched: the call site passed an unsupported combination.
    if p.len == 0 {
        panic("invalid operands for VPUNPCKLDQ")
    }
    return p
}
 90071  
// VPUNPCKLQDQ performs "Unpack and Interleave Low-Order Quadwords into Double Quadwords".
//
// Mnemonic        : VPUNPCKLQDQ
// Supported forms : (10 forms)
//
//    * VPUNPCKLQDQ xmm, xmm, xmm                   [AVX]
//    * VPUNPCKLQDQ m128, xmm, xmm                  [AVX]
//    * VPUNPCKLQDQ ymm, ymm, ymm                   [AVX2]
//    * VPUNPCKLQDQ m256, ymm, ymm                  [AVX2]
//    * VPUNPCKLQDQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPUNPCKLQDQ zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPUNPCKLQDQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPUNPCKLQDQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPUNPCKLQDQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPUNPCKLQDQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Operand order follows AT&T convention: v0 is the source (reg/mem, with
// optional 64-bit broadcast for the EVEX forms), v1 the second source
// register, v2 the destination. The quadword element size shows up in the
// EVEX encoding: the prefix argument is 0x85 (vs 0x05 for the 32-bit
// variants) and the hand-rolled prefix uses 0xfd (vs 0x7d) — both set the
// EVEX.W bit. A plain XMM/YMM register can satisfy both the VEX and EVEX
// predicates, so the registration order of p.add calls is significant.
func (self *Program) VPUNPCKLQDQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPUNPCKLQDQ", 3, Operands { v0, v1, v2 })
    // VPUNPCKLQDQ xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))    // 2-byte VEX prefix
            m.emit(0x6c)                                  // opcode: PUNPCKLQDQ
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11 (register-direct), reg=dst, rm=src
        })
    }
    // VPUNPCKLQDQ m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x6c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1) // memory-form ModRM/SIB, displacement scale 1
        })
    }
    // VPUNPCKLQDQ ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1])) // 5 selects the 256-bit (VEX.L=1) variant
            m.emit(0x6c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKLQDQ m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x6c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPUNPCKLQDQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0x85 = 0x05 with the W bit set (64-bit elements); bcode(v[0])
            // carries the broadcast bit for the m64bcst form.
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x6c)
            m.mrsd(lcode(v[2]), addr(v[0]), 64) // disp8 scaled by 64 (full 512-bit memory operand)
        })
    }
    // VPUNPCKLQDQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix emitted manually; 0xfd (vs 0x7d) sets EVEX.W
            // for 64-bit elements, 0x40 in the last byte sets 512-bit length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x6c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKLQDQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x6c)
            m.mrsd(lcode(v[2]), addr(v[0]), 16) // disp8 scaled by 16 (128-bit memory operand)
        })
    }
    // VPUNPCKLQDQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00) // 0x00 = 128-bit length
            m.emit(0x6c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKLQDQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x6c)
            m.mrsd(lcode(v[2]), addr(v[0]), 32) // disp8 scaled by 32 (256-bit memory operand)
        })
    }
    // VPUNPCKLQDQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20) // 0x20 = 256-bit length
            m.emit(0x6c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand pattern matched: the call site passed an unsupported combination.
    if p.len == 0 {
        panic("invalid operands for VPUNPCKLQDQ")
    }
    return p
}
 90204  
// VPUNPCKLWD performs "Unpack and Interleave Low-Order Words into Doublewords".
//
// Mnemonic        : VPUNPCKLWD
// Supported forms : (10 forms)
//
//    * VPUNPCKLWD xmm, xmm, xmm           [AVX]
//    * VPUNPCKLWD m128, xmm, xmm          [AVX]
//    * VPUNPCKLWD ymm, ymm, ymm           [AVX2]
//    * VPUNPCKLWD m256, ymm, ymm          [AVX2]
//    * VPUNPCKLWD zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPUNPCKLWD m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPUNPCKLWD xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPUNPCKLWD m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPUNPCKLWD ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPUNPCKLWD m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Operand order follows AT&T convention: v0 is the source (reg/mem),
// v1 the second source register, v2 the destination. Each if-block below
// matches one supported operand combination and registers an encoder for
// it; note that a plain XMM/YMM register can satisfy both the VEX and the
// EVEX predicates, so the registration order of p.add calls is significant.
func (self *Program) VPUNPCKLWD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPUNPCKLWD", 3, Operands { v0, v1, v2 })
    // VPUNPCKLWD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))    // 2-byte VEX prefix
            m.emit(0x61)                                  // opcode: PUNPCKLWD
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11 (register-direct), reg=dst, rm=src
        })
    }
    // VPUNPCKLWD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x61)
            m.mrsd(lcode(v[2]), addr(v[0]), 1) // memory-form ModRM/SIB, displacement scale 1
        })
    }
    // VPUNPCKLWD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1])) // 5 selects the 256-bit (VEX.L=1) variant
            m.emit(0x61)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKLWD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x61)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPUNPCKLWD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix emitted manually; 0x40 in the last byte sets
            // the 512-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x61)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKLWD m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x61)
            m.mrsd(lcode(v[2]), addr(v[0]), 64) // disp8 scaled by 64 (full 512-bit memory operand)
        })
    }
    // VPUNPCKLWD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00) // 0x00 = 128-bit length
            m.emit(0x61)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKLWD m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x61)
            m.mrsd(lcode(v[2]), addr(v[0]), 16) // disp8 scaled by 16 (128-bit memory operand)
        })
    }
    // VPUNPCKLWD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20) // 0x20 = 256-bit length
            m.emit(0x61)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKLWD m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x61)
            m.mrsd(lcode(v[2]), addr(v[0]), 32) // disp8 scaled by 32 (256-bit memory operand)
        })
    }
    // No operand pattern matched: the call site passed an unsupported combination.
    if p.len == 0 {
        panic("invalid operands for VPUNPCKLWD")
    }
    return p
}
 90337  
// VPXOR performs "Packed Bitwise Logical Exclusive OR".
//
// Mnemonic        : VPXOR
// Supported forms : (4 forms)
//
//    * VPXOR xmm, xmm, xmm     [AVX]
//    * VPXOR m128, xmm, xmm    [AVX]
//    * VPXOR ymm, ymm, ymm     [AVX2]
//    * VPXOR m256, ymm, ymm    [AVX2]
//
// Operand order follows AT&T convention: v0 is the source (reg/mem),
// v1 the second source register, v2 the destination. VPXOR has only
// VEX-encoded forms; the EVEX-encoded equivalents are the separate
// VPXORD/VPXORQ instructions.
func (self *Program) VPXOR(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPXOR", 3, Operands { v0, v1, v2 })
    // VPXOR xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))    // 2-byte VEX prefix
            m.emit(0xef)                                  // opcode: PXOR
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11 (register-direct), reg=dst, rm=src
        })
    }
    // VPXOR m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xef)
            m.mrsd(lcode(v[2]), addr(v[0]), 1) // memory-form ModRM/SIB, displacement scale 1
        })
    }
    // VPXOR ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1])) // 5 selects the 256-bit (VEX.L=1) variant
            m.emit(0xef)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPXOR m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xef)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No operand pattern matched: the call site passed an unsupported combination.
    if p.len == 0 {
        panic("invalid operands for VPXOR")
    }
    return p
}
 90395  
// VPXORD performs "Bitwise Logical Exclusive OR of Packed Doubleword Integers".
//
// Mnemonic        : VPXORD
// Supported forms : (6 forms)
//
//    * VPXORD m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPXORD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPXORD m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPXORD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPXORD m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPXORD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Operands use source-first (AT&T) ordering: the destination is the last operand.
func (self *Program) VPXORD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPXORD", 3, Operands { v0, v1, v2 })
    // VPXORD m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: the evex() helper assembles the EVEX prefix; the final
            // bcode(v[0]) argument carries the m32bcst broadcast bit.
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            // 0xEF is the PXOR-family opcode byte.
            m.emit(0xef)
            // ModRM/SIB + displacement; 64 is the disp8*N compression scale (full 512-bit width).
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPXORD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-rolled 4-byte EVEX prefix (0x62 escape, then P0/P1/P2).
            m.emit(0x62)
            // P0: inverted register-extension bits XORed into the base byte.
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            // P1: inverted vvvv field selecting the second source (v[1]); W=0 for doublewords.
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            // P2: z bit, V', opmask register; 0x40 selects 512-bit vector length (L'L).
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xef)
            // ModRM (register-direct): reg = destination, rm = first source.
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPXORD m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit memory form (AVX512VL); disp8 compression scale drops to 16.
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xef)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPXORD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit register form; P2 length bits are 0x00 (L'L = 128-bit).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xef)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPXORD m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form; disp8 compression scale is 32.
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xef)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPXORD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form; P2 length bits are 0x20 (L'L = 256-bit).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xef)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder matched: the operand combination is not one of the supported forms.
    if p.len == 0 {
        panic("invalid operands for VPXORD")
    }
    return p
}
 90484  
// VPXORQ performs "Bitwise Logical Exclusive OR of Packed Quadword Integers".
//
// Mnemonic        : VPXORQ
// Supported forms : (6 forms)
//
//    * VPXORQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPXORQ zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPXORQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPXORQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPXORQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPXORQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Identical encoding to VPXORD except the EVEX W bit is set (quadword elements),
// hence 0x85 instead of 0x05 in evex() calls and 0xfd instead of 0x7d in P1.
func (self *Program) VPXORQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPXORQ", 3, Operands { v0, v1, v2 })
    // VPXORQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form via the evex() helper; bcode(v[0]) carries the m64bcst broadcast bit.
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            // 0xEF is the PXOR-family opcode byte.
            m.emit(0xef)
            // ModRM/SIB + displacement; 64 is the disp8*N compression scale (512-bit width).
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPXORQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-rolled EVEX prefix (0x62 escape, then P0/P1/P2).
            m.emit(0x62)
            // P0: inverted register-extension bits.
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            // P1: W=1 base (0xfd) with inverted vvvv for the second source.
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            // P2: z bit, V', opmask; 0x40 = 512-bit vector length.
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xef)
            // ModRM (register-direct): reg = destination, rm = first source.
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPXORQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit memory form (AVX512VL); disp8 compression scale is 16.
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xef)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPXORQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit register form; P2 length bits are 0x00 (L'L = 128-bit).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xef)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPXORQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form; disp8 compression scale is 32.
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xef)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPXORQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form; P2 length bits are 0x20 (L'L = 256-bit).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xef)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder matched: the operand combination is not one of the supported forms.
    if p.len == 0 {
        panic("invalid operands for VPXORQ")
    }
    return p
}
 90573  
// VRANGEPD performs "Range Restriction Calculation For Packed Pairs of Double-Precision Floating-Point Values".
//
// Mnemonic        : VRANGEPD
// Supported forms : (7 forms)
//
//    * VRANGEPD imm8, m512/m64bcst, zmm, zmm{k}{z}    [AVX512DQ]
//    * VRANGEPD imm8, {sae}, zmm, zmm, zmm{k}{z}      [AVX512DQ]
//    * VRANGEPD imm8, zmm, zmm, zmm{k}{z}             [AVX512DQ]
//    * VRANGEPD imm8, m128/m64bcst, xmm, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VRANGEPD imm8, xmm, xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VRANGEPD imm8, m256/m64bcst, ymm, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VRANGEPD imm8, ymm, ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
//
// The variadic vv accepts one extra operand for the 5-operand {sae} form.
func (self *Program) VRANGEPD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VRANGEPD", 4, Operands { v0, v1, v2, v3 })
        case 1  : p = self.alloc("VRANGEPD", 5, Operands { v0, v1, v2, v3, vv[0] })
        default : panic("instruction VRANGEPD takes 4 or 5 operands")
    }
    // VRANGEPD imm8, m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM512M64bcst(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form via the evex() helper; bcode(v[1]) carries the broadcast bit.
            m.evex(0b11, 0x85, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            // 0x50 is the VRANGEPD/VRANGEPS opcode byte.
            m.emit(0x50)
            // ModRM/SIB + displacement; 64 is the disp8*N compression scale.
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            // Trailing imm8 selects the range operation and sign control.
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRANGEPD imm8, {sae}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isZMM(v2) && isZMM(v3) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // {sae} form: hand-rolled EVEX prefix with the b bit (0x10) set in P2.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[4]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[4]) << 4)))
            m.emit(0xfd ^ (hlcode(v[3]) << 3))
            m.emit((zcode(v[4]) << 7) | (0x08 ^ (ecode(v[3]) << 3)) | kcode(v[4]) | 0x10)
            m.emit(0x50)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRANGEPD imm8, zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 512-bit register form; 0x40 in P2 selects 512-bit vector length.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x50)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRANGEPD imm8, m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM128M64bcst(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit memory form (AVX512VL); disp8 compression scale is 16.
            m.evex(0b11, 0x85, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x50)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRANGEPD imm8, xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit register form; P2 length bits are 0x00.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0x50)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRANGEPD imm8, m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM256M64bcst(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form; disp8 compression scale is 32.
            m.evex(0b11, 0x85, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x50)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRANGEPD imm8, ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form; P2 length bits are 0x20.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x50)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoder matched: the operand combination is not one of the supported forms.
    if p.len == 0 {
        panic("invalid operands for VRANGEPD")
    }
    return p
}
 90688  
// VRANGEPS performs "Range Restriction Calculation For Packed Pairs of Single-Precision Floating-Point Values".
//
// Mnemonic        : VRANGEPS
// Supported forms : (7 forms)
//
//    * VRANGEPS imm8, m512/m32bcst, zmm, zmm{k}{z}    [AVX512DQ]
//    * VRANGEPS imm8, {sae}, zmm, zmm, zmm{k}{z}      [AVX512DQ]
//    * VRANGEPS imm8, zmm, zmm, zmm{k}{z}             [AVX512DQ]
//    * VRANGEPS imm8, m128/m32bcst, xmm, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VRANGEPS imm8, xmm, xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VRANGEPS imm8, m256/m32bcst, ymm, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VRANGEPS imm8, ymm, ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
//
// Single-precision counterpart of VRANGEPD: the EVEX W bit is clear, hence
// 0x05 instead of 0x85 in evex() calls and 0x7d instead of 0xfd in P1.
func (self *Program) VRANGEPS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VRANGEPS", 4, Operands { v0, v1, v2, v3 })
        case 1  : p = self.alloc("VRANGEPS", 5, Operands { v0, v1, v2, v3, vv[0] })
        default : panic("instruction VRANGEPS takes 4 or 5 operands")
    }
    // VRANGEPS imm8, m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM512M32bcst(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form via the evex() helper; bcode(v[1]) carries the broadcast bit.
            m.evex(0b11, 0x05, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            // 0x50 is the VRANGEPD/VRANGEPS opcode byte.
            m.emit(0x50)
            // ModRM/SIB + displacement; 64 is the disp8*N compression scale.
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            // Trailing imm8 selects the range operation and sign control.
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRANGEPS imm8, {sae}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isZMM(v2) && isZMM(v3) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // {sae} form: hand-rolled EVEX prefix with the b bit (0x10) set in P2.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[4]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[4]) << 4)))
            m.emit(0x7d ^ (hlcode(v[3]) << 3))
            m.emit((zcode(v[4]) << 7) | (0x08 ^ (ecode(v[3]) << 3)) | kcode(v[4]) | 0x10)
            m.emit(0x50)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRANGEPS imm8, zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 512-bit register form; 0x40 in P2 selects 512-bit vector length.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x50)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRANGEPS imm8, m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM128M32bcst(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit memory form (AVX512VL); disp8 compression scale is 16.
            m.evex(0b11, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x50)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRANGEPS imm8, xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit register form; P2 length bits are 0x00.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0x50)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRANGEPS imm8, m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM256M32bcst(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form; disp8 compression scale is 32.
            m.evex(0b11, 0x05, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x50)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRANGEPS imm8, ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form; P2 length bits are 0x20.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x50)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoder matched: the operand combination is not one of the supported forms.
    if p.len == 0 {
        panic("invalid operands for VRANGEPS")
    }
    return p
}
 90803  
 90804  // VRANGESD performs "Range Restriction Calculation For a pair of Scalar Double-Precision Floating-Point Values".
 90805  //
 90806  // Mnemonic        : VRANGESD
 90807  // Supported forms : (3 forms)
 90808  //
 90809  //    * VRANGESD imm8, m64, xmm, xmm{k}{z}           [AVX512DQ]
 90810  //    * VRANGESD imm8, {sae}, xmm, xmm, xmm{k}{z}    [AVX512DQ]
 90811  //    * VRANGESD imm8, xmm, xmm, xmm{k}{z}           [AVX512DQ]
 90812  //
 90813  func (self *Program) VRANGESD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}, vv ...interface{}) *Instruction {
 90814      var p *Instruction
 90815      switch len(vv) {
 90816          case 0  : p = self.alloc("VRANGESD", 4, Operands { v0, v1, v2, v3 })
 90817          case 1  : p = self.alloc("VRANGESD", 5, Operands { v0, v1, v2, v3, vv[0] })
 90818          default : panic("instruction VRANGESD takes 4 or 5 operands")
 90819      }
 90820      // VRANGESD imm8, m64, xmm, xmm{k}{z}
 90821      if len(vv) == 0 && isImm8(v0) && isM64(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
 90822          self.require(ISA_AVX512DQ)
 90823          p.domain = DomainAVX
 90824          p.add(0, func(m *_Encoding, v []interface{}) {
 90825              m.evex(0b11, 0x85, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
 90826              m.emit(0x51)
 90827              m.mrsd(lcode(v[3]), addr(v[1]), 8)
 90828              m.imm1(toImmAny(v[0]))
 90829          })
 90830      }
 90831      // VRANGESD imm8, {sae}, xmm, xmm, xmm{k}{z}
 90832      if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) && isXMMkz(vv[0]) {
 90833          self.require(ISA_AVX512DQ)
 90834          p.domain = DomainAVX
 90835          p.add(0, func(m *_Encoding, v []interface{}) {
 90836              m.emit(0x62)
 90837              m.emit(0xf3 ^ ((hcode(v[4]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[4]) << 4)))
 90838              m.emit(0xfd ^ (hlcode(v[3]) << 3))
 90839              m.emit((zcode(v[4]) << 7) | (0x08 ^ (ecode(v[3]) << 3)) | kcode(v[4]) | 0x10)
 90840              m.emit(0x51)
 90841              m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
 90842              m.imm1(toImmAny(v[0]))
 90843          })
 90844      }
 90845      // VRANGESD imm8, xmm, xmm, xmm{k}{z}
 90846      if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
 90847          self.require(ISA_AVX512DQ)
 90848          p.domain = DomainAVX
 90849          p.add(0, func(m *_Encoding, v []interface{}) {
 90850              m.emit(0x62)
 90851              m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
 90852              m.emit(0xfd ^ (hlcode(v[2]) << 3))
 90853              m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
 90854              m.emit(0x51)
 90855              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
 90856              m.imm1(toImmAny(v[0]))
 90857          })
 90858      }
 90859      if p.len == 0 {
 90860          panic("invalid operands for VRANGESD")
 90861      }
 90862      return p
 90863  }
 90864  
 90865  // VRANGESS performs "Range Restriction Calculation For a pair of Scalar Single-Precision Floating-Point Values".
 90866  //
 90867  // Mnemonic        : VRANGESS
 90868  // Supported forms : (3 forms)
 90869  //
 90870  //    * VRANGESS imm8, m32, xmm, xmm{k}{z}           [AVX512DQ]
 90871  //    * VRANGESS imm8, {sae}, xmm, xmm, xmm{k}{z}    [AVX512DQ]
 90872  //    * VRANGESS imm8, xmm, xmm, xmm{k}{z}           [AVX512DQ]
 90873  //
 90874  func (self *Program) VRANGESS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}, vv ...interface{}) *Instruction {
 90875      var p *Instruction
 90876      switch len(vv) {
 90877          case 0  : p = self.alloc("VRANGESS", 4, Operands { v0, v1, v2, v3 })
 90878          case 1  : p = self.alloc("VRANGESS", 5, Operands { v0, v1, v2, v3, vv[0] })
 90879          default : panic("instruction VRANGESS takes 4 or 5 operands")
 90880      }
 90881      // VRANGESS imm8, m32, xmm, xmm{k}{z}
 90882      if len(vv) == 0 && isImm8(v0) && isM32(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
 90883          self.require(ISA_AVX512DQ)
 90884          p.domain = DomainAVX
 90885          p.add(0, func(m *_Encoding, v []interface{}) {
 90886              m.evex(0b11, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
 90887              m.emit(0x51)
 90888              m.mrsd(lcode(v[3]), addr(v[1]), 4)
 90889              m.imm1(toImmAny(v[0]))
 90890          })
 90891      }
 90892      // VRANGESS imm8, {sae}, xmm, xmm, xmm{k}{z}
 90893      if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) && isXMMkz(vv[0]) {
 90894          self.require(ISA_AVX512DQ)
 90895          p.domain = DomainAVX
 90896          p.add(0, func(m *_Encoding, v []interface{}) {
 90897              m.emit(0x62)
 90898              m.emit(0xf3 ^ ((hcode(v[4]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[4]) << 4)))
 90899              m.emit(0x7d ^ (hlcode(v[3]) << 3))
 90900              m.emit((zcode(v[4]) << 7) | (0x08 ^ (ecode(v[3]) << 3)) | kcode(v[4]) | 0x10)
 90901              m.emit(0x51)
 90902              m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
 90903              m.imm1(toImmAny(v[0]))
 90904          })
 90905      }
 90906      // VRANGESS imm8, xmm, xmm, xmm{k}{z}
 90907      if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
 90908          self.require(ISA_AVX512DQ)
 90909          p.domain = DomainAVX
 90910          p.add(0, func(m *_Encoding, v []interface{}) {
 90911              m.emit(0x62)
 90912              m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
 90913              m.emit(0x7d ^ (hlcode(v[2]) << 3))
 90914              m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
 90915              m.emit(0x51)
 90916              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
 90917              m.imm1(toImmAny(v[0]))
 90918          })
 90919      }
 90920      if p.len == 0 {
 90921          panic("invalid operands for VRANGESS")
 90922      }
 90923      return p
 90924  }
 90925  
// VRCP14PD performs "Compute Approximate Reciprocals of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VRCP14PD
// Supported forms : (6 forms)
//
//    * VRCP14PD m512/m64bcst, zmm{k}{z}    [AVX512F]
//    * VRCP14PD zmm, zmm{k}{z}             [AVX512F]
//    * VRCP14PD m128/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VRCP14PD m256/m64bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VRCP14PD xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VRCP14PD ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Two-operand form: v0 is the source, v1 the (maskable) destination; the EVEX
// vvvv field is unused and passed as 0 to the evex() helper.
func (self *Program) VRCP14PD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VRCP14PD", 2, Operands { v0, v1 })
    // VRCP14PD m512/m64bcst, zmm{k}{z}
    if isM512M64bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form via the evex() helper; bcode(v[0]) carries the broadcast bit.
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            // 0x4C is the VRCP14PD/VRCP14PS opcode byte.
            m.emit(0x4c)
            // ModRM/SIB + displacement; 64 is the disp8*N compression scale.
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VRCP14PD zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-rolled EVEX prefix (0x62 escape, then P0/P1/P2).
            m.emit(0x62)
            // P0: inverted register-extension bits.
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            // P1: constant 0xfd — W=1, vvvv all-ones (no second source operand).
            m.emit(0xfd)
            // P2: z bit and opmask; 0x48 combines V' with 512-bit vector length.
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x4c)
            // ModRM (register-direct): reg = destination, rm = source.
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VRCP14PD m128/m64bcst, xmm{k}{z}
    if isM128M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit memory form (AVX512VL); disp8 compression scale is 16.
            m.evex(0b10, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x4c)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VRCP14PD m256/m64bcst, ymm{k}{z}
    if isM256M64bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form; disp8 compression scale is 32.
            m.evex(0b10, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x4c)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VRCP14PD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit register form; 0x08 in P2 (V' only, length bits clear).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x4c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VRCP14PD ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form; 0x28 in P2 (V' plus 256-bit length bit).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x4c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No encoder matched: the operand combination is not one of the supported forms.
    if p.len == 0 {
        panic("invalid operands for VRCP14PD")
    }
    return p
}
 91014  
// VRCP14PS performs "Compute Approximate Reciprocals of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VRCP14PS
// Supported forms : (6 forms)
//
//    * VRCP14PS m512/m32bcst, zmm{k}{z}    [AVX512F]
//    * VRCP14PS zmm, zmm{k}{z}             [AVX512F]
//    * VRCP14PS m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VRCP14PS m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VRCP14PS xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VRCP14PS ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Each matching form below registers an encoder closure; the actual bytes are
// produced later when the closure runs against an _Encoding.
func (self *Program) VRCP14PS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VRCP14PS", 2, Operands { v0, v1 })
    // VRCP14PS m512/m32bcst, zmm{k}{z}
    if isM512M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: the EVEX prefix is produced by the helper; the final
            // bcode(v[0]) argument carries the embedded-broadcast bit for the
            // m32bcst variant of the operand.
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x4c)
            // ModRM/SIB/displacement; 64 is the disp8*N compression factor used
            // for this 512-bit form (see Intel SDM, EVEX compressed disp8).
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VRCP14PS zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: the 4-byte EVEX prefix is emitted by hand.
            // 0x62 is the EVEX escape byte; the next three bytes fold in the
            // operands' register-extension, opmask ({k}) and zeroing ({z}) bits.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            // Opcode, then register-direct ModRM (0xc0 | reg<<3 | rm).
            m.emit(0x4c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VRCP14PS m128/m32bcst, xmm{k}{z}
    if isM128M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x4c)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VRCP14PS m256/m32bcst, ymm{k}{z}
    if isM256M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x4c)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VRCP14PS xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x4c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VRCP14PS ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x4c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VRCP14PS")
    }
    return p
}
 91103  
// VRCP14SD performs "Compute Approximate Reciprocal of a Scalar Double-Precision Floating-Point Value".
//
// Mnemonic        : VRCP14SD
// Supported forms : (2 forms)
//
//    * VRCP14SD xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VRCP14SD m64, xmm, xmm{k}{z}    [AVX512F]
//
// Each matching form registers an encoder closure on the instruction; the
// bytes are emitted later against an _Encoding.
func (self *Program) VRCP14SD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VRCP14SD", 3, Operands { v0, v1, v2 })
    // VRCP14SD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-rolled EVEX prefix (0x62 escape + 3 payload
            // bytes). v[1] is the non-destructive source register, folded in
            // via hlcode/ecode; v[2] supplies the {k}/{z} mask bits.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            // Opcode, then register-direct ModRM.
            m.emit(0x4d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VRCP14SD m64, xmm, xmm{k}{z}
    if isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: EVEX prefix via helper; 8 is the disp8*N
            // compression factor for the 64-bit memory operand.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x4d)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VRCP14SD")
    }
    return p
}
 91142  
// VRCP14SS performs "Compute Approximate Reciprocal of a Scalar Single-Precision Floating-Point Value".
//
// Mnemonic        : VRCP14SS
// Supported forms : (2 forms)
//
//    * VRCP14SS xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VRCP14SS m32, xmm, xmm{k}{z}    [AVX512F]
//
// Single-precision counterpart of VRCP14SD; identical structure, different
// opcode-map byte (0x7d vs 0xfd) and a 4-byte memory operand.
func (self *Program) VRCP14SS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VRCP14SS", 3, Operands { v0, v1, v2 })
    // VRCP14SS xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-rolled EVEX prefix; v[1] (second source) is
            // folded in via hlcode/ecode, v[2] supplies {k}/{z} mask bits.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x4d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VRCP14SS m32, xmm, xmm{k}{z}
    if isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: 4 is the disp8*N compression factor (32-bit operand).
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x4d)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VRCP14SS")
    }
    return p
}
 91181  
// VRCP28PD performs "Approximation to the Reciprocal of Packed Double-Precision Floating-Point Values with Less Than 2^-28 Relative Error".
//
// Mnemonic        : VRCP28PD
// Supported forms : (3 forms)
//
//    * VRCP28PD m512/m64bcst, zmm{k}{z}    [AVX512ER]
//    * VRCP28PD {sae}, zmm, zmm{k}{z}      [AVX512ER]
//    * VRCP28PD zmm, zmm{k}{z}             [AVX512ER]
//
// The optional leading {sae} (suppress-all-exceptions) operand makes the
// arity variable, hence the variadic tail.
func (self *Program) VRCP28PD(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Accept either 2 operands (no {sae}) or 3 operands ({sae} form).
    switch len(vv) {
        case 0  : p = self.alloc("VRCP28PD", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VRCP28PD", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VRCP28PD takes 2 or 3 operands")
    }
    // VRCP28PD m512/m64bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: EVEX prefix via helper; bcode(v[0]) is the
            // embedded-broadcast bit, 64 the disp8*N compression factor.
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xca)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VRCP28PD {sae}, zmm, zmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // {sae} form: operands are shifted by one (v[1] = source,
            // v[2] = destination); differs from the plain register form only
            // in the last prefix byte (0x18 vs 0x48).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x18)
            m.emit(0xca)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VRCP28PD zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0xca)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VRCP28PD")
    }
    return p
}
 91239  
// VRCP28PS performs "Approximation to the Reciprocal of Packed Single-Precision Floating-Point Values with Less Than 2^-28 Relative Error".
//
// Mnemonic        : VRCP28PS
// Supported forms : (3 forms)
//
//    * VRCP28PS m512/m32bcst, zmm{k}{z}    [AVX512ER]
//    * VRCP28PS {sae}, zmm, zmm{k}{z}      [AVX512ER]
//    * VRCP28PS zmm, zmm{k}{z}             [AVX512ER]
//
// Single-precision counterpart of VRCP28PD; identical structure apart from
// the opcode-map byte (0x7d vs 0xfd) and the m32bcst broadcast granularity.
func (self *Program) VRCP28PS(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Accept either 2 operands (no {sae}) or 3 operands ({sae} form).
    switch len(vv) {
        case 0  : p = self.alloc("VRCP28PS", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VRCP28PS", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VRCP28PS takes 2 or 3 operands")
    }
    // VRCP28PS m512/m32bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: EVEX prefix via helper; bcode(v[0]) selects
            // embedded broadcast, 64 is the disp8*N compression factor.
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xca)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VRCP28PS {sae}, zmm, zmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // {sae} form: operands shifted by one (v[1] = source, v[2] = dest).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x18)
            m.emit(0xca)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VRCP28PS zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0xca)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VRCP28PS")
    }
    return p
}
 91297  
// VRCP28SD performs "Approximation to the Reciprocal of a Scalar Double-Precision Floating-Point Value with Less Than 2^-28 Relative Error".
//
// Mnemonic        : VRCP28SD
// Supported forms : (3 forms)
//
//    * VRCP28SD m64, xmm, xmm{k}{z}           [AVX512ER]
//    * VRCP28SD {sae}, xmm, xmm, xmm{k}{z}    [AVX512ER]
//    * VRCP28SD xmm, xmm, xmm{k}{z}           [AVX512ER]
//
// The optional leading {sae} operand makes the arity variable (3 or 4).
func (self *Program) VRCP28SD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Accept either 3 operands (no {sae}) or 4 operands ({sae} form).
    switch len(vv) {
        case 0  : p = self.alloc("VRCP28SD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VRCP28SD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VRCP28SD takes 3 or 4 operands")
    }
    // VRCP28SD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: EVEX prefix via helper; 8 is the disp8*N
            // compression factor for the 64-bit memory operand.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xcb)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VRCP28SD {sae}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // {sae} form: operands shifted by one (v[1]/v[2] = sources,
            // v[3] = destination with {k}/{z} bits).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xcb)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VRCP28SD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Plain register form: hand-rolled EVEX prefix (0x62 escape
            // followed by three payload bytes), opcode, register-direct ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xcb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VRCP28SD")
    }
    return p
}
 91355  
// VRCP28SS performs "Approximation to the Reciprocal of a Scalar Single-Precision Floating-Point Value with Less Than 2^-28 Relative Error".
//
// Mnemonic        : VRCP28SS
// Supported forms : (3 forms)
//
//    * VRCP28SS m32, xmm, xmm{k}{z}           [AVX512ER]
//    * VRCP28SS {sae}, xmm, xmm, xmm{k}{z}    [AVX512ER]
//    * VRCP28SS xmm, xmm, xmm{k}{z}           [AVX512ER]
//
// Single-precision counterpart of VRCP28SD; same structure, different
// opcode-map byte (0x7d vs 0xfd) and a 4-byte memory operand.
func (self *Program) VRCP28SS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Accept either 3 operands (no {sae}) or 4 operands ({sae} form).
    switch len(vv) {
        case 0  : p = self.alloc("VRCP28SS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VRCP28SS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VRCP28SS takes 3 or 4 operands")
    }
    // VRCP28SS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: 4 is the disp8*N compression factor (32-bit operand).
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xcb)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VRCP28SS {sae}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // {sae} form: operands shifted by one (v[1]/v[2] = sources,
            // v[3] = destination with {k}/{z} bits).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xcb)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VRCP28SS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xcb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VRCP28SS")
    }
    return p
}
 91413  
// VRCPPS performs "Compute Approximate Reciprocals of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VRCPPS
// Supported forms : (4 forms)
//
//    * VRCPPS xmm, xmm     [AVX]
//    * VRCPPS m128, xmm    [AVX]
//    * VRCPPS ymm, ymm     [AVX]
//    * VRCPPS m256, ymm    [AVX]
//
// Plain AVX instruction, so a 2-byte VEX prefix is used instead of EVEX.
func (self *Program) VRCPPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VRCPPS", 2, Operands { v0, v1 })
    // VRCPPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix; first argument is 0 for the 128-bit forms
            // and 4 for the 256-bit forms (presumably the VEX.L length bit).
            m.vex2(0, hcode(v[1]), v[0], 0)
            m.emit(0x53)
            // Register-direct ModRM: dest in reg field, source in rm field.
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VRCPPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x53)
            // Memory form: no disp8*N compression under VEX, hence factor 1.
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VRCPPS ymm, ymm
    if isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[1]), v[0], 0)
            m.emit(0x53)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VRCPPS m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x53)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VRCPPS")
    }
    return p
}
 91471  
// VRCPSS performs "Compute Approximate Reciprocal of Scalar Single-Precision Floating-Point Values".
//
// Mnemonic        : VRCPSS
// Supported forms : (2 forms)
//
//    * VRCPSS xmm, xmm, xmm    [AVX]
//    * VRCPSS m32, xmm, xmm    [AVX]
//
// Three-operand AVX scalar form: v0 is the source, v1 the second source
// (folded into the VEX prefix via hlcode), v2 the destination.
func (self *Program) VRCPSS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VRCPSS", 3, Operands { v0, v1, v2 })
    // VRCPSS xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix; last argument carries the non-destructive
            // source register (v[1]) selector.
            m.vex2(2, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x53)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VRCPSS m32, xmm, xmm
    if isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: factor 1 because VEX has no disp8*N compression.
            m.vex2(2, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x53)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VRCPSS")
    }
    return p
}
 91507  
// VREDUCEPD performs "Perform Reduction Transformation on Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VREDUCEPD
// Supported forms : (6 forms)
//
//    * VREDUCEPD imm8, m512/m64bcst, zmm{k}{z}    [AVX512DQ]
//    * VREDUCEPD imm8, zmm, zmm{k}{z}             [AVX512DQ]
//    * VREDUCEPD imm8, m128/m64bcst, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VREDUCEPD imm8, m256/m64bcst, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VREDUCEPD imm8, xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VREDUCEPD imm8, ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
//
// v0 is the imm8 control byte, appended after the ModRM bytes in every form.
func (self *Program) VREDUCEPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VREDUCEPD", 3, Operands { v0, v1, v2 })
    // VREDUCEPD imm8, m512/m64bcst, zmm{k}{z}
    if isImm8(v0) && isM512M64bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: EVEX prefix via helper; bcode(v[1]) is the
            // embedded-broadcast bit, 64 the disp8*N compression factor.
            m.evex(0b11, 0x85, 0b10, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x56)
            m.mrsd(lcode(v[2]), addr(v[1]), 64)
            // Trailing imm8 control operand.
            m.imm1(toImmAny(v[0]))
        })
    }
    // VREDUCEPD imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-rolled EVEX prefix (0x62 escape + three
            // payload bytes), opcode, register-direct ModRM, then imm8.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)
            m.emit(0x56)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VREDUCEPD imm8, m128/m64bcst, xmm{k}{z}
    if isImm8(v0) && isM128M64bcst(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b00, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x56)
            m.mrsd(lcode(v[2]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VREDUCEPD imm8, m256/m64bcst, ymm{k}{z}
    if isImm8(v0) && isM256M64bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b01, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x56)
            m.mrsd(lcode(v[2]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VREDUCEPD imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x08)
            m.emit(0x56)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VREDUCEPD imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x28)
            m.emit(0x56)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VREDUCEPD")
    }
    return p
}
 91602  
// VREDUCEPS performs "Perform Reduction Transformation on Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VREDUCEPS
// Supported forms : (6 forms)
//
//    * VREDUCEPS imm8, m512/m32bcst, zmm{k}{z}    [AVX512DQ]
//    * VREDUCEPS imm8, zmm, zmm{k}{z}             [AVX512DQ]
//    * VREDUCEPS imm8, m128/m32bcst, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VREDUCEPS imm8, m256/m32bcst, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VREDUCEPS imm8, xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VREDUCEPS imm8, ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
//
// Single-precision counterpart of VREDUCEPD; identical structure, different
// opcode-map byte (0x7d vs 0xfd) and m32bcst broadcast granularity.
func (self *Program) VREDUCEPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VREDUCEPS", 3, Operands { v0, v1, v2 })
    // VREDUCEPS imm8, m512/m32bcst, zmm{k}{z}
    if isImm8(v0) && isM512M32bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: EVEX via helper; bcode(v[1]) is the broadcast bit,
            // 64 the disp8*N compression factor; imm8 control byte trails.
            m.evex(0b11, 0x05, 0b10, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x56)
            m.mrsd(lcode(v[2]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VREDUCEPS imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-rolled EVEX prefix, opcode, register-direct
            // ModRM, then the trailing imm8 control operand.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)
            m.emit(0x56)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VREDUCEPS imm8, m128/m32bcst, xmm{k}{z}
    if isImm8(v0) && isM128M32bcst(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b00, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x56)
            m.mrsd(lcode(v[2]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VREDUCEPS imm8, m256/m32bcst, ymm{k}{z}
    if isImm8(v0) && isM256M32bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b01, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x56)
            m.mrsd(lcode(v[2]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VREDUCEPS imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x08)
            m.emit(0x56)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VREDUCEPS imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x28)
            m.emit(0x56)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VREDUCEPS")
    }
    return p
}
 91697  
// VREDUCESD performs "Perform Reduction Transformation on a Scalar Double-Precision Floating-Point Value".
//
// Mnemonic        : VREDUCESD
// Supported forms : (2 forms)
//
//    * VREDUCESD imm8, xmm, xmm, xmm{k}{z}    [AVX512DQ]
//    * VREDUCESD imm8, m64, xmm, xmm{k}{z}    [AVX512DQ]
//
// v0 is the imm8 control byte; v1 is the source (register or m64), v2 the
// second source folded into the EVEX prefix, v3 the masked destination.
func (self *Program) VREDUCESD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VREDUCESD", 4, Operands { v0, v1, v2, v3 })
    // VREDUCESD imm8, xmm, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-rolled EVEX prefix (0x62 escape + three
            // payload bytes), opcode, register-direct ModRM, trailing imm8.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0x57)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VREDUCESD imm8, m64, xmm, xmm{k}{z}
    if isImm8(v0) && isM64(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: EVEX via helper; 8 is the disp8*N compression
            // factor for the 64-bit memory operand.
            m.evex(0b11, 0x85, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x57)
            m.mrsd(lcode(v[3]), addr(v[1]), 8)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VREDUCESD")
    }
    return p
}
 91738  
// VREDUCESS performs "Perform Reduction Transformation on a Scalar Single-Precision Floating-Point Value".
//
// Mnemonic        : VREDUCESS
// Supported forms : (2 forms)
//
//    * VREDUCESS imm8, xmm, xmm, xmm{k}{z}    [AVX512DQ]
//    * VREDUCESS imm8, m32, xmm, xmm{k}{z}    [AVX512DQ]
//
// Single-precision counterpart of VREDUCESD: same opcode (0x57) but a 4-byte
// memory operand / disp8 scale. Panics at assembly time if no form matches.
func (self *Program) VREDUCESS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VREDUCESS", 4, Operands { v0, v1, v2, v3 })
    // VREDUCESS imm8, xmm, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-built EVEX prefix, opcode, register-direct
            // ModRM (mod=11), trailing imm8.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0x57)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VREDUCESS imm8, m32, xmm, xmm{k}{z}
    if isImm8(v0) && isM32(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: disp8 is compressed with a 4-byte element size here.
            m.evex(0b11, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x57)
            m.mrsd(lcode(v[3]), addr(v[1]), 4)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No candidate encoding was added: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VREDUCESS")
    }
    return p
}
 91779  
// VRNDSCALEPD performs "Round Packed Double-Precision Floating-Point Values To Include A Given Number Of Fraction Bits".
//
// Mnemonic        : VRNDSCALEPD
// Supported forms : (7 forms)
//
//    * VRNDSCALEPD imm8, m512/m64bcst, zmm{k}{z}    [AVX512F]
//    * VRNDSCALEPD imm8, {sae}, zmm, zmm{k}{z}      [AVX512F]
//    * VRNDSCALEPD imm8, zmm, zmm{k}{z}             [AVX512F]
//    * VRNDSCALEPD imm8, m128/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VRNDSCALEPD imm8, m256/m64bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VRNDSCALEPD imm8, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VRNDSCALEPD imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// The variadic tail carries the optional fourth operand: it is present only
// for the {sae} form (imm8, {sae}, zmm, zmm{k}{z}). Panics if more than one
// extra operand is given or, at assembly time, if no form matches.
func (self *Program) VRNDSCALEPD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VRNDSCALEPD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VRNDSCALEPD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VRNDSCALEPD takes 3 or 4 operands")
    }
    // VRNDSCALEPD imm8, m512/m64bcst, zmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM512M64bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 512-bit memory form: EVEX via helper, opcode 0x09, 64-byte disp8 scale.
            m.evex(0b11, 0x85, 0b10, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x09)
            m.mrsd(lcode(v[2]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRNDSCALEPD imm8, {sae}, zmm, zmm{k}{z}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // {sae} register form: hand-built EVEX prefix; note the 0x18 bits in
            // the fourth prefix byte (vs 0x48 in the plain zmm form below).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[3]) << 7) | kcode(v[3]) | 0x18)
            m.emit(0x09)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRNDSCALEPD imm8, zmm, zmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)
            m.emit(0x09)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRNDSCALEPD imm8, m128/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM128M64bcst(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit memory form: 16-byte disp8 scale.
            m.evex(0b11, 0x85, 0b00, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x09)
            m.mrsd(lcode(v[2]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRNDSCALEPD imm8, m256/m64bcst, ymm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM256M64bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form: 32-byte disp8 scale.
            m.evex(0b11, 0x85, 0b01, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x09)
            m.mrsd(lcode(v[2]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRNDSCALEPD imm8, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x08)
            m.emit(0x09)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRNDSCALEPD imm8, ymm, ymm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x28)
            m.emit(0x09)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No candidate encoding was added: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VRNDSCALEPD")
    }
    return p
}
 91894  
// VRNDSCALEPS performs "Round Packed Single-Precision Floating-Point Values To Include A Given Number Of Fraction Bits".
//
// Mnemonic        : VRNDSCALEPS
// Supported forms : (7 forms)
//
//    * VRNDSCALEPS imm8, m512/m32bcst, zmm{k}{z}    [AVX512F]
//    * VRNDSCALEPS imm8, {sae}, zmm, zmm{k}{z}      [AVX512F]
//    * VRNDSCALEPS imm8, zmm, zmm{k}{z}             [AVX512F]
//    * VRNDSCALEPS imm8, m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VRNDSCALEPS imm8, m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VRNDSCALEPS imm8, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VRNDSCALEPS imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Single-precision counterpart of VRNDSCALEPD (opcode 0x08 instead of 0x09).
// The variadic tail carries the optional fourth operand, used only by the
// {sae} form. Panics if given extra operands or if no form matches.
func (self *Program) VRNDSCALEPS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VRNDSCALEPS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VRNDSCALEPS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VRNDSCALEPS takes 3 or 4 operands")
    }
    // VRNDSCALEPS imm8, m512/m32bcst, zmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM512M32bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 512-bit memory form: EVEX via helper, opcode 0x08, 64-byte disp8 scale.
            m.evex(0b11, 0x05, 0b10, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x08)
            m.mrsd(lcode(v[2]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRNDSCALEPS imm8, {sae}, zmm, zmm{k}{z}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // {sae} register form (0x18 bits in the fourth prefix byte).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[3]) << 7) | kcode(v[3]) | 0x18)
            m.emit(0x08)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRNDSCALEPS imm8, zmm, zmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)
            m.emit(0x08)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRNDSCALEPS imm8, m128/m32bcst, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM128M32bcst(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit memory form: 16-byte disp8 scale.
            m.evex(0b11, 0x05, 0b00, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x08)
            m.mrsd(lcode(v[2]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRNDSCALEPS imm8, m256/m32bcst, ymm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM256M32bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form: 32-byte disp8 scale.
            m.evex(0b11, 0x05, 0b01, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x08)
            m.mrsd(lcode(v[2]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRNDSCALEPS imm8, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x08)
            m.emit(0x08)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRNDSCALEPS imm8, ymm, ymm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x28)
            m.emit(0x08)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No candidate encoding was added: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VRNDSCALEPS")
    }
    return p
}
 92009  
// VRNDSCALESD performs "Round Scalar Double-Precision Floating-Point Value To Include A Given Number Of Fraction Bits".
//
// Mnemonic        : VRNDSCALESD
// Supported forms : (3 forms)
//
//    * VRNDSCALESD imm8, m64, xmm, xmm{k}{z}           [AVX512F]
//    * VRNDSCALESD imm8, {sae}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VRNDSCALESD imm8, xmm, xmm, xmm{k}{z}           [AVX512F]
//
// The variadic tail carries the optional fifth operand, used only by the
// {sae} form. Panics if given extra operands or if no form matches.
func (self *Program) VRNDSCALESD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VRNDSCALESD", 4, Operands { v0, v1, v2, v3 })
        case 1  : p = self.alloc("VRNDSCALESD", 5, Operands { v0, v1, v2, v3, vv[0] })
        default : panic("instruction VRNDSCALESD takes 4 or 5 operands")
    }
    // VRNDSCALESD imm8, m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM64(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: EVEX via helper, opcode 0x0b, 8-byte disp8 scale, imm8.
            m.evex(0b11, 0x85, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x0b)
            m.mrsd(lcode(v[3]), addr(v[1]), 8)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRNDSCALESD imm8, {sae}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // {sae} register form (0x10 bits in the fourth prefix byte,
            // vs 0x40 in the plain register form below).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[4]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[4]) << 4)))
            m.emit(0xfd ^ (hlcode(v[3]) << 3))
            m.emit((zcode(v[4]) << 7) | (0x08 ^ (ecode(v[3]) << 3)) | kcode(v[4]) | 0x10)
            m.emit(0x0b)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRNDSCALESD imm8, xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x0b)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No candidate encoding was added: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VRNDSCALESD")
    }
    return p
}
 92070  
// VRNDSCALESS performs "Round Scalar Single-Precision Floating-Point Value To Include A Given Number Of Fraction Bits".
//
// Mnemonic        : VRNDSCALESS
// Supported forms : (3 forms)
//
//    * VRNDSCALESS imm8, m32, xmm, xmm{k}{z}           [AVX512F]
//    * VRNDSCALESS imm8, {sae}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VRNDSCALESS imm8, xmm, xmm, xmm{k}{z}           [AVX512F]
//
// Single-precision counterpart of VRNDSCALESD (opcode 0x0a, 4-byte memory
// operand). The variadic tail carries the optional fifth operand, used only
// by the {sae} form. Panics if given extra operands or if no form matches.
func (self *Program) VRNDSCALESS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VRNDSCALESS", 4, Operands { v0, v1, v2, v3 })
        case 1  : p = self.alloc("VRNDSCALESS", 5, Operands { v0, v1, v2, v3, vv[0] })
        default : panic("instruction VRNDSCALESS takes 4 or 5 operands")
    }
    // VRNDSCALESS imm8, m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM32(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: EVEX via helper, opcode 0x0a, 4-byte disp8 scale, imm8.
            m.evex(0b11, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x0a)
            m.mrsd(lcode(v[3]), addr(v[1]), 4)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRNDSCALESS imm8, {sae}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // {sae} register form (0x10 bits in the fourth prefix byte).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[4]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[4]) << 4)))
            m.emit(0x7d ^ (hlcode(v[3]) << 3))
            m.emit((zcode(v[4]) << 7) | (0x08 ^ (ecode(v[3]) << 3)) | kcode(v[4]) | 0x10)
            m.emit(0x0a)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRNDSCALESS imm8, xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x0a)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No candidate encoding was added: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VRNDSCALESS")
    }
    return p
}
 92131  
// VROUNDPD performs "Round Packed Double Precision Floating-Point Values".
//
// Mnemonic        : VROUNDPD
// Supported forms : (4 forms)
//
//    * VROUNDPD imm8, xmm, xmm     [AVX]
//    * VROUNDPD imm8, m128, xmm    [AVX]
//    * VROUNDPD imm8, ymm, ymm     [AVX]
//    * VROUNDPD imm8, m256, ymm    [AVX]
//
// AVX (VEX-encoded) instruction, opcode 0x09. Panics at assembly time if
// no operand form matches.
func (self *Program) VROUNDPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VROUNDPD", 3, Operands { v0, v1, v2 })
    // VROUNDPD imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-built 3-byte VEX prefix (0xc4), opcode,
            // register-direct ModRM (mod=11), then the rounding-mode imm8.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79)
            m.emit(0x09)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VROUNDPD imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit memory form: VEX prefix via helper, then ModRM/SIB/disp.
            m.vex3(0xc4, 0b11, 0x01, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x09)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VROUNDPD imm8, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form: third VEX byte 0x7d instead of 0x79.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d)
            m.emit(0x09)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VROUNDPD imm8, m256, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x09)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No candidate encoding was added: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VROUNDPD")
    }
    return p
}
 92197  
// VROUNDPS performs "Round Packed Single Precision Floating-Point Values".
//
// Mnemonic        : VROUNDPS
// Supported forms : (4 forms)
//
//    * VROUNDPS imm8, xmm, xmm     [AVX]
//    * VROUNDPS imm8, m128, xmm    [AVX]
//    * VROUNDPS imm8, ymm, ymm     [AVX]
//    * VROUNDPS imm8, m256, ymm    [AVX]
//
// Single-precision counterpart of VROUNDPD (opcode 0x08 instead of 0x09).
// Panics at assembly time if no operand form matches.
func (self *Program) VROUNDPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VROUNDPS", 3, Operands { v0, v1, v2 })
    // VROUNDPS imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: 3-byte VEX prefix, opcode, register-direct ModRM, imm8.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79)
            m.emit(0x08)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VROUNDPS imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x08)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VROUNDPS imm8, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form: third VEX byte 0x7d instead of 0x79.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d)
            m.emit(0x08)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VROUNDPS imm8, m256, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x08)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No candidate encoding was added: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VROUNDPS")
    }
    return p
}
 92263  
// VROUNDSD performs "Round Scalar Double Precision Floating-Point Values".
//
// Mnemonic        : VROUNDSD
// Supported forms : (2 forms)
//
//    * VROUNDSD imm8, xmm, xmm, xmm    [AVX]
//    * VROUNDSD imm8, m64, xmm, xmm    [AVX]
//
// AVX (VEX-encoded) instruction, opcode 0x0b. The second source/merge
// register (v2) is carried in the VEX.vvvv field via hlcode. Panics at
// assembly time if no operand form matches.
func (self *Program) VROUNDSD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VROUNDSD", 4, Operands { v0, v1, v2, v3 })
    // VROUNDSD imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: 3-byte VEX prefix, opcode, register-direct ModRM, imm8.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x0b)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VROUNDSD imm8, m64, xmm, xmm
    if isImm8(v0) && isM64(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: VEX prefix via helper, then ModRM/SIB/disp and imm8.
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x0b)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No candidate encoding was added: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VROUNDSD")
    }
    return p
}
 92303  
// VROUNDSS performs "Round Scalar Single Precision Floating-Point Values".
//
// Mnemonic        : VROUNDSS
// Supported forms : (2 forms)
//
//    * VROUNDSS imm8, xmm, xmm, xmm    [AVX]
//    * VROUNDSS imm8, m32, xmm, xmm    [AVX]
//
// Single-precision counterpart of VROUNDSD (opcode 0x0a, 4-byte memory
// operand). Panics at assembly time if no operand form matches.
func (self *Program) VROUNDSS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VROUNDSS", 4, Operands { v0, v1, v2, v3 })
    // VROUNDSS imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: 3-byte VEX prefix, opcode, register-direct ModRM, imm8.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x0a)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VROUNDSS imm8, m32, xmm, xmm
    if isImm8(v0) && isM32(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: VEX prefix via helper, then ModRM/SIB/disp and imm8.
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x0a)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No candidate encoding was added: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VROUNDSS")
    }
    return p
}
 92343  
// VRSQRT14PD performs "Compute Approximate Reciprocals of Square Roots of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VRSQRT14PD
// Supported forms : (6 forms)
//
//    * VRSQRT14PD m512/m64bcst, zmm{k}{z}    [AVX512F]
//    * VRSQRT14PD zmm, zmm{k}{z}             [AVX512F]
//    * VRSQRT14PD m128/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VRSQRT14PD m256/m64bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VRSQRT14PD xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VRSQRT14PD ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// EVEX-encoded, opcode 0x4e, no immediate operand. Panics at assembly time
// if no operand form matches.
func (self *Program) VRSQRT14PD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VRSQRT14PD", 2, Operands { v0, v1 })
    // VRSQRT14PD m512/m64bcst, zmm{k}{z}
    if isM512M64bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 512-bit memory form: EVEX via helper, 64-byte disp8 scale.
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x4e)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VRSQRT14PD zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 512-bit register form: hand-built EVEX prefix, register-direct ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x4e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VRSQRT14PD m128/m64bcst, xmm{k}{z}
    if isM128M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit memory form: 16-byte disp8 scale.
            m.evex(0b10, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x4e)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VRSQRT14PD m256/m64bcst, ymm{k}{z}
    if isM256M64bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form: 32-byte disp8 scale.
            m.evex(0b10, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x4e)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VRSQRT14PD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x4e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VRSQRT14PD ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x4e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No candidate encoding was added: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VRSQRT14PD")
    }
    return p
}
 92432  
// VRSQRT14PS performs "Compute Approximate Reciprocals of Square Roots of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VRSQRT14PS
// Supported forms : (6 forms)
//
//    * VRSQRT14PS m512/m32bcst, zmm{k}{z}    [AVX512F]
//    * VRSQRT14PS zmm, zmm{k}{z}             [AVX512F]
//    * VRSQRT14PS m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VRSQRT14PS m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VRSQRT14PS xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VRSQRT14PS ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VRSQRT14PS(v0 interface{}, v1 interface{}) *Instruction {
    // Allocate the instruction, then register one encoder closure for each
    // operand form that (v0, v1) matches; if none match, panic at the bottom.
    // Single-precision variant of VRSQRT14PD: same opcode 0x4e, but the EVEX
    // prefix bytes differ (0x05 / 0x7d instead of 0x85 / 0xfd).
    p := self.alloc("VRSQRT14PS", 2, Operands { v0, v1 })
    // VRSQRT14PS m512/m32bcst, zmm{k}{z}
    if isM512M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: EVEX prefix via helper, opcode, ModRM/SIB with a
            // 64-byte compressed-displacement scale (512-bit access).
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x4e)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VRSQRT14PS zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-assembled EVEX prefix (0x62 escape + payload),
            // opcode, and register-direct ModRM byte (0xc0 | reg<<3 | rm).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x4e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VRSQRT14PS m128/m32bcst, xmm{k}{z}
    if isM128M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form at 128-bit width (disp scale 16).
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x4e)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VRSQRT14PS m256/m32bcst, ymm{k}{z}
    if isM256M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form at 256-bit width (disp scale 32).
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x4e)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VRSQRT14PS xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form at 128-bit width (vector-length bits folded into 0x08).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x4e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VRSQRT14PS ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form at 256-bit width (vector-length bits folded into 0x28).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x4e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered: the operands match no supported form.
    if p.len == 0 {
        panic("invalid operands for VRSQRT14PS")
    }
    return p
}
 92521  
// VRSQRT14SD performs "Compute Approximate Reciprocal of a Square Root of a Scalar Double-Precision Floating-Point Value".
//
// Mnemonic        : VRSQRT14SD
// Supported forms : (2 forms)
//
//    * VRSQRT14SD xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VRSQRT14SD m64, xmm, xmm{k}{z}    [AVX512F]
//
func (self *Program) VRSQRT14SD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Scalar variant: three operands (src2, src1, dst), opcode 0x4f.
    // One encoder is registered per matching form; panic if none match.
    p := self.alloc("VRSQRT14SD", 3, Operands { v0, v1, v2 })
    // VRSQRT14SD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-assembled EVEX prefix — here the second
            // source register v[1] is folded into the payload bytes (hlcode /
            // ecode), then opcode 0x4f and a register-direct ModRM byte.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x4f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VRSQRT14SD m64, xmm, xmm{k}{z}
    if isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: EVEX via helper (vcode(v[1]) carries the second
            // source), opcode 0x4f, ModRM/SIB with an 8-byte disp scale.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x4f)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // No encoder was registered: the operands match no supported form.
    if p.len == 0 {
        panic("invalid operands for VRSQRT14SD")
    }
    return p
}
 92560  
// VRSQRT14SS performs "Compute Approximate Reciprocal of a Square Root of a Scalar Single-Precision Floating-Point Value".
//
// Mnemonic        : VRSQRT14SS
// Supported forms : (2 forms)
//
//    * VRSQRT14SS xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VRSQRT14SS m32, xmm, xmm{k}{z}    [AVX512F]
//
func (self *Program) VRSQRT14SS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Single-precision scalar variant of VRSQRT14SD: same opcode 0x4f, but
    // with the single-precision EVEX prefix bytes (0x05 / 0x7d) and a 4-byte
    // memory operand. One encoder per matching form; panic if none match.
    p := self.alloc("VRSQRT14SS", 3, Operands { v0, v1, v2 })
    // VRSQRT14SS xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-assembled EVEX prefix with v[1] (second
            // source) folded into the payload, opcode, register-direct ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x4f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VRSQRT14SS m32, xmm, xmm{k}{z}
    if isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: EVEX via helper, opcode, ModRM/SIB with a 4-byte
            // compressed-displacement scale (scalar 32-bit access).
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x4f)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // No encoder was registered: the operands match no supported form.
    if p.len == 0 {
        panic("invalid operands for VRSQRT14SS")
    }
    return p
}
 92599  
// VRSQRT28PD performs "Approximation to the Reciprocal Square Root of Packed Double-Precision Floating-Point Values with Less Than 2^-28 Relative Error".
//
// Mnemonic        : VRSQRT28PD
// Supported forms : (3 forms)
//
//    * VRSQRT28PD m512/m64bcst, zmm{k}{z}    [AVX512ER]
//    * VRSQRT28PD {sae}, zmm, zmm{k}{z}      [AVX512ER]
//    * VRSQRT28PD zmm, zmm{k}{z}             [AVX512ER]
//
func (self *Program) VRSQRT28PD(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    // Variadic signature: the optional third operand accommodates the
    // {sae} (suppress-all-exceptions) form, which shifts the real operands
    // to (v1, vv[0]). One encoder per matching form; panic if none match.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VRSQRT28PD", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VRSQRT28PD", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VRSQRT28PD takes 2 or 3 operands")
    }
    // VRSQRT28PD m512/m64bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: EVEX via helper, opcode 0xcc, ModRM/SIB with a
            // 64-byte compressed-displacement scale.
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xcc)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VRSQRT28PD {sae}, zmm, zmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // {sae} register form: same hand-assembled EVEX prefix as below
            // but with the SAE-carrying byte (0x18 instead of 0x48); operands
            // come from v[1]/v[2] since v[0] is the {sae} marker.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x18)
            m.emit(0xcc)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VRSQRT28PD zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Plain register form: EVEX prefix, opcode 0xcc, register-direct
            // ModRM byte (0xc0 | reg<<3 | rm).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0xcc)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered: the operands match no supported form.
    if p.len == 0 {
        panic("invalid operands for VRSQRT28PD")
    }
    return p
}
 92657  
// VRSQRT28PS performs "Approximation to the Reciprocal Square Root of Packed Single-Precision Floating-Point Values with Less Than 2^-28 Relative Error".
//
// Mnemonic        : VRSQRT28PS
// Supported forms : (3 forms)
//
//    * VRSQRT28PS m512/m32bcst, zmm{k}{z}    [AVX512ER]
//    * VRSQRT28PS {sae}, zmm, zmm{k}{z}      [AVX512ER]
//    * VRSQRT28PS zmm, zmm{k}{z}             [AVX512ER]
//
func (self *Program) VRSQRT28PS(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    // Single-precision variant of VRSQRT28PD: same opcode 0xcc, but with the
    // single-precision EVEX prefix bytes (0x05 / 0x7d). The variadic third
    // operand accommodates the {sae} form. Panic if no form matches.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VRSQRT28PS", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VRSQRT28PS", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VRSQRT28PS takes 2 or 3 operands")
    }
    // VRSQRT28PS m512/m32bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: EVEX via helper, opcode, ModRM/SIB with a 64-byte
            // compressed-displacement scale.
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xcc)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VRSQRT28PS {sae}, zmm, zmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // {sae} register form (byte 0x18 instead of 0x48); v[0] is the
            // {sae} marker so the registers are v[1]/v[2].
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x18)
            m.emit(0xcc)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VRSQRT28PS zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Plain register form: EVEX prefix, opcode, register-direct ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0xcc)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered: the operands match no supported form.
    if p.len == 0 {
        panic("invalid operands for VRSQRT28PS")
    }
    return p
}
 92715  
// VRSQRT28SD performs "Approximation to the Reciprocal Square Root of a Scalar Double-Precision Floating-Point Value with Less Than 2^-28 Relative Error".
//
// Mnemonic        : VRSQRT28SD
// Supported forms : (3 forms)
//
//    * VRSQRT28SD m64, xmm, xmm{k}{z}           [AVX512ER]
//    * VRSQRT28SD {sae}, xmm, xmm, xmm{k}{z}    [AVX512ER]
//    * VRSQRT28SD xmm, xmm, xmm{k}{z}           [AVX512ER]
//
func (self *Program) VRSQRT28SD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // Scalar variant with opcode 0xcd; the variadic fourth operand
    // accommodates the {sae} form, which shifts the real operands to
    // (v1, v2, vv[0]). One encoder per matching form; panic if none match.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VRSQRT28SD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VRSQRT28SD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VRSQRT28SD takes 3 or 4 operands")
    }
    // VRSQRT28SD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: EVEX via helper (vcode(v[1]) carries the second
            // source), opcode 0xcd, ModRM/SIB with an 8-byte disp scale.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xcd)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VRSQRT28SD {sae}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // {sae} register form (the 0x10 bit flags SAE); v[0] is the {sae}
            // marker so the registers are v[1]/v[2]/v[3].
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xcd)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VRSQRT28SD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Plain register form: EVEX prefix with v[1] (second source)
            // folded into the payload, opcode, register-direct ModRM byte.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xcd)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered: the operands match no supported form.
    if p.len == 0 {
        panic("invalid operands for VRSQRT28SD")
    }
    return p
}
 92773  
// VRSQRT28SS performs "Approximation to the Reciprocal Square Root of a Scalar Single-Precision Floating-Point Value with Less Than 2^-28 Relative Error".
//
// Mnemonic        : VRSQRT28SS
// Supported forms : (3 forms)
//
//    * VRSQRT28SS m32, xmm, xmm{k}{z}           [AVX512ER]
//    * VRSQRT28SS {sae}, xmm, xmm, xmm{k}{z}    [AVX512ER]
//    * VRSQRT28SS xmm, xmm, xmm{k}{z}           [AVX512ER]
//
func (self *Program) VRSQRT28SS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // Single-precision scalar variant of VRSQRT28SD: same opcode 0xcd, but
    // with the single-precision EVEX prefix bytes (0x05 / 0x7d) and a 4-byte
    // memory operand. The variadic fourth operand accommodates {sae}.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VRSQRT28SS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VRSQRT28SS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VRSQRT28SS takes 3 or 4 operands")
    }
    // VRSQRT28SS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: EVEX via helper, opcode, ModRM/SIB with a 4-byte
            // compressed-displacement scale (scalar 32-bit access).
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xcd)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VRSQRT28SS {sae}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // {sae} register form (the 0x10 bit flags SAE); v[0] is the {sae}
            // marker so the registers are v[1]/v[2]/v[3].
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xcd)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VRSQRT28SS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Plain register form: EVEX prefix with v[1] (second source)
            // folded into the payload, opcode, register-direct ModRM byte.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xcd)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered: the operands match no supported form.
    if p.len == 0 {
        panic("invalid operands for VRSQRT28SS")
    }
    return p
}
 92831  
// VRSQRTPS performs "Compute Reciprocals of Square Roots of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VRSQRTPS
// Supported forms : (4 forms)
//
//    * VRSQRTPS xmm, xmm     [AVX]
//    * VRSQRTPS m128, xmm    [AVX]
//    * VRSQRTPS ymm, ymm     [AVX]
//    * VRSQRTPS m256, ymm    [AVX]
//
func (self *Program) VRSQRTPS(v0 interface{}, v1 interface{}) *Instruction {
    // Legacy VEX-encoded (AVX, not AVX-512) instruction: the vex2 helper
    // builds the two-byte VEX prefix, then opcode 0x52 and ModRM follow.
    // One encoder per matching form; panic if none match.
    p := self.alloc("VRSQRTPS", 2, Operands { v0, v1 })
    // VRSQRTPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit register form: VEX prefix, opcode, register-direct ModRM.
            m.vex2(0, hcode(v[1]), v[0], 0)
            m.emit(0x52)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VRSQRTPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit memory form: VEX has no compressed displacement, so the
            // mrsd scale is 1.
            m.vex2(0, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x52)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VRSQRTPS ymm, ymm
    if isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form (first vex2 argument 4 selects the wider
            // vector length).
            m.vex2(4, hcode(v[1]), v[0], 0)
            m.emit(0x52)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VRSQRTPS m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form.
            m.vex2(4, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x52)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoder was registered: the operands match no supported form.
    if p.len == 0 {
        panic("invalid operands for VRSQRTPS")
    }
    return p
}
 92889  
// VRSQRTSS performs "Compute Reciprocal of Square Root of Scalar Single-Precision Floating-Point Value".
//
// Mnemonic        : VRSQRTSS
// Supported forms : (2 forms)
//
//    * VRSQRTSS xmm, xmm, xmm    [AVX]
//    * VRSQRTSS m32, xmm, xmm    [AVX]
//
func (self *Program) VRSQRTSS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Legacy VEX-encoded scalar variant of VRSQRTPS: three operands
    // (src2, src1, dst), vex2 prefix helper, opcode 0x52. One encoder per
    // matching form; panic if none match.
    p := self.alloc("VRSQRTSS", 3, Operands { v0, v1, v2 })
    // VRSQRTSS xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: the second source v[1] is passed to vex2 via
            // hlcode; then opcode and register-direct ModRM byte.
            m.vex2(2, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x52)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VRSQRTSS m32, xmm, xmm
    if isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: VEX has no compressed displacement (mrsd scale 1).
            m.vex2(2, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x52)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No encoder was registered: the operands match no supported form.
    if p.len == 0 {
        panic("invalid operands for VRSQRTSS")
    }
    return p
}
 92925  
// VSCALEFPD performs "Scale Packed Double-Precision Floating-Point Values With Double-Precision Floating-Point Values".
//
// Mnemonic        : VSCALEFPD
// Supported forms : (7 forms)
//
//    * VSCALEFPD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VSCALEFPD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VSCALEFPD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VSCALEFPD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VSCALEFPD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VSCALEFPD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VSCALEFPD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VSCALEFPD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // Three-operand EVEX instruction with opcode 0x2c; the variadic fourth
    // operand accommodates the {er} (embedded-rounding) form, which shifts
    // the real operands to (v1, v2, vv[0]). One encoder is registered per
    // matching form; panic if none match.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VSCALEFPD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VSCALEFPD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VSCALEFPD takes 3 or 4 operands")
    }
    // VSCALEFPD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 512-bit memory form: EVEX via helper (vcode(v[1]) carries the
            // second source), opcode, ModRM/SIB with a 64-byte disp scale.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x2c)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VSCALEFPD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // {er} register form: the rounding-mode code vcode(v[0]) is folded
            // into the EVEX payload (<< 5) together with the 0x10 flag bit;
            // registers are v[1]/v[2]/v[3] since v[0] is the {er} marker.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VSCALEFPD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 512-bit register form: hand-assembled EVEX prefix with v[1]
            // (second source) folded in, opcode, register-direct ModRM byte.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VSCALEFPD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit memory form (disp scale 16).
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x2c)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VSCALEFPD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit register form.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VSCALEFPD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form (disp scale 32).
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x2c)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VSCALEFPD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form (vector-length bits folded into 0x20).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered: the operands match no supported form.
    if p.len == 0 {
        panic("invalid operands for VSCALEFPD")
    }
    return p
}
 93033  
// VSCALEFPS performs "Scale Packed Single-Precision Floating-Point Values With Single-Precision Floating-Point Values".
//
// The matched encoding is appended to the program and the new Instruction is
// returned; if no supported form matches the operands, this panics.
//
// Mnemonic        : VSCALEFPS
// Supported forms : (7 forms)
//
//    * VSCALEFPS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VSCALEFPS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VSCALEFPS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VSCALEFPS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VSCALEFPS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VSCALEFPS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VSCALEFPS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VSCALEFPS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // 3 operands for the plain forms, 4 when a rounding-control operand ({er}) leads.
    switch len(vv) {
        case 0  : p = self.alloc("VSCALEFPS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VSCALEFPS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VSCALEFPS takes 3 or 4 operands")
    }
    // VSCALEFPS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: m.evex builds the EVEX prefix from the destination's
            // extension bits, the memory operand, vvvv, mask, zeroing and
            // broadcast flags; opcode 0x2C, disp8 compressed by 64 (full ZMM).
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x2c)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VSCALEFPS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form with embedded rounding: the 4-byte EVEX prefix is
            // built by hand (0x62, P0, P1, P2); vcode(v[0])<<5 presumably places
            // the {er} rounding mode in the L'L bits with b set — TODO confirm
            // against m.evex semantics.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VSCALEFPS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form, 512-bit (trailing 0x40 in P2 vs 0x20/0x00 below).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VSCALEFPS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit memory form: disp8 scale 16.
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x2c)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VSCALEFPS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit register form.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VSCALEFPS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form: disp8 scale 32.
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x2c)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VSCALEFPS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the operand combination.
    if p.len == 0 {
        panic("invalid operands for VSCALEFPS")
    }
    return p
}
 93141  
// VSCALEFSD performs "Scale Scalar Double-Precision Floating-Point Value With a Double-Precision Floating-Point Value".
//
// The matched encoding is appended to the program and the new Instruction is
// returned; if no supported form matches the operands, this panics.
//
// Mnemonic        : VSCALEFSD
// Supported forms : (3 forms)
//
//    * VSCALEFSD m64, xmm, xmm{k}{z}          [AVX512F]
//    * VSCALEFSD {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VSCALEFSD xmm, xmm, xmm{k}{z}          [AVX512F]
//
func (self *Program) VSCALEFSD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // 3 operands for the plain forms, 4 when a rounding-control operand ({er}) leads.
    switch len(vv) {
        case 0  : p = self.alloc("VSCALEFSD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VSCALEFSD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VSCALEFSD takes 3 or 4 operands")
    }
    // VSCALEFSD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Scalar memory form: opcode 0x2D, disp8 compressed by 8 (one f64).
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x2d)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VSCALEFSD {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form with embedded rounding; EVEX prefix emitted by hand.
            // vcode(v[0])<<5 presumably carries the {er} mode — TODO confirm.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VSCALEFSD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Plain register form.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the operand combination.
    if p.len == 0 {
        panic("invalid operands for VSCALEFSD")
    }
    return p
}
 93199  
// VSCALEFSS performs "Scale Scalar Single-Precision Floating-Point Value With a Single-Precision Floating-Point Value".
//
// The matched encoding is appended to the program and the new Instruction is
// returned; if no supported form matches the operands, this panics.
//
// Mnemonic        : VSCALEFSS
// Supported forms : (3 forms)
//
//    * VSCALEFSS m32, xmm, xmm{k}{z}          [AVX512F]
//    * VSCALEFSS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VSCALEFSS xmm, xmm, xmm{k}{z}          [AVX512F]
//
func (self *Program) VSCALEFSS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // 3 operands for the plain forms, 4 when a rounding-control operand ({er}) leads.
    switch len(vv) {
        case 0  : p = self.alloc("VSCALEFSS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VSCALEFSS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VSCALEFSS takes 3 or 4 operands")
    }
    // VSCALEFSS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Scalar memory form: opcode 0x2D, disp8 compressed by 4 (one f32).
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x2d)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VSCALEFSS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form with embedded rounding; EVEX prefix emitted by hand.
            // vcode(v[0])<<5 presumably carries the {er} mode — TODO confirm.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VSCALEFSS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Plain register form.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the operand combination.
    if p.len == 0 {
        panic("invalid operands for VSCALEFSS")
    }
    return p
}
 93257  
// VSCATTERDPD performs "Scatter Packed Double-Precision Floating-Point Values with Signed Doubleword Indices".
//
// The matched encoding is appended to the program and the new Instruction is
// returned; if no supported form matches the operands, this panics. The vm32*
// destination carries its own write-mask register ({k}).
//
// Mnemonic        : VSCATTERDPD
// Supported forms : (3 forms)
//
//    * VSCATTERDPD zmm, vm32y{k}    [AVX512F]
//    * VSCATTERDPD xmm, vm32x{k}    [AVX512F,AVX512VL]
//    * VSCATTERDPD ymm, vm32x{k}    [AVX512F,AVX512VL]
//
func (self *Program) VSCATTERDPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VSCATTERDPD", 2, Operands { v0, v1 })
    // VSCATTERDPD zmm, vm32y{k}
    if isZMM(v0) && isVMYk(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Opcode 0xA2; the mask comes from the VSIB operand, no vvvv/z/b
            // bits are used; disp8 compressed by 8 (one f64 per element).
            m.evex(0b10, 0x85, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa2)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VSCATTERDPD xmm, vm32x{k}
    if isEVEXXMM(v0) && isVMXk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit form (L'L=0b00).
            m.evex(0b10, 0x85, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa2)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VSCATTERDPD ymm, vm32x{k}
    if isEVEXYMM(v0) && isVMXk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit form (L'L=0b01).
            m.evex(0b10, 0x85, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa2)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // No form matched the operand combination.
    if p.len == 0 {
        panic("invalid operands for VSCATTERDPD")
    }
    return p
}
 93304  
// VSCATTERDPS performs "Scatter Packed Single-Precision Floating-Point Values with Signed Doubleword Indices".
//
// The matched encoding is appended to the program and the new Instruction is
// returned; if no supported form matches the operands, this panics. The vm32*
// destination carries its own write-mask register ({k}).
//
// Mnemonic        : VSCATTERDPS
// Supported forms : (3 forms)
//
//    * VSCATTERDPS zmm, vm32z{k}    [AVX512F]
//    * VSCATTERDPS xmm, vm32x{k}    [AVX512F,AVX512VL]
//    * VSCATTERDPS ymm, vm32y{k}    [AVX512F,AVX512VL]
//
func (self *Program) VSCATTERDPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VSCATTERDPS", 2, Operands { v0, v1 })
    // VSCATTERDPS zmm, vm32z{k}
    if isZMM(v0) && isVMZk(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Opcode 0xA2; mask from the VSIB operand; disp8 compressed by 4
            // (one f32 per element).
            m.evex(0b10, 0x05, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa2)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // VSCATTERDPS xmm, vm32x{k}
    if isEVEXXMM(v0) && isVMXk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit form (L'L=0b00).
            m.evex(0b10, 0x05, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa2)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // VSCATTERDPS ymm, vm32y{k}
    if isEVEXYMM(v0) && isVMYk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit form (L'L=0b01).
            m.evex(0b10, 0x05, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa2)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // No form matched the operand combination.
    if p.len == 0 {
        panic("invalid operands for VSCATTERDPS")
    }
    return p
}
 93351  
// VSCATTERPF0DPD performs "Sparse Prefetch Packed Double-Precision Floating-Point Data Values with Signed Doubleword Indices Using T0 Hint with Intent to Write".
//
// The matched encoding is appended to the program and the new Instruction is
// returned; if no supported form matches the operand, this panics.
//
// Mnemonic        : VSCATTERPF0DPD
// Supported forms : (1 form)
//
//    * VSCATTERPF0DPD vm32y{k}    [AVX512PF]
//
func (self *Program) VSCATTERPF0DPD(v0 interface{}) *Instruction {
    p := self.alloc("VSCATTERPF0DPD", 1, Operands { v0 })
    // VSCATTERPF0DPD vm32y{k}
    if isVMYk(v0) {
        self.require(ISA_AVX512PF)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Opcode 0xC6 with /5 as the ModRM reg-field opcode extension
            // (distinguishes the T0 scatter-prefetch hint); disp8 scale 8.
            m.evex(0b10, 0x85, 0b10, 0, addr(v[0]), 0, kcode(v[0]), 0, 0)
            m.emit(0xc6)
            m.mrsd(5, addr(v[0]), 8)
        })
    }
    // No form matched the operand.
    if p.len == 0 {
        panic("invalid operands for VSCATTERPF0DPD")
    }
    return p
}
 93376  
// VSCATTERPF0DPS performs "Sparse Prefetch Packed Single-Precision Floating-Point Data Values with Signed Doubleword Indices Using T0 Hint with Intent to Write".
//
// The matched encoding is appended to the program and the new Instruction is
// returned; if no supported form matches the operand, this panics.
//
// Mnemonic        : VSCATTERPF0DPS
// Supported forms : (1 form)
//
//    * VSCATTERPF0DPS vm32z{k}    [AVX512PF]
//
func (self *Program) VSCATTERPF0DPS(v0 interface{}) *Instruction {
    p := self.alloc("VSCATTERPF0DPS", 1, Operands { v0 })
    // VSCATTERPF0DPS vm32z{k}
    if isVMZk(v0) {
        self.require(ISA_AVX512PF)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Opcode 0xC6 with /5 opcode extension (T0 hint); disp8 scale 4.
            m.evex(0b10, 0x05, 0b10, 0, addr(v[0]), 0, kcode(v[0]), 0, 0)
            m.emit(0xc6)
            m.mrsd(5, addr(v[0]), 4)
        })
    }
    // No form matched the operand.
    if p.len == 0 {
        panic("invalid operands for VSCATTERPF0DPS")
    }
    return p
}
 93401  
// VSCATTERPF0QPD performs "Sparse Prefetch Packed Double-Precision Floating-Point Data Values with Signed Quadword Indices Using T0 Hint with Intent to Write".
//
// The matched encoding is appended to the program and the new Instruction is
// returned; if no supported form matches the operand, this panics.
//
// Mnemonic        : VSCATTERPF0QPD
// Supported forms : (1 form)
//
//    * VSCATTERPF0QPD vm64z{k}    [AVX512PF]
//
func (self *Program) VSCATTERPF0QPD(v0 interface{}) *Instruction {
    p := self.alloc("VSCATTERPF0QPD", 1, Operands { v0 })
    // VSCATTERPF0QPD vm64z{k}
    if isVMZk(v0) {
        self.require(ISA_AVX512PF)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Opcode 0xC7 with /5 opcode extension (T0 hint); disp8 scale 8.
            m.evex(0b10, 0x85, 0b10, 0, addr(v[0]), 0, kcode(v[0]), 0, 0)
            m.emit(0xc7)
            m.mrsd(5, addr(v[0]), 8)
        })
    }
    // No form matched the operand.
    if p.len == 0 {
        panic("invalid operands for VSCATTERPF0QPD")
    }
    return p
}
 93426  
// VSCATTERPF0QPS performs "Sparse Prefetch Packed Single-Precision Floating-Point Data Values with Signed Quadword Indices Using T0 Hint with Intent to Write".
//
// The matched encoding is appended to the program and the new Instruction is
// returned; if no supported form matches the operand, this panics.
//
// Mnemonic        : VSCATTERPF0QPS
// Supported forms : (1 form)
//
//    * VSCATTERPF0QPS vm64z{k}    [AVX512PF]
//
func (self *Program) VSCATTERPF0QPS(v0 interface{}) *Instruction {
    p := self.alloc("VSCATTERPF0QPS", 1, Operands { v0 })
    // VSCATTERPF0QPS vm64z{k}
    if isVMZk(v0) {
        self.require(ISA_AVX512PF)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Opcode 0xC7 with /5 opcode extension (T0 hint); disp8 scale 4.
            m.evex(0b10, 0x05, 0b10, 0, addr(v[0]), 0, kcode(v[0]), 0, 0)
            m.emit(0xc7)
            m.mrsd(5, addr(v[0]), 4)
        })
    }
    // No form matched the operand.
    if p.len == 0 {
        panic("invalid operands for VSCATTERPF0QPS")
    }
    return p
}
 93451  
// VSCATTERPF1DPD performs "Sparse Prefetch Packed Double-Precision Floating-Point Data Values with Signed Doubleword Indices Using T1 Hint with Intent to Write".
//
// The matched encoding is appended to the program and the new Instruction is
// returned; if no supported form matches the operand, this panics.
//
// Mnemonic        : VSCATTERPF1DPD
// Supported forms : (1 form)
//
//    * VSCATTERPF1DPD vm32y{k}    [AVX512PF]
//
func (self *Program) VSCATTERPF1DPD(v0 interface{}) *Instruction {
    p := self.alloc("VSCATTERPF1DPD", 1, Operands { v0 })
    // VSCATTERPF1DPD vm32y{k}
    if isVMYk(v0) {
        self.require(ISA_AVX512PF)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Opcode 0xC6 with /6 opcode extension (T1 hint, vs /5 for the T0
            // variant); disp8 scale 8.
            m.evex(0b10, 0x85, 0b10, 0, addr(v[0]), 0, kcode(v[0]), 0, 0)
            m.emit(0xc6)
            m.mrsd(6, addr(v[0]), 8)
        })
    }
    // No form matched the operand.
    if p.len == 0 {
        panic("invalid operands for VSCATTERPF1DPD")
    }
    return p
}
 93476  
// VSCATTERPF1DPS performs "Sparse Prefetch Packed Single-Precision Floating-Point Data Values with Signed Doubleword Indices Using T1 Hint with Intent to Write".
//
// The matched encoding is appended to the program and the new Instruction is
// returned; if no supported form matches the operand, this panics.
//
// Mnemonic        : VSCATTERPF1DPS
// Supported forms : (1 form)
//
//    * VSCATTERPF1DPS vm32z{k}    [AVX512PF]
//
func (self *Program) VSCATTERPF1DPS(v0 interface{}) *Instruction {
    p := self.alloc("VSCATTERPF1DPS", 1, Operands { v0 })
    // VSCATTERPF1DPS vm32z{k}
    if isVMZk(v0) {
        self.require(ISA_AVX512PF)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Opcode 0xC6 with /6 opcode extension (T1 hint); disp8 scale 4.
            m.evex(0b10, 0x05, 0b10, 0, addr(v[0]), 0, kcode(v[0]), 0, 0)
            m.emit(0xc6)
            m.mrsd(6, addr(v[0]), 4)
        })
    }
    // No form matched the operand.
    if p.len == 0 {
        panic("invalid operands for VSCATTERPF1DPS")
    }
    return p
}
 93501  
// VSCATTERPF1QPD performs "Sparse Prefetch Packed Double-Precision Floating-Point Data Values with Signed Quadword Indices Using T1 Hint with Intent to Write".
//
// The matched encoding is appended to the program and the new Instruction is
// returned; if no supported form matches the operand, this panics.
//
// Mnemonic        : VSCATTERPF1QPD
// Supported forms : (1 form)
//
//    * VSCATTERPF1QPD vm64z{k}    [AVX512PF]
//
func (self *Program) VSCATTERPF1QPD(v0 interface{}) *Instruction {
    p := self.alloc("VSCATTERPF1QPD", 1, Operands { v0 })
    // VSCATTERPF1QPD vm64z{k}
    if isVMZk(v0) {
        self.require(ISA_AVX512PF)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Opcode 0xC7 with /6 opcode extension (T1 hint); disp8 scale 8.
            m.evex(0b10, 0x85, 0b10, 0, addr(v[0]), 0, kcode(v[0]), 0, 0)
            m.emit(0xc7)
            m.mrsd(6, addr(v[0]), 8)
        })
    }
    // No form matched the operand.
    if p.len == 0 {
        panic("invalid operands for VSCATTERPF1QPD")
    }
    return p
}
 93526  
// VSCATTERPF1QPS performs "Sparse Prefetch Packed Single-Precision Floating-Point Data Values with Signed Quadword Indices Using T1 Hint with Intent to Write".
//
// The matched encoding is appended to the program and the new Instruction is
// returned; if no supported form matches the operand, this panics.
//
// Mnemonic        : VSCATTERPF1QPS
// Supported forms : (1 form)
//
//    * VSCATTERPF1QPS vm64z{k}    [AVX512PF]
//
func (self *Program) VSCATTERPF1QPS(v0 interface{}) *Instruction {
    p := self.alloc("VSCATTERPF1QPS", 1, Operands { v0 })
    // VSCATTERPF1QPS vm64z{k}
    if isVMZk(v0) {
        self.require(ISA_AVX512PF)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Opcode 0xC7 with /6 opcode extension (T1 hint); disp8 scale 4.
            m.evex(0b10, 0x05, 0b10, 0, addr(v[0]), 0, kcode(v[0]), 0, 0)
            m.emit(0xc7)
            m.mrsd(6, addr(v[0]), 4)
        })
    }
    // No form matched the operand.
    if p.len == 0 {
        panic("invalid operands for VSCATTERPF1QPS")
    }
    return p
}
 93551  
// VSCATTERQPD performs "Scatter Packed Double-Precision Floating-Point Values with Signed Quadword Indices".
//
// The matched encoding is appended to the program and the new Instruction is
// returned; if no supported form matches the operands, this panics. The vm64*
// destination carries its own write-mask register ({k}).
//
// Mnemonic        : VSCATTERQPD
// Supported forms : (3 forms)
//
//    * VSCATTERQPD zmm, vm64z{k}    [AVX512F]
//    * VSCATTERQPD xmm, vm64x{k}    [AVX512F,AVX512VL]
//    * VSCATTERQPD ymm, vm64y{k}    [AVX512F,AVX512VL]
//
func (self *Program) VSCATTERQPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VSCATTERQPD", 2, Operands { v0, v1 })
    // VSCATTERQPD zmm, vm64z{k}
    if isZMM(v0) && isVMZk(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Opcode 0xA3; mask from the VSIB operand; disp8 compressed by 8
            // (one f64 per element).
            m.evex(0b10, 0x85, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa3)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VSCATTERQPD xmm, vm64x{k}
    if isEVEXXMM(v0) && isVMXk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit form (L'L=0b00).
            m.evex(0b10, 0x85, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa3)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VSCATTERQPD ymm, vm64y{k}
    if isEVEXYMM(v0) && isVMYk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit form (L'L=0b01).
            m.evex(0b10, 0x85, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa3)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // No form matched the operand combination.
    if p.len == 0 {
        panic("invalid operands for VSCATTERQPD")
    }
    return p
}
 93598  
// VSCATTERQPS performs "Scatter Packed Single-Precision Floating-Point Values with Signed Quadword Indices".
//
// The matched encoding is appended to the program and the new Instruction is
// returned; if no supported form matches the operands, this panics. Because the
// 64-bit indices are twice as wide as the f32 data, the data register is one
// size class below the index vector in every form.
//
// Mnemonic        : VSCATTERQPS
// Supported forms : (3 forms)
//
//    * VSCATTERQPS ymm, vm64z{k}    [AVX512F]
//    * VSCATTERQPS xmm, vm64x{k}    [AVX512F,AVX512VL]
//    * VSCATTERQPS xmm, vm64y{k}    [AVX512F,AVX512VL]
//
func (self *Program) VSCATTERQPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VSCATTERQPS", 2, Operands { v0, v1 })
    // VSCATTERQPS ymm, vm64z{k}
    if isEVEXYMM(v0) && isVMZk(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Opcode 0xA3; mask from the VSIB operand; disp8 compressed by 4
            // (one f32 per element).
            m.evex(0b10, 0x05, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa3)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // VSCATTERQPS xmm, vm64x{k}
    if isEVEXXMM(v0) && isVMXk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit form (L'L=0b00).
            m.evex(0b10, 0x05, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa3)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // VSCATTERQPS xmm, vm64y{k}
    if isEVEXXMM(v0) && isVMYk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit form (L'L=0b01).
            m.evex(0b10, 0x05, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa3)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // No form matched the operand combination.
    if p.len == 0 {
        panic("invalid operands for VSCATTERQPS")
    }
    return p
}
 93645  
// VSHUFF32X4 performs "Shuffle 128-Bit Packed Single-Precision Floating-Point Values".
//
// The matched encoding is appended to the program and the new Instruction is
// returned; if no supported form matches the operands, this panics. The imm8
// selector is always emitted as the trailing byte.
//
// Mnemonic        : VSHUFF32X4
// Supported forms : (4 forms)
//
//    * VSHUFF32X4 imm8, m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VSHUFF32X4 imm8, zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VSHUFF32X4 imm8, m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VSHUFF32X4 imm8, ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VSHUFF32X4(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VSHUFF32X4", 4, Operands { v0, v1, v2, v3 })
    // VSHUFF32X4 imm8, m512/m32bcst, zmm, zmm{k}{z}
    if isImm8(v0) && isM512M32bcst(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 512-bit memory form: opcode 0x23, disp8 compressed by 64, then
            // the immediate selector byte.
            m.evex(0b11, 0x05, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x23)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFF32X4 imm8, zmm, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 512-bit register form: EVEX prefix emitted by hand (0x62, P0,
            // P1, P2), then opcode, ModRM, and the immediate.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x23)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFF32X4 imm8, m256/m32bcst, ymm, ymm{k}{z}
    if isImm8(v0) && isM256M32bcst(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form: disp8 compressed by 32.
            m.evex(0b11, 0x05, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x23)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFF32X4 imm8, ymm, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form (trailing 0x20 in P2 vs 0x40 above).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x23)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the operand combination.
    if p.len == 0 {
        panic("invalid operands for VSHUFF32X4")
    }
    return p
}
 93713  
// VSHUFF64X2 performs "Shuffle 128-Bit Packed Double-Precision Floating-Point Values".
//
// The matched encoding is appended to the program and the new Instruction is
// returned; if no supported form matches the operands, this panics. The imm8
// selector is always emitted as the trailing byte. Identical structure to
// VSHUFF32X4 except for the 64-bit-element prefix codes (0x85 / 0xfd).
//
// Mnemonic        : VSHUFF64X2
// Supported forms : (4 forms)
//
//    * VSHUFF64X2 imm8, m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VSHUFF64X2 imm8, zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VSHUFF64X2 imm8, m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VSHUFF64X2 imm8, ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VSHUFF64X2(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VSHUFF64X2", 4, Operands { v0, v1, v2, v3 })
    // VSHUFF64X2 imm8, m512/m64bcst, zmm, zmm{k}{z}
    if isImm8(v0) && isM512M64bcst(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 512-bit memory form: opcode 0x23, disp8 compressed by 64, then
            // the immediate selector byte.
            m.evex(0b11, 0x85, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x23)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFF64X2 imm8, zmm, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 512-bit register form: EVEX prefix emitted by hand (0x62, P0,
            // P1, P2), then opcode, ModRM, and the immediate.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x23)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFF64X2 imm8, m256/m64bcst, ymm, ymm{k}{z}
    if isImm8(v0) && isM256M64bcst(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form: disp8 compressed by 32.
            m.evex(0b11, 0x85, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x23)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFF64X2 imm8, ymm, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form (trailing 0x20 in P2 vs 0x40 above).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x23)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the operand combination.
    if p.len == 0 {
        panic("invalid operands for VSHUFF64X2")
    }
    return p
}
 93781  
// VSHUFI32X4 performs "Shuffle 128-Bit Packed Doubleword Integer Values".
//
// Mnemonic        : VSHUFI32X4
// Supported forms : (4 forms)
//
//    * VSHUFI32X4 imm8, m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VSHUFI32X4 imm8, zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VSHUFI32X4 imm8, m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VSHUFI32X4 imm8, ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VSHUFI32X4(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VSHUFI32X4", 4, Operands { v0, v1, v2, v3 })
    // Machine-generated dispatch: each operand form that matches appends one
    // candidate encoder via p.add; if nothing matched, p.len stays 0 and the
    // function panics below with an "invalid operands" error.
    // VSHUFI32X4 imm8, m512/m32bcst, zmm, zmm{k}{z}
    if isImm8(v0) && isM512M32bcst(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-prefixed memory form, opcode 0x43; the last mrsd argument
            // appears to be the EVEX disp8 compression scale (64 for zmm,
            // 32 for ymm) — verify against _Encoding.
            m.evex(0b11, 0x05, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x43)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFI32X4 imm8, zmm, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62 marker plus three payload bytes
            // built from the register extension / mask / zeroing bit helpers.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x43)
            // ModRM byte: mod=11 (register-direct), reg=dst, rm=src.
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFI32X4 imm8, m256/m32bcst, ymm, ymm{k}{z}
    if isImm8(v0) && isM256M32bcst(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x43)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFI32X4 imm8, ymm, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x43)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No operand form matched: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VSHUFI32X4")
    }
    return p
}
 93849  
// VSHUFI64X2 performs "Shuffle 128-Bit Packed Quadword Integer Values".
//
// Mnemonic        : VSHUFI64X2
// Supported forms : (4 forms)
//
//    * VSHUFI64X2 imm8, m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VSHUFI64X2 imm8, zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VSHUFI64X2 imm8, m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VSHUFI64X2 imm8, ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VSHUFI64X2(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VSHUFI64X2", 4, Operands { v0, v1, v2, v3 })
    // Machine-generated dispatch: each operand form that matches appends one
    // candidate encoder via p.add; if nothing matched, p.len stays 0 and the
    // function panics below with an "invalid operands" error.
    // VSHUFI64X2 imm8, m512/m64bcst, zmm, zmm{k}{z}
    if isImm8(v0) && isM512M64bcst(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-prefixed memory form, opcode 0x43; the last mrsd argument
            // appears to be the EVEX disp8 compression scale (64 for zmm,
            // 32 for ymm) — verify against _Encoding.
            m.evex(0b11, 0x85, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x43)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFI64X2 imm8, zmm, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62 marker plus three payload bytes
            // built from the register extension / mask / zeroing bit helpers.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x43)
            // ModRM byte: mod=11 (register-direct), reg=dst, rm=src.
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFI64X2 imm8, m256/m64bcst, ymm, ymm{k}{z}
    if isImm8(v0) && isM256M64bcst(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x43)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFI64X2 imm8, ymm, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x43)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No operand form matched: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VSHUFI64X2")
    }
    return p
}
 93917  
// VSHUFPD performs "Shuffle Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VSHUFPD
// Supported forms : (10 forms)
//
//    * VSHUFPD imm8, xmm, xmm, xmm                   [AVX]
//    * VSHUFPD imm8, m128, xmm, xmm                  [AVX]
//    * VSHUFPD imm8, ymm, ymm, ymm                   [AVX]
//    * VSHUFPD imm8, m256, ymm, ymm                  [AVX]
//    * VSHUFPD imm8, m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VSHUFPD imm8, zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VSHUFPD imm8, m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VSHUFPD imm8, xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VSHUFPD imm8, m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VSHUFPD imm8, ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VSHUFPD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VSHUFPD", 4, Operands { v0, v1, v2, v3 })
    // Machine-generated dispatch: the first four forms are VEX-encoded (AVX),
    // the rest EVEX-encoded (AVX-512); all share opcode 0xc6. Each matching
    // form appends one candidate encoder via p.add.
    // VSHUFPD imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Two-byte VEX prefix; the first vex2 argument looks like packed
            // pp/L bits (it differs by 4 between the XMM and YMM forms) —
            // verify against _Encoding.vex2.
            m.vex2(1, hcode(v[3]), v[1], hlcode(v[2]))
            m.emit(0xc6)
            // ModRM byte: mod=11 (register-direct), reg=dst, rm=src.
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFPD imm8, m128, xmm, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xc6)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFPD imm8, ymm, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[3]), v[1], hlcode(v[2]))
            m.emit(0xc6)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFPD imm8, m256, ymm, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xc6)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFPD imm8, m512/m64bcst, zmm, zmm{k}{z}
    if isImm8(v0) && isM512M64bcst(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; the final mrsd argument appears to be the
            // EVEX disp8 compression scale (64/32/16 track the vector width).
            m.evex(0b01, 0x85, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0xc6)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFPD imm8, zmm, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62 marker plus three payload bytes
            // built from the register extension / mask / zeroing bit helpers.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0xc6)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFPD imm8, m128/m64bcst, xmm, xmm{k}{z}
    if isImm8(v0) && isM128M64bcst(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0xc6)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFPD imm8, xmm, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0xc6)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFPD imm8, m256/m64bcst, ymm, ymm{k}{z}
    if isImm8(v0) && isM256M64bcst(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0xc6)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFPD imm8, ymm, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0xc6)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No operand form matched: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VSHUFPD")
    }
    return p
}
 94060  
// VSHUFPS performs "Shuffle Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VSHUFPS
// Supported forms : (10 forms)
//
//    * VSHUFPS imm8, xmm, xmm, xmm                   [AVX]
//    * VSHUFPS imm8, m128, xmm, xmm                  [AVX]
//    * VSHUFPS imm8, ymm, ymm, ymm                   [AVX]
//    * VSHUFPS imm8, m256, ymm, ymm                  [AVX]
//    * VSHUFPS imm8, m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VSHUFPS imm8, zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VSHUFPS imm8, m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VSHUFPS imm8, xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VSHUFPS imm8, m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VSHUFPS imm8, ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VSHUFPS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VSHUFPS", 4, Operands { v0, v1, v2, v3 })
    // Machine-generated dispatch: the first four forms are VEX-encoded (AVX),
    // the rest EVEX-encoded (AVX-512); all share opcode 0xc6. Each matching
    // form appends one candidate encoder via p.add.
    // VSHUFPS imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Two-byte VEX prefix; the first vex2 argument looks like packed
            // pp/L bits (it differs by 4 between the XMM and YMM forms) —
            // verify against _Encoding.vex2.
            m.vex2(0, hcode(v[3]), v[1], hlcode(v[2]))
            m.emit(0xc6)
            // ModRM byte: mod=11 (register-direct), reg=dst, rm=src.
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFPS imm8, m128, xmm, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xc6)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFPS imm8, ymm, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[3]), v[1], hlcode(v[2]))
            m.emit(0xc6)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFPS imm8, m256, ymm, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xc6)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFPS imm8, m512/m32bcst, zmm, zmm{k}{z}
    if isImm8(v0) && isM512M32bcst(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; the final mrsd argument appears to be the
            // EVEX disp8 compression scale (64/32/16 track the vector width).
            m.evex(0b01, 0x04, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0xc6)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFPS imm8, zmm, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62 marker plus three payload bytes
            // built from the register extension / mask / zeroing bit helpers.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7c ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0xc6)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFPS imm8, m128/m32bcst, xmm, xmm{k}{z}
    if isImm8(v0) && isM128M32bcst(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0xc6)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFPS imm8, xmm, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7c ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0xc6)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFPS imm8, m256/m32bcst, ymm, ymm{k}{z}
    if isImm8(v0) && isM256M32bcst(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0xc6)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFPS imm8, ymm, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7c ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0xc6)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No operand form matched: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VSHUFPS")
    }
    return p
}
 94203  
// VSQRTPD performs "Compute Square Roots of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VSQRTPD
// Supported forms : (11 forms)
//
//    * VSQRTPD xmm, xmm                   [AVX]
//    * VSQRTPD m128, xmm                  [AVX]
//    * VSQRTPD ymm, ymm                   [AVX]
//    * VSQRTPD m256, ymm                  [AVX]
//    * VSQRTPD m512/m64bcst, zmm{k}{z}    [AVX512F]
//    * VSQRTPD {er}, zmm, zmm{k}{z}       [AVX512F]
//    * VSQRTPD zmm, zmm{k}{z}             [AVX512F]
//    * VSQRTPD m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VSQRTPD m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VSQRTPD xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VSQRTPD ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VSQRTPD(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    // VSQRTPD optionally takes a leading {er} rounding-control operand, hence
    // the variadic tail: 2 operands for the plain forms, 3 with {er}.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VSQRTPD", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VSQRTPD", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VSQRTPD takes 2 or 3 operands")
    }
    // Machine-generated dispatch: VEX-encoded (AVX) forms first, then the
    // EVEX-encoded (AVX-512) forms; all share opcode 0x51. Each matching
    // form appends one candidate encoder via p.add.
    // VSQRTPD xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Two-byte VEX prefix; the first vex2 argument looks like packed
            // pp/L bits (it differs by 4 between the XMM and YMM forms) —
            // verify against _Encoding.vex2.
            m.vex2(1, hcode(v[1]), v[0], 0)
            m.emit(0x51)
            // ModRM byte: mod=11 (register-direct), reg=dst, rm=src.
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VSQRTPD m128, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x51)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VSQRTPD ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[1]), v[0], 0)
            m.emit(0x51)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VSQRTPD m256, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x51)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VSQRTPD m512/m64bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; the final mrsd argument appears to be the
            // EVEX disp8 compression scale (64/32/16 track the vector width).
            m.evex(0b01, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x51)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VSQRTPD {er}, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62 marker plus three payload bytes;
            // the rounding-control operand is folded in via vcode(v[0]).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | (vcode(v[0]) << 5) | kcode(v[2]) | 0x18)
            m.emit(0x51)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VSQRTPD zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x51)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VSQRTPD m128/m32bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x51)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VSQRTPD m256/m32bcst, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x51)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VSQRTPD xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x51)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VSQRTPD ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x51)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No operand form matched: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VSQRTPD")
    }
    return p
}
 94355  
// VSQRTPS performs "Compute Square Roots of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VSQRTPS
// Supported forms : (11 forms)
//
//    * VSQRTPS xmm, xmm                   [AVX]
//    * VSQRTPS m128, xmm                  [AVX]
//    * VSQRTPS ymm, ymm                   [AVX]
//    * VSQRTPS m256, ymm                  [AVX]
//    * VSQRTPS m512/m32bcst, zmm{k}{z}    [AVX512F]
//    * VSQRTPS {er}, zmm, zmm{k}{z}       [AVX512F]
//    * VSQRTPS zmm, zmm{k}{z}             [AVX512F]
//    * VSQRTPS m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VSQRTPS m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VSQRTPS xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VSQRTPS ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// The operands are checked against every supported form; each form that
// matches registers one candidate encoder closure via p.add. The variadic
// tail vv is only populated for the 3-operand {er} rounding-control form.
// If no form matches, the function panics.
func (self *Program) VSQRTPS(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate the instruction with the actual operand count (2 or 3).
    switch len(vv) {
        case 0  : p = self.alloc("VSQRTPS", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VSQRTPS", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VSQRTPS takes 2 or 3 operands")
    }
    // VSQRTPS xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix, opcode 0x51, register-direct ModRM byte.
            m.vex2(0, hcode(v[1]), v[0], 0)
            m.emit(0x51)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VSQRTPS m128, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x51)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VSQRTPS ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[1]), v[0], 0)
            m.emit(0x51)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VSQRTPS m256, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x51)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VSQRTPS m512/m32bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded memory form; the last argument (bcode) presumably
            // selects the embedded-broadcast bit — confirm against m.evex.
            m.evex(0b01, 0x04, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x51)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VSQRTPS {er}, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62 escape byte followed by three
            // payload bytes packing register extension, mask and rounding bits.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[2]) << 7) | (vcode(v[0]) << 5) | kcode(v[2]) | 0x18)
            m.emit(0x51)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VSQRTPS zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x51)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VSQRTPS m128/m32bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x51)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VSQRTPS m256/m32bcst, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x51)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VSQRTPS xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x51)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VSQRTPS ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x51)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VSQRTPS")
    }
    return p
}
 94507  
// VSQRTSD performs "Compute Square Root of Scalar Double-Precision Floating-Point Value".
//
// Mnemonic        : VSQRTSD
// Supported forms : (5 forms)
//
//    * VSQRTSD xmm, xmm, xmm                [AVX]
//    * VSQRTSD m64, xmm, xmm                [AVX]
//    * VSQRTSD m64, xmm, xmm{k}{z}          [AVX512F]
//    * VSQRTSD {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VSQRTSD xmm, xmm, xmm{k}{z}          [AVX512F]
//
// Each matching form registers one candidate encoder closure via p.add; the
// variadic tail vv is only populated for the 4-operand {er} rounding form.
// If no form matches, the function panics.
func (self *Program) VSQRTSD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate the instruction with the actual operand count (3 or 4).
    switch len(vv) {
        case 0  : p = self.alloc("VSQRTSD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VSQRTSD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VSQRTSD takes 3 or 4 operands")
    }
    // VSQRTSD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix, opcode 0x51, register-direct ModRM byte.
            m.vex2(3, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x51)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VSQRTSD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x51)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VSQRTSD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x51)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VSQRTSD {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62 escape byte followed by three
            // payload bytes packing register extension, mask and rounding bits.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xff ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x51)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VSQRTSD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xff ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x51)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VSQRTSD")
    }
    return p
}
 94587  
// VSQRTSS performs "Compute Square Root of Scalar Single-Precision Floating-Point Value".
//
// Mnemonic        : VSQRTSS
// Supported forms : (5 forms)
//
//    * VSQRTSS xmm, xmm, xmm                [AVX]
//    * VSQRTSS m32, xmm, xmm                [AVX]
//    * VSQRTSS m32, xmm, xmm{k}{z}          [AVX512F]
//    * VSQRTSS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VSQRTSS xmm, xmm, xmm{k}{z}          [AVX512F]
//
// Each matching form registers one candidate encoder closure via p.add; the
// variadic tail vv is only populated for the 4-operand {er} rounding form.
// If no form matches, the function panics.
func (self *Program) VSQRTSS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate the instruction with the actual operand count (3 or 4).
    switch len(vv) {
        case 0  : p = self.alloc("VSQRTSS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VSQRTSS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VSQRTSS takes 3 or 4 operands")
    }
    // VSQRTSS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix, opcode 0x51, register-direct ModRM byte.
            m.vex2(2, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x51)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VSQRTSS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x51)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VSQRTSS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x51)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VSQRTSS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62 escape byte followed by three
            // payload bytes packing register extension, mask and rounding bits.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7e ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x51)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VSQRTSS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x51)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VSQRTSS")
    }
    return p
}
 94667  
 94668  // VSTMXCSR performs "Store MXCSR Register State".
 94669  //
 94670  // Mnemonic        : VSTMXCSR
 94671  // Supported forms : (1 form)
 94672  //
 94673  //    * VSTMXCSR m32    [AVX]
 94674  //
 94675  func (self *Program) VSTMXCSR(v0 interface{}) *Instruction {
 94676      p := self.alloc("VSTMXCSR", 1, Operands { v0 })
 94677      // VSTMXCSR m32
 94678      if isM32(v0) {
 94679          self.require(ISA_AVX)
 94680          p.domain = DomainAVX
 94681          p.add(0, func(m *_Encoding, v []interface{}) {
 94682              m.vex2(0, 0, addr(v[0]), 0)
 94683              m.emit(0xae)
 94684              m.mrsd(3, addr(v[0]), 1)
 94685          })
 94686      }
 94687      if p.len == 0 {
 94688          panic("invalid operands for VSTMXCSR")
 94689      }
 94690      return p
 94691  }
 94692  
// VSUBPD performs "Subtract Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VSUBPD
// Supported forms : (11 forms)
//
//    * VSUBPD xmm, xmm, xmm                   [AVX]
//    * VSUBPD m128, xmm, xmm                  [AVX]
//    * VSUBPD ymm, ymm, ymm                   [AVX]
//    * VSUBPD m256, ymm, ymm                  [AVX]
//    * VSUBPD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VSUBPD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VSUBPD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VSUBPD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VSUBPD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VSUBPD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VSUBPD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Each matching form registers one candidate encoder closure via p.add; the
// variadic tail vv is only populated for the 4-operand {er} rounding form.
// If no form matches, the function panics.
func (self *Program) VSUBPD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate the instruction with the actual operand count (3 or 4).
    switch len(vv) {
        case 0  : p = self.alloc("VSUBPD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VSUBPD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VSUBPD takes 3 or 4 operands")
    }
    // VSUBPD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix, opcode 0x5c, register-direct ModRM byte.
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VSUBPD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VSUBPD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VSUBPD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VSUBPD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded memory form; the last argument (bcode) presumably
            // selects the embedded-broadcast bit — confirm against m.evex.
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5c)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VSUBPD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62 escape byte followed by three
            // payload bytes packing register extension, mask and rounding bits.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VSUBPD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VSUBPD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5c)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VSUBPD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VSUBPD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5c)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VSUBPD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VSUBPD")
    }
    return p
}
 94844  
// VSUBPS performs "Subtract Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VSUBPS
// Supported forms : (11 forms)
//
//    * VSUBPS xmm, xmm, xmm                   [AVX]
//    * VSUBPS m128, xmm, xmm                  [AVX]
//    * VSUBPS ymm, ymm, ymm                   [AVX]
//    * VSUBPS m256, ymm, ymm                  [AVX]
//    * VSUBPS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VSUBPS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VSUBPS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VSUBPS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VSUBPS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VSUBPS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VSUBPS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Each matching form registers one candidate encoder closure via p.add; the
// variadic tail vv is only populated for the 4-operand {er} rounding form.
// If no form matches, the function panics.
func (self *Program) VSUBPS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate the instruction with the actual operand count (3 or 4).
    switch len(vv) {
        case 0  : p = self.alloc("VSUBPS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VSUBPS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VSUBPS takes 3 or 4 operands")
    }
    // VSUBPS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix, opcode 0x5c, register-direct ModRM byte.
            m.vex2(0, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VSUBPS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VSUBPS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VSUBPS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VSUBPS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded memory form; the last argument (bcode) presumably
            // selects the embedded-broadcast bit — confirm against m.evex.
            m.evex(0b01, 0x04, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5c)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VSUBPS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62 escape byte followed by three
            // payload bytes packing register extension, mask and rounding bits.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7c ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VSUBPS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VSUBPS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5c)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VSUBPS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VSUBPS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5c)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VSUBPS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VSUBPS")
    }
    return p
}
 94996  
// VSUBSD performs "Subtract Scalar Double-Precision Floating-Point Values".
//
// Mnemonic        : VSUBSD
// Supported forms : (5 forms)
//
//    * VSUBSD xmm, xmm, xmm                [AVX]
//    * VSUBSD m64, xmm, xmm                [AVX]
//    * VSUBSD m64, xmm, xmm{k}{z}          [AVX512F]
//    * VSUBSD {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VSUBSD xmm, xmm, xmm{k}{z}          [AVX512F]
//
// Each matching form registers one candidate encoder closure via p.add; the
// variadic tail vv is only populated for the 4-operand {er} rounding form.
// If no form matches, the function panics.
func (self *Program) VSUBSD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate the instruction with the actual operand count (3 or 4).
    switch len(vv) {
        case 0  : p = self.alloc("VSUBSD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VSUBSD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VSUBSD takes 3 or 4 operands")
    }
    // VSUBSD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix, opcode 0x5c, register-direct ModRM byte.
            m.vex2(3, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VSUBSD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VSUBSD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x5c)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VSUBSD {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62 escape byte followed by three
            // payload bytes packing register extension, mask and rounding bits.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xff ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VSUBSD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xff ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VSUBSD")
    }
    return p
}
 95076  
// VSUBSS performs "Subtract Scalar Single-Precision Floating-Point Values".
//
// Mnemonic        : VSUBSS
// Supported forms : (5 forms)
//
//    * VSUBSS xmm, xmm, xmm                [AVX]
//    * VSUBSS m32, xmm, xmm                [AVX]
//    * VSUBSS m32, xmm, xmm{k}{z}          [AVX512F]
//    * VSUBSS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VSUBSS xmm, xmm, xmm{k}{z}          [AVX512F]
//
// Operands are given in the same source-first order as the form comments
// above; the optional trailing operand (vv) is only used by the {er}
// embedded-rounding form. Every form whose operand-type predicates match
// registers a candidate encoder closure via p.add; if no form matches at
// all, the function panics.
func (self *Program) VSUBSS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate a 3- or 4-operand instruction depending on whether the
    // {er} form's extra operand was supplied.
    switch len(vv) {
        case 0  : p = self.alloc("VSUBSS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VSUBSS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VSUBSS takes 3 or 4 operands")
    }
    // VSUBSS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix, opcode 0x5c, then a reg-reg ModRM byte.
            m.vex2(2, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VSUBSS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VSUBSS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded memory form; the final argument of mrsd is the
            // displacement scale (4 = one dword element).
            m.evex(0b01, 0x06, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x5c)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VSUBSS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix: 0x62 escape followed by three
            // payload bytes assembled from register/mask/rounding codes.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7e ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VSUBSS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same hand-rolled EVEX prefix as above, without the embedded
            // rounding-control operand.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand-type predicate matched: the caller passed an unsupported
    // operand combination.
    if p.len == 0 {
        panic("invalid operands for VSUBSS")
    }
    return p
}
 95156  
// VTESTPD performs "Packed Double-Precision Floating-Point Bit Test".
//
// Mnemonic        : VTESTPD
// Supported forms : (4 forms)
//
//    * VTESTPD xmm, xmm     [AVX]
//    * VTESTPD m128, xmm    [AVX]
//    * VTESTPD ymm, ymm     [AVX]
//    * VTESTPD m256, ymm    [AVX]
//
// All four forms are AVX-only (no EVEX variants). Register-register forms
// emit a 3-byte VEX prefix by hand (0xc4 escape plus two payload bytes);
// memory forms delegate prefix construction to m.vex3. Panics if no form
// matches the supplied operand types.
func (self *Program) VTESTPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VTESTPD", 2, Operands { v0, v1 })
    // VTESTPD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0xc4 escape, payload bytes, then opcode 0x0f and reg-reg ModRM.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VTESTPD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VTESTPD ymm, ymm
    if isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same as the xmm form except for the 0x7d payload byte
            // (256-bit variant).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VTESTPD m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No operand-type predicate matched.
    if p.len == 0 {
        panic("invalid operands for VTESTPD")
    }
    return p
}
 95218  
// VTESTPS performs "Packed Single-Precision Floating-Point Bit Test".
//
// Mnemonic        : VTESTPS
// Supported forms : (4 forms)
//
//    * VTESTPS xmm, xmm     [AVX]
//    * VTESTPS m128, xmm    [AVX]
//    * VTESTPS ymm, ymm     [AVX]
//    * VTESTPS m256, ymm    [AVX]
//
// Identical in structure to VTESTPD, differing only in the opcode byte
// (0x0e rather than 0x0f). All forms are AVX-only. Panics if no form
// matches the supplied operand types.
func (self *Program) VTESTPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VTESTPS", 2, Operands { v0, v1 })
    // VTESTPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix (0xc4 escape), opcode 0x0e,
            // reg-reg ModRM.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0x0e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VTESTPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x0e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VTESTPS ymm, ymm
    if isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0x7d payload byte marks the 256-bit variant.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)
            m.emit(0x0e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VTESTPS m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x0e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No operand-type predicate matched.
    if p.len == 0 {
        panic("invalid operands for VTESTPS")
    }
    return p
}
 95280  
// VUCOMISD performs "Unordered Compare Scalar Double-Precision Floating-Point Values and Set EFLAGS".
//
// Mnemonic        : VUCOMISD
// Supported forms : (5 forms)
//
//    * VUCOMISD xmm, xmm           [AVX]
//    * VUCOMISD m64, xmm           [AVX]
//    * VUCOMISD m64, xmm           [AVX512F]
//    * VUCOMISD {sae}, xmm, xmm    [AVX512F]
//    * VUCOMISD xmm, xmm           [AVX512F]
//
// The optional trailing operand (vv) is only used by the {sae}
// suppress-all-exceptions form. Note that the two reg-reg forms and the
// two m64 forms overlap: when operands satisfy both the AVX and the
// AVX-512 predicates, candidate encoders for both are registered via
// p.add. Panics if no form matches.
func (self *Program) VUCOMISD(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate a 2- or 3-operand instruction depending on whether the
    // {sae} form's extra operand was supplied.
    switch len(vv) {
        case 0  : p = self.alloc("VUCOMISD", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VUCOMISD", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VUCOMISD takes 2 or 3 operands")
    }
    // VUCOMISD xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix, opcode 0x2e, reg-reg ModRM.
            m.vex2(1, hcode(v[1]), v[0], 0)
            m.emit(0x2e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VUCOMISD m64, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x2e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VUCOMISD m64, xmm
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; final mrsd argument is the displacement
            // scale (8 = one qword element).
            m.evex(0b01, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x2e)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VUCOMISD {sae}, xmm, xmm
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isEVEXXMM(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (0x62 escape); 0x18 is the
            // fixed third payload byte for this {sae} form.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit(0x18)
            m.emit(0x2e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VUCOMISD xmm, xmm
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same prefix layout as the {sae} form with payload byte 0x48.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit(0x48)
            m.emit(0x2e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No operand-type predicate matched.
    if p.len == 0 {
        panic("invalid operands for VUCOMISD")
    }
    return p
}
 95360  
// VUCOMISS performs "Unordered Compare Scalar Single-Precision Floating-Point Values and Set EFLAGS".
//
// Mnemonic        : VUCOMISS
// Supported forms : (5 forms)
//
//    * VUCOMISS xmm, xmm           [AVX]
//    * VUCOMISS m32, xmm           [AVX]
//    * VUCOMISS m32, xmm           [AVX512F]
//    * VUCOMISS {sae}, xmm, xmm    [AVX512F]
//    * VUCOMISS xmm, xmm           [AVX512F]
//
// Single-precision counterpart of VUCOMISD: same structure, with a
// dword memory operand and different prefix payload bytes. The optional
// trailing operand (vv) is only used by the {sae} form. Panics if no
// form matches.
func (self *Program) VUCOMISS(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate a 2- or 3-operand instruction depending on whether the
    // {sae} form's extra operand was supplied.
    switch len(vv) {
        case 0  : p = self.alloc("VUCOMISS", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VUCOMISS", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VUCOMISS takes 2 or 3 operands")
    }
    // VUCOMISS xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix, opcode 0x2e, reg-reg ModRM.
            m.vex2(0, hcode(v[1]), v[0], 0)
            m.emit(0x2e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VUCOMISS m32, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x2e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VUCOMISS m32, xmm
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; final mrsd argument is the displacement
            // scale (4 = one dword element).
            m.evex(0b01, 0x04, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x2e)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VUCOMISS {sae}, xmm, xmm
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isEVEXXMM(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (0x62 escape); 0x18 is the
            // fixed third payload byte for this {sae} form.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c)
            m.emit(0x18)
            m.emit(0x2e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VUCOMISS xmm, xmm
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same prefix layout as the {sae} form with payload byte 0x48.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit(0x48)
            m.emit(0x2e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No operand-type predicate matched.
    if p.len == 0 {
        panic("invalid operands for VUCOMISS")
    }
    return p
}
 95440  
// VUNPCKHPD performs "Unpack and Interleave High Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VUNPCKHPD
// Supported forms : (10 forms)
//
//    * VUNPCKHPD xmm, xmm, xmm                   [AVX]
//    * VUNPCKHPD m128, xmm, xmm                  [AVX]
//    * VUNPCKHPD ymm, ymm, ymm                   [AVX]
//    * VUNPCKHPD m256, ymm, ymm                  [AVX]
//    * VUNPCKHPD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VUNPCKHPD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VUNPCKHPD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VUNPCKHPD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VUNPCKHPD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VUNPCKHPD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Operands are given source-first (src2, src1, dst), matching the form
// comments above. AVX forms use the 2-byte VEX prefix builder (m.vex2);
// AVX-512 memory forms go through m.evex, while AVX-512 register forms
// hand-roll the 4-byte EVEX prefix. Every matching form registers a
// candidate encoder via p.add; panics if none match.
func (self *Program) VUNPCKHPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VUNPCKHPD", 3, Operands { v0, v1, v2 })
    // VUNPCKHPD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix, opcode 0x15, reg-reg ModRM.
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VUNPCKHPD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x15)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VUNPCKHPD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // First vex2 argument 5 (vs. 1 for xmm) selects the 256-bit
            // variant of the prefix.
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VUNPCKHPD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x15)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VUNPCKHPD m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; bcode(v[0]) carries the broadcast flag and
            // the final mrsd argument is the displacement scale (64 bytes
            // for a full zmm load).
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x15)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VUNPCKHPD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (0x62 escape); the trailing
            // 0x40 / 0x20 / 0x00 constant differs per vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VUNPCKHPD m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x15)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VUNPCKHPD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VUNPCKHPD m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x15)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VUNPCKHPD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand-type predicate matched.
    if p.len == 0 {
        panic("invalid operands for VUNPCKHPD")
    }
    return p
}
 95573  
// VUNPCKHPS performs "Unpack and Interleave High Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VUNPCKHPS
// Supported forms : (10 forms)
//
//    * VUNPCKHPS xmm, xmm, xmm                   [AVX]
//    * VUNPCKHPS m128, xmm, xmm                  [AVX]
//    * VUNPCKHPS ymm, ymm, ymm                   [AVX]
//    * VUNPCKHPS m256, ymm, ymm                  [AVX]
//    * VUNPCKHPS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VUNPCKHPS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VUNPCKHPS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VUNPCKHPS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VUNPCKHPS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VUNPCKHPS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Single-precision counterpart of VUNPCKHPD: same opcode (0x15) and
// structure, but with different prefix parameters (vex2 first argument
// 0/4 instead of 1/5, evex second argument 0x04 instead of 0x85) and
// dword broadcast operands. Every matching form registers a candidate
// encoder via p.add; panics if none match.
func (self *Program) VUNPCKHPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VUNPCKHPS", 3, Operands { v0, v1, v2 })
    // VUNPCKHPS xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix, opcode 0x15, reg-reg ModRM.
            m.vex2(0, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VUNPCKHPS m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x15)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VUNPCKHPS ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // First vex2 argument 4 (vs. 0 for xmm) selects the 256-bit
            // variant of the prefix.
            m.vex2(4, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VUNPCKHPS m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x15)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VUNPCKHPS m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; bcode(v[0]) carries the broadcast flag and
            // the final mrsd argument is the displacement scale (64 bytes
            // for a full zmm load).
            m.evex(0b01, 0x04, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x15)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VUNPCKHPS zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (0x62 escape); the trailing
            // 0x40 / 0x20 / 0x00 constant differs per vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VUNPCKHPS m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x15)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VUNPCKHPS xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VUNPCKHPS m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x15)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VUNPCKHPS ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand-type predicate matched.
    if p.len == 0 {
        panic("invalid operands for VUNPCKHPS")
    }
    return p
}
 95706  
 95707  // VUNPCKLPD performs "Unpack and Interleave Low Packed Double-Precision Floating-Point Values".
 95708  //
 95709  // Mnemonic        : VUNPCKLPD
 95710  // Supported forms : (10 forms)
 95711  //
 95712  //    * VUNPCKLPD xmm, xmm, xmm                   [AVX]
 95713  //    * VUNPCKLPD m128, xmm, xmm                  [AVX]
 95714  //    * VUNPCKLPD ymm, ymm, ymm                   [AVX]
 95715  //    * VUNPCKLPD m256, ymm, ymm                  [AVX]
 95716  //    * VUNPCKLPD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
 95717  //    * VUNPCKLPD zmm, zmm, zmm{k}{z}             [AVX512F]
 95718  //    * VUNPCKLPD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 95719  //    * VUNPCKLPD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 95720  //    * VUNPCKLPD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 95721  //    * VUNPCKLPD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 95722  //
func (self *Program) VUNPCKLPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Operand roles, as visible from the emitted ModRM byte below: v0 is the
    // ModRM.rm operand (register or memory source), v2 goes into ModRM.reg
    // (destination), and v1 is fed to the VEX/EVEX encoder as the second
    // source (presumably the "vvvv" field -- confirm against vex2/evex).
    // Each matching form appends one encoding candidate via p.add.
    p := self.alloc("VUNPCKLPD", 3, Operands { v0, v1, v2 })
    // VUNPCKLPD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1])) // VEX prefix; selector 1 here vs 5 in the ymm form below
            m.emit(0x14)                               // opcode byte
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11 (reg-direct), reg=v2, rm=v0
        })
    }
    // VUNPCKLPD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x14)
            m.mrsd(lcode(v[2]), addr(v[0]), 1) // ModRM+SIB+disp for the memory operand; disp scale 1
        })
    }
    // VUNPCKLPD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1])) // selector 5: 256-bit variant of the xmm form above
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VUNPCKLPD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x14)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VUNPCKLPD m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded memory form; third arg 0b10 correlates with the
            // 512-bit forms (0b00 = 128-bit, 0b01 = 256-bit below).
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x14)
            m.mrsd(lcode(v[2]), addr(v[0]), 64) // disp scale 64 (presumably EVEX disp8*N compression for a 512-bit operand)
        })
    }
    // VUNPCKLPD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                 // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted register-extension bits
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                           // P1: inverted vvvv field carrying v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40) // P2: z bit, mask reg; 0x40 marks the 512-bit form
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VUNPCKLPD m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x14)
            m.mrsd(lcode(v[2]), addr(v[0]), 16) // disp scale 16 for the 128-bit memory operand
        })
    }
    // VUNPCKLPD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00) // 0x00: 128-bit form
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VUNPCKLPD m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x14)
            m.mrsd(lcode(v[2]), addr(v[0]), 32) // disp scale 32 for the 256-bit memory operand
        })
    }
    // VUNPCKLPD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20) // 0x20: 256-bit form
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the given operand combination.
    if p.len == 0 {
        panic("invalid operands for VUNPCKLPD")
    }
    return p
}
 95839  
// VUNPCKLPS performs "Unpack and Interleave Low Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VUNPCKLPS
// Supported forms : (10 forms)
//
//    * VUNPCKLPS xmm, xmm, xmm                   [AVX]
//    * VUNPCKLPS m128, xmm, xmm                  [AVX]
//    * VUNPCKLPS ymm, ymm, ymm                   [AVX]
//    * VUNPCKLPS m256, ymm, ymm                  [AVX]
//    * VUNPCKLPS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VUNPCKLPS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VUNPCKLPS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VUNPCKLPS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VUNPCKLPS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VUNPCKLPS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VUNPCKLPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Same candidate structure as VUNPCKLPD (opcode 0x14, v2 = ModRM.reg
    // destination, v0 = ModRM.rm source), but with the single-precision
    // prefix variants: vex2 selectors 0/4 instead of 1/5, evex opcode-group
    // 0x04 instead of 0x85, and EVEX P1 base 0x7c instead of 0xfd.
    p := self.alloc("VUNPCKLPS", 3, Operands { v0, v1, v2 })
    // VUNPCKLPS xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), v[0], hlcode(v[1])) // VEX prefix, 128-bit PS variant
            m.emit(0x14)                               // opcode byte
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v2, rm=v0
        })
    }
    // VUNPCKLPS m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x14)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VUNPCKLPS ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), v[0], hlcode(v[1])) // selector 4: 256-bit PS variant
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VUNPCKLPS m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x14)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VUNPCKLPS m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x14)
            m.mrsd(lcode(v[2]), addr(v[0]), 64) // disp scale 64 for the 512-bit memory operand
        })
    }
    // VUNPCKLPS zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                 // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted register-extension bits
            m.emit(0x7c ^ (hlcode(v[1]) << 3))                                           // P1: inverted vvvv carrying v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40) // P2: 0x40 marks the 512-bit form
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VUNPCKLPS m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x14)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VUNPCKLPS xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00) // 0x00: 128-bit form
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VUNPCKLPS m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x14)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VUNPCKLPS ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20) // 0x20: 256-bit form
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the given operand combination.
    if p.len == 0 {
        panic("invalid operands for VUNPCKLPS")
    }
    return p
}
 95972  
// VXORPD performs "Bitwise Logical XOR for Double-Precision Floating-Point Values".
//
// Mnemonic        : VXORPD
// Supported forms : (10 forms)
//
//    * VXORPD xmm, xmm, xmm                   [AVX]
//    * VXORPD m128, xmm, xmm                  [AVX]
//    * VXORPD ymm, ymm, ymm                   [AVX]
//    * VXORPD m256, ymm, ymm                  [AVX]
//    * VXORPD m512/m64bcst, zmm, zmm{k}{z}    [AVX512DQ]
//    * VXORPD zmm, zmm, zmm{k}{z}             [AVX512DQ]
//    * VXORPD m128/m64bcst, xmm, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VXORPD xmm, xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VXORPD m256/m64bcst, ymm, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VXORPD ymm, ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
//
func (self *Program) VXORPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Same candidate structure as VUNPCKLPD but with opcode 0x57 and, for the
    // EVEX forms, the AVX512DQ ISA requirement instead of AVX512F.
    // v2 = ModRM.reg destination, v0 = ModRM.rm source, v1 = second source
    // via the VEX/EVEX encoder.
    p := self.alloc("VXORPD", 3, Operands { v0, v1, v2 })
    // VXORPD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1])) // VEX prefix, 128-bit PD variant
            m.emit(0x57)                               // opcode byte
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v2, rm=v0
        })
    }
    // VXORPD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x57)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VXORPD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1])) // selector 5: 256-bit variant
            m.emit(0x57)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VXORPD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x57)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VXORPD m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x57)
            m.mrsd(lcode(v[2]), addr(v[0]), 64) // disp scale 64 for the 512-bit memory operand
        })
    }
    // VXORPD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                 // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted register-extension bits
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                           // P1: inverted vvvv carrying v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40) // P2: 0x40 marks the 512-bit form
            m.emit(0x57)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VXORPD m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x57)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VXORPD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00) // 0x00: 128-bit form
            m.emit(0x57)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VXORPD m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x57)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VXORPD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20) // 0x20: 256-bit form
            m.emit(0x57)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the given operand combination.
    if p.len == 0 {
        panic("invalid operands for VXORPD")
    }
    return p
}
 96105  
// VXORPS performs "Bitwise Logical XOR for Single-Precision Floating-Point Values".
//
// Mnemonic        : VXORPS
// Supported forms : (10 forms)
//
//    * VXORPS xmm, xmm, xmm                   [AVX]
//    * VXORPS m128, xmm, xmm                  [AVX]
//    * VXORPS ymm, ymm, ymm                   [AVX]
//    * VXORPS m256, ymm, ymm                  [AVX]
//    * VXORPS m512/m32bcst, zmm, zmm{k}{z}    [AVX512DQ]
//    * VXORPS zmm, zmm, zmm{k}{z}             [AVX512DQ]
//    * VXORPS m128/m32bcst, xmm, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VXORPS xmm, xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VXORPS m256/m32bcst, ymm, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VXORPS ymm, ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
//
func (self *Program) VXORPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Single-precision counterpart of VXORPD: same opcode 0x57 and operand
    // layout (v2 = ModRM.reg destination, v0 = ModRM.rm source, v1 = second
    // source), but PS prefix variants: vex2 selectors 0/4, evex opcode-group
    // 0x04, EVEX P1 base 0x7c.
    p := self.alloc("VXORPS", 3, Operands { v0, v1, v2 })
    // VXORPS xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), v[0], hlcode(v[1])) // VEX prefix, 128-bit PS variant
            m.emit(0x57)                               // opcode byte
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v2, rm=v0
        })
    }
    // VXORPS m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x57)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VXORPS ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), v[0], hlcode(v[1])) // selector 4: 256-bit PS variant
            m.emit(0x57)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VXORPS m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x57)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VXORPS m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x57)
            m.mrsd(lcode(v[2]), addr(v[0]), 64) // disp scale 64 for the 512-bit memory operand
        })
    }
    // VXORPS zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                 // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted register-extension bits
            m.emit(0x7c ^ (hlcode(v[1]) << 3))                                           // P1: inverted vvvv carrying v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40) // P2: 0x40 marks the 512-bit form
            m.emit(0x57)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VXORPS m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x57)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VXORPS xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00) // 0x00: 128-bit form
            m.emit(0x57)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VXORPS m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x57)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VXORPS ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20) // 0x20: 256-bit form
            m.emit(0x57)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the given operand combination.
    if p.len == 0 {
        panic("invalid operands for VXORPS")
    }
    return p
}
 96238  
// VZEROALL performs "Zero All YMM Registers".
//
// Mnemonic        : VZEROALL
// Supported forms : (1 form)
//
//    * VZEROALL    [AVX]
//
func (self *Program) VZEROALL() *Instruction {
    p := self.alloc("VZEROALL", 0, Operands {  })
    // VZEROALL
    // Single no-operand form: VEX prefix (selector 4, the 256-bit variant --
    // compare VZEROUPPER below which uses 0) followed by opcode 0x77.
    self.require(ISA_AVX)
    p.domain = DomainAVX
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.vex2(4, 0, nil, 0) // no ModRM operand, hence nil
        m.emit(0x77)         // opcode byte
    })
    return p
}
 96257  
// VZEROUPPER performs "Zero Upper Bits of YMM Registers".
//
// Mnemonic        : VZEROUPPER
// Supported forms : (1 form)
//
//    * VZEROUPPER    [AVX]
//
func (self *Program) VZEROUPPER() *Instruction {
    p := self.alloc("VZEROUPPER", 0, Operands {  })
    // VZEROUPPER
    // Same opcode (0x77) as VZEROALL; the two differ only in the vex2
    // selector (0 here vs 4 there), presumably the VEX.L length bit.
    self.require(ISA_AVX)
    p.domain = DomainAVX
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.vex2(0, 0, nil, 0) // no ModRM operand, hence nil
        m.emit(0x77)         // opcode byte
    })
    return p
}
 96276  
// XADDB performs "Exchange and Add".
//
// Mnemonic        : XADD
// Supported forms : (2 forms)
//
//    * XADDB r8, r8
//    * XADDB r8, m8
//
func (self *Program) XADDB(v0 interface{}, v1 interface{}) *Instruction {
    // Byte form of XADD: two-byte opcode 0F C0. v0 is the source register
    // (ModRM.reg), v1 the destination register or memory (ModRM.rm).
    p := self.alloc("XADDB", 2, Operands { v0, v1 })
    // XADDB r8, r8
    if isReg8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // REX is forced when either operand is a REX-only byte register
            // (e.g. SPL/BPL/SIL/DIL -- confirm against isReg8REX).
            m.rexo(hcode(v[0]), v[1], isReg8REX(v[0]) || isReg8REX(v[1]))
            m.emit(0x0f)
            m.emit(0xc0)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1])) // ModRM: mod=11, reg=v0, rm=v1
        })
    }
    // XADDB r8, m8
    if isReg8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), isReg8REX(v[0]))
            m.emit(0x0f)
            m.emit(0xc0)
            m.mrsd(lcode(v[0]), addr(v[1]), 1) // ModRM+SIB+disp for the memory destination
        })
    }
    // No form matched the given operand combination.
    if p.len == 0 {
        panic("invalid operands for XADDB")
    }
    return p
}
 96312  
// XADDL performs "Exchange and Add".
//
// Mnemonic        : XADD
// Supported forms : (2 forms)
//
//    * XADDL r32, r32
//    * XADDL r32, m32
//
func (self *Program) XADDL(v0 interface{}, v1 interface{}) *Instruction {
    // 32-bit form of XADD: two-byte opcode 0F C1, no operand-size or REX.W
    // prefix. v0 is the source register (ModRM.reg), v1 the destination
    // register or memory (ModRM.rm).
    p := self.alloc("XADDL", 2, Operands { v0, v1 })
    // XADDL r32, r32
    if isReg32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], false) // optional REX for extended registers only
            m.emit(0x0f)
            m.emit(0xc1)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1])) // ModRM: mod=11, reg=v0, rm=v1
        })
    }
    // XADDL r32, m32
    if isReg32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xc1)
            m.mrsd(lcode(v[0]), addr(v[1]), 1) // ModRM+SIB+disp for the memory destination
        })
    }
    // No form matched the given operand combination.
    if p.len == 0 {
        panic("invalid operands for XADDL")
    }
    return p
}
 96348  
// XADDQ performs "Exchange and Add".
//
// Mnemonic        : XADD
// Supported forms : (2 forms)
//
//    * XADDQ r64, r64
//    * XADDQ r64, m64
//
func (self *Program) XADDQ(v0 interface{}, v1 interface{}) *Instruction {
    // 64-bit form of XADD: REX.W prefix followed by opcode 0F C1.
    // v0 is the source register (ModRM.reg), v1 the destination register or
    // memory (ModRM.rm).
    p := self.alloc("XADDQ", 2, Operands { v0, v1 })
    // XADDQ r64, r64
    if isReg64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1])) // REX.W (0x48) plus R/B register-extension bits
            m.emit(0x0f)
            m.emit(0xc1)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1])) // ModRM: mod=11, reg=v0, rm=v1
        })
    }
    // XADDQ r64, m64
    if isReg64(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[0]), addr(v[1])) // REX with W=1 derived from the memory operand
            m.emit(0x0f)
            m.emit(0xc1)
            m.mrsd(lcode(v[0]), addr(v[1]), 1) // ModRM+SIB+disp for the memory destination
        })
    }
    // No form matched the given operand combination.
    if p.len == 0 {
        panic("invalid operands for XADDQ")
    }
    return p
}
 96384  
// XADDW performs "Exchange and Add".
//
// Mnemonic        : XADD
// Supported forms : (2 forms)
//
//    * XADDW r16, r16
//    * XADDW r16, m16
//
func (self *Program) XADDW(v0 interface{}, v1 interface{}) *Instruction {
    // 16-bit form of XADD: operand-size override prefix 0x66, then the same
    // 0F C1 opcode as the 32-bit form. v0 is the source register (ModRM.reg),
    // v1 the destination register or memory (ModRM.rm).
    p := self.alloc("XADDW", 2, Operands { v0, v1 })
    // XADDW r16, r16
    if isReg16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66) // operand-size override: selects 16-bit operands
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0xc1)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1])) // ModRM: mod=11, reg=v0, rm=v1
        })
    }
    // XADDW r16, m16
    if isReg16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xc1)
            m.mrsd(lcode(v[0]), addr(v[1]), 1) // ModRM+SIB+disp for the memory destination
        })
    }
    // No form matched the given operand combination.
    if p.len == 0 {
        panic("invalid operands for XADDW")
    }
    return p
}
 96422  
// XCHGB performs "Exchange Register/Memory with Register".
//
// Mnemonic        : XCHG
// Supported forms : (3 forms)
//
//    * XCHGB r8, r8
//    * XCHGB m8, r8
//    * XCHGB r8, m8
//
func (self *Program) XCHGB(v0 interface{}, v1 interface{}) *Instruction {
    // Byte form of XCHG, opcode 0x86. Since XCHG is symmetric, the
    // register-register form registers two equivalent candidates with the
    // operands swapped between ModRM.reg and ModRM.rm.
    p := self.alloc("XCHGB", 2, Operands { v0, v1 })
    // XCHGB r8, r8
    if isReg8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        // Candidate 1: v0 in ModRM.reg, v1 in ModRM.rm.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], isReg8REX(v[0]) || isReg8REX(v[1])) // REX forced for REX-only byte registers
            m.emit(0x86)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
        // Candidate 2: the same exchange with the roles reversed.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], isReg8REX(v[0]) || isReg8REX(v[1]))
            m.emit(0x86)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // XCHGB m8, r8
    if isM8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), isReg8REX(v[1]))
            m.emit(0x86)
            m.mrsd(lcode(v[1]), addr(v[0]), 1) // ModRM+SIB+disp for the memory operand
        })
    }
    // XCHGB r8, m8
    if isReg8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), isReg8REX(v[0]))
            m.emit(0x86)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No form matched the given operand combination.
    if p.len == 0 {
        panic("invalid operands for XCHGB")
    }
    return p
}
 96471  
// XCHGL performs "Exchange Register/Memory with Register".
//
// Mnemonic        : XCHG
// Supported forms : (5 forms)
//
//    * XCHGL r32, eax
//    * XCHGL eax, r32
//    * XCHGL r32, r32
//    * XCHGL m32, r32
//    * XCHGL r32, m32
//
func (self *Program) XCHGL(v0 interface{}, v1 interface{}) *Instruction {
    // 32-bit XCHG. When one operand is EAX, the short one-byte form
    // (0x90 + register) is used; otherwise opcode 0x87 with a ModRM byte.
    // The register-register case registers both operand orderings since the
    // exchange is symmetric. Note: an EAX operand also satisfies isReg32, so
    // both the short form and the 0x87 candidates may be registered.
    p := self.alloc("XCHGL", 2, Operands { v0, v1 })
    // XCHGL r32, eax
    if isReg32(v0) && v1 == EAX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], false)
            m.emit(0x90 | lcode(v[0])) // short form: opcode 0x90 + register number
        })
    }
    // XCHGL eax, r32
    if v0 == EAX && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0x90 | lcode(v[1]))
        })
    }
    // XCHGL r32, r32
    if isReg32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        // Two equivalent candidates with reg/rm roles swapped.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x87)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x87)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // XCHGL m32, r32
    if isM32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x87)
            m.mrsd(lcode(v[1]), addr(v[0]), 1) // ModRM+SIB+disp for the memory operand
        })
    }
    // XCHGL r32, m32
    if isReg32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x87)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No form matched the given operand combination.
    if p.len == 0 {
        panic("invalid operands for XCHGL")
    }
    return p
}
 96538  
 96539  // XCHGQ performs "Exchange Register/Memory with Register".
 96540  //
 96541  // Mnemonic        : XCHG
 96542  // Supported forms : (5 forms)
 96543  //
 96544  //    * XCHGQ r64, rax
 96545  //    * XCHGQ rax, r64
 96546  //    * XCHGQ r64, r64
 96547  //    * XCHGQ m64, r64
 96548  //    * XCHGQ r64, m64
 96549  //
func (self *Program) XCHGQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("XCHGQ", 2, Operands { v0, v1 })
    // XCHGQ r64, rax
    if isReg64(v0) && v1 == RAX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0]))      // mandatory REX.W prefix; REX.B carries the register's high bit
            m.emit(0x90 | lcode(v[0]))      // short form 0x90+rd: exchange with the accumulator, no ModRM byte
        })
    }
    // XCHGQ rax, r64
    if v0 == RAX && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0x90 | lcode(v[1]))      // same 0x90+rd short form, accumulator on the other side
        })
    }
    // XCHGQ r64, r64
    // XCHG is symmetric, so both ModRM operand orders of opcode 0x87 /r are
    // registered as alternative encodings of the same instruction.
    if isReg64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1]))   // REX.W + REX.R (reg) + REX.B (rm)
            m.emit(0x87)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))   // ModRM: mod=11, reg=v0, rm=v1
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
            m.emit(0x87)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=v1, rm=v0
        })
    }
    // XCHGQ m64, r64
    if isM64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))              // REX prefix with W=1 for the memory form
            m.emit(0x87)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    // XCHGQ r64, m64
    if isReg64(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[0]), addr(v[1]))
            m.emit(0x87)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No operand form matched: the caller passed an invalid combination.
    if p.len == 0 {
        panic("invalid operands for XCHGQ")
    }
    return p
}
 96605  
 96606  // XCHGW performs "Exchange Register/Memory with Register".
 96607  //
 96608  // Mnemonic        : XCHG
 96609  // Supported forms : (5 forms)
 96610  //
 96611  //    * XCHGW r16, ax
 96612  //    * XCHGW ax, r16
 96613  //    * XCHGW r16, r16
 96614  //    * XCHGW m16, r16
 96615  //    * XCHGW r16, m16
 96616  //
func (self *Program) XCHGW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("XCHGW", 2, Operands { v0, v1 })
    // XCHGW r16, ax
    if isReg16(v0) && v1 == AX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                    // operand-size override: selects the 16-bit form
            m.rexo(0, v[0], false)
            m.emit(0x90 | lcode(v[0]))      // short form 0x90+rd: exchange with the accumulator, no ModRM byte
        })
    }
    // XCHGW ax, r16
    if v0 == AX && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0x90 | lcode(v[1]))      // same 0x90+rd short form, accumulator on the other side
        })
    }
    // XCHGW r16, r16
    // XCHG is symmetric, so both ModRM operand orders of opcode 0x87 /r are
    // registered as alternative encodings of the same instruction.
    if isReg16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x87)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))   // ModRM: mod=11, reg=v0, rm=v1
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x87)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=v1, rm=v0
        })
    }
    // XCHGW m16, r16
    if isM16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x87)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    // XCHGW r16, m16
    if isReg16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x87)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No operand form matched: the caller passed an invalid combination.
    if p.len == 0 {
        panic("invalid operands for XCHGW")
    }
    return p
}
 96678  
 96679  // XGETBV performs "Get Value of Extended Control Register".
 96680  //
 96681  // Mnemonic        : XGETBV
 96682  // Supported forms : (1 form)
 96683  //
 96684  //    * XGETBV
 96685  //
func (self *Program) XGETBV() *Instruction {
    p := self.alloc("XGETBV", 0, Operands {  })
    // XGETBV
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        // Fixed three-byte encoding 0F 01 D0 (no operands).
        m.emit(0x0f)
        m.emit(0x01)
        m.emit(0xd0)
    })
    return p
}
 96697  
 96698  // XLATB performs "Table Look-up Translation".
 96699  //
 96700  // Mnemonic        : XLATB
 96701  // Supported forms : (2 forms)
 96702  //
 96703  //    * XLATB
 96704  //    * XLATB
 96705  //
func (self *Program) XLATB() *Instruction {
    p := self.alloc("XLATB", 0, Operands {  })
    // XLATB
    p.domain = DomainMisc
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0xd7)            // plain single-byte encoding
    })
    // XLATB
    // Second alternative encoding: the same D7 opcode with a REX.W (0x48)
    // prefix. Note p.domain is assigned twice with the same value — an
    // artifact of the code generator emitting one assignment per form.
    p.domain = DomainMisc
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0x48)
        m.emit(0xd7)
    })
    return p
}
 96721  
 96722  // XORB performs "Logical Exclusive OR".
 96723  //
 96724  // Mnemonic        : XOR
 96725  // Supported forms : (6 forms)
 96726  //
 96727  //    * XORB imm8, al
 96728  //    * XORB imm8, r8
 96729  //    * XORB r8, r8
 96730  //    * XORB m8, r8
 96731  //    * XORB imm8, m8
 96732  //    * XORB r8, m8
 96733  //
func (self *Program) XORB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("XORB", 2, Operands { v0, v1 })
    // XORB imm8, al
    if isImm8(v0) && v1 == AL {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x34)                    // accumulator short form: XOR AL, imm8
            m.imm1(toImmAny(v[0]))
        })
    }
    // XORB imm8, r8
    if isImm8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0x80)                    // opcode 0x80 /6: XOR r/m8, imm8
            m.emit(0xf0 | lcode(v[1]))      // ModRM: mod=11, reg=/6 (0xf0 == 0xc0 | 6<<3), rm=v1
            m.imm1(toImmAny(v[0]))
        })
    }
    // XORB r8, r8
    // Both ModRM directions are registered: 0x30 (r/m8 ^= r8) and 0x32 (r8 ^= r/m8).
    if isReg8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], isReg8REX(v[0]) || isReg8REX(v[1]))
            m.emit(0x30)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))   // ModRM: mod=11, reg=v0, rm=v1
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], isReg8REX(v[0]) || isReg8REX(v[1]))
            m.emit(0x32)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=v1, rm=v0
        })
    }
    // XORB m8, r8
    if isM8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), isReg8REX(v[1]))
            m.emit(0x32)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    // XORB imm8, m8
    if isImm8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0x80)
            m.mrsd(6, addr(v[1]), 1)        // /6 selects XOR within the 0x80 opcode group
            m.imm1(toImmAny(v[0]))
        })
    }
    // XORB r8, m8
    if isReg8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), isReg8REX(v[0]))
            m.emit(0x30)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No operand form matched: the caller passed an invalid combination.
    if p.len == 0 {
        panic("invalid operands for XORB")
    }
    return p
}
 96801  
 96802  // XORL performs "Logical Exclusive OR".
 96803  //
 96804  // Mnemonic        : XOR
 96805  // Supported forms : (8 forms)
 96806  //
 96807  //    * XORL imm32, eax
 96808  //    * XORL imm8, r32
 96809  //    * XORL imm32, r32
 96810  //    * XORL r32, r32
 96811  //    * XORL m32, r32
 96812  //    * XORL imm8, m32
 96813  //    * XORL imm32, m32
 96814  //    * XORL r32, m32
 96815  //
func (self *Program) XORL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("XORL", 2, Operands { v0, v1 })
    // XORL imm32, eax
    if isImm32(v0) && v1 == EAX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x35)                    // accumulator short form: XOR EAX, imm32
            m.imm4(toImmAny(v[0]))
        })
    }
    // XORL imm8, r32
    if isImm8Ext(v0, 4) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0x83)                    // opcode 0x83 /6: sign-extended imm8 form
            m.emit(0xf0 | lcode(v[1]))      // ModRM: mod=11, reg=/6, rm=v1
            m.imm1(toImmAny(v[0]))
        })
    }
    // XORL imm32, r32
    if isImm32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0x81)                    // opcode 0x81 /6: full imm32 form
            m.emit(0xf0 | lcode(v[1]))
            m.imm4(toImmAny(v[0]))
        })
    }
    // XORL r32, r32
    // Both ModRM directions are registered: 0x31 (r/m32 ^= r32) and 0x33 (r32 ^= r/m32).
    if isReg32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x31)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))   // ModRM: mod=11, reg=v0, rm=v1
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x33)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=v1, rm=v0
        })
    }
    // XORL m32, r32
    if isM32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x33)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    // XORL imm8, m32
    if isImm8Ext(v0, 4) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0x83)
            m.mrsd(6, addr(v[1]), 1)        // /6 selects XOR within the immediate-group opcodes
            m.imm1(toImmAny(v[0]))
        })
    }
    // XORL imm32, m32
    if isImm32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0x81)
            m.mrsd(6, addr(v[1]), 1)
            m.imm4(toImmAny(v[0]))
        })
    }
    // XORL r32, m32
    if isReg32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x31)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No operand form matched: the caller passed an invalid combination.
    if p.len == 0 {
        panic("invalid operands for XORL")
    }
    return p
}
 96903  
 96904  // XORPD performs "Bitwise Logical XOR for Double-Precision Floating-Point Values".
 96905  //
 96906  // Mnemonic        : XORPD
 96907  // Supported forms : (2 forms)
 96908  //
 96909  //    * XORPD xmm, xmm     [SSE2]
 96910  //    * XORPD m128, xmm    [SSE2]
 96911  //
func (self *Program) XORPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("XORPD", 2, Operands { v0, v1 })
    // XORPD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)              // legacy SSE2 encoding; record the ISA requirement
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                    // mandatory 0x66 prefix distinguishes XORPD from XORPS
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x57)                    // opcode 0F 57
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst(v1), rm=src(v0)
        })
    }
    // XORPD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x57)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    // No operand form matched: the caller passed an invalid combination.
    if p.len == 0 {
        panic("invalid operands for XORPD")
    }
    return p
}
 96943  
 96944  // XORPS performs "Bitwise Logical XOR for Single-Precision Floating-Point Values".
 96945  //
 96946  // Mnemonic        : XORPS
 96947  // Supported forms : (2 forms)
 96948  //
 96949  //    * XORPS xmm, xmm     [SSE]
 96950  //    * XORPS m128, xmm    [SSE]
 96951  //
func (self *Program) XORPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("XORPS", 2, Operands { v0, v1 })
    // XORPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)               // legacy SSE encoding; record the ISA requirement
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x57)                    // opcode 0F 57 (no 0x66 prefix — single-precision form)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst(v1), rm=src(v0)
        })
    }
    // XORPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x57)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    // No operand form matched: the caller passed an invalid combination.
    if p.len == 0 {
        panic("invalid operands for XORPS")
    }
    return p
}
 96981  
 96982  // XORQ performs "Logical Exclusive OR".
 96983  //
 96984  // Mnemonic        : XOR
 96985  // Supported forms : (8 forms)
 96986  //
 96987  //    * XORQ imm32, rax
 96988  //    * XORQ imm8, r64
 96989  //    * XORQ imm32, r64
 96990  //    * XORQ r64, r64
 96991  //    * XORQ m64, r64
 96992  //    * XORQ imm8, m64
 96993  //    * XORQ imm32, m64
 96994  //    * XORQ r64, m64
 96995  //
func (self *Program) XORQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("XORQ", 2, Operands { v0, v1 })
    // XORQ imm32, rax
    if isImm32(v0) && v1 == RAX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48)                    // REX.W prefix: 64-bit operand size
            m.emit(0x35)                    // accumulator short form: XOR RAX, imm32 (sign-extended)
            m.imm4(toImmAny(v[0]))
        })
    }
    // XORQ imm8, r64
    if isImm8Ext(v0, 8) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))      // REX.W + REX.B for the destination register
            m.emit(0x83)                    // opcode 0x83 /6: sign-extended imm8 form
            m.emit(0xf0 | lcode(v[1]))      // ModRM: mod=11, reg=/6, rm=v1
            m.imm1(toImmAny(v[0]))
        })
    }
    // XORQ imm32, r64
    if isImm32Ext(v0, 8) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0x81)                    // opcode 0x81 /6: full imm32 form (sign-extended to 64 bits)
            m.emit(0xf0 | lcode(v[1]))
            m.imm4(toImmAny(v[0]))
        })
    }
    // XORQ r64, r64
    // Both ModRM directions are registered: 0x31 (r/m64 ^= r64) and 0x33 (r64 ^= r/m64).
    if isReg64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1]))   // REX.W + REX.R (reg) + REX.B (rm)
            m.emit(0x31)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))   // ModRM: mod=11, reg=v0, rm=v1
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
            m.emit(0x33)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=v1, rm=v0
        })
    }
    // XORQ m64, r64
    if isM64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))              // REX prefix with W=1 for the memory form
            m.emit(0x33)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    // XORQ imm8, m64
    if isImm8Ext(v0, 8) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0x83)
            m.mrsd(6, addr(v[1]), 1)        // /6 selects XOR within the immediate-group opcodes
            m.imm1(toImmAny(v[0]))
        })
    }
    // XORQ imm32, m64
    if isImm32Ext(v0, 8) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0x81)
            m.mrsd(6, addr(v[1]), 1)
            m.imm4(toImmAny(v[0]))
        })
    }
    // XORQ r64, m64
    if isReg64(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[0]), addr(v[1]))
            m.emit(0x31)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No operand form matched: the caller passed an invalid combination.
    if p.len == 0 {
        panic("invalid operands for XORQ")
    }
    return p
}
 97084  
 97085  // XORW performs "Logical Exclusive OR".
 97086  //
 97087  // Mnemonic        : XOR
 97088  // Supported forms : (8 forms)
 97089  //
 97090  //    * XORW imm16, ax
 97091  //    * XORW imm8, r16
 97092  //    * XORW imm16, r16
 97093  //    * XORW r16, r16
 97094  //    * XORW m16, r16
 97095  //    * XORW imm8, m16
 97096  //    * XORW imm16, m16
 97097  //    * XORW r16, m16
 97098  //
func (self *Program) XORW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("XORW", 2, Operands { v0, v1 })
    // XORW imm16, ax
    if isImm16(v0) && v1 == AX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                    // operand-size override: selects the 16-bit form
            m.emit(0x35)                    // accumulator short form: XOR AX, imm16
            m.imm2(toImmAny(v[0]))
        })
    }
    // XORW imm8, r16
    if isImm8Ext(v0, 2) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0x83)                    // opcode 0x83 /6: sign-extended imm8 form
            m.emit(0xf0 | lcode(v[1]))      // ModRM: mod=11, reg=/6, rm=v1
            m.imm1(toImmAny(v[0]))
        })
    }
    // XORW imm16, r16
    if isImm16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0x81)                    // opcode 0x81 /6: full imm16 form
            m.emit(0xf0 | lcode(v[1]))
            m.imm2(toImmAny(v[0]))
        })
    }
    // XORW r16, r16
    // Both ModRM directions are registered: 0x31 (r/m16 ^= r16) and 0x33 (r16 ^= r/m16).
    if isReg16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x31)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))   // ModRM: mod=11, reg=v0, rm=v1
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x33)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=v1, rm=v0
        })
    }
    // XORW m16, r16
    if isM16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x33)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/disp for the memory operand
        })
    }
    // XORW imm8, m16
    if isImm8Ext(v0, 2) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0x83)
            m.mrsd(6, addr(v[1]), 1)        // /6 selects XOR within the immediate-group opcodes
            m.imm1(toImmAny(v[0]))
        })
    }
    // XORW imm16, m16
    if isImm16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0x81)
            m.mrsd(6, addr(v[1]), 1)
            m.imm2(toImmAny(v[0]))
        })
    }
    // XORW r16, m16
    if isReg16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x31)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No operand form matched: the caller passed an invalid combination.
    if p.len == 0 {
        panic("invalid operands for XORW")
    }
    return p
}