github.com/cloudwego/iasm@v0.2.0/x86_64/instructions.go (about)

     1  //
     2  // Copyright 2024 CloudWeGo Authors
     3  //
     4  // Licensed under the Apache License, Version 2.0 (the "License");
     5  // you may not use this file except in compliance with the License.
     6  // You may obtain a copy of the License at
     7  //
     8  //     http://www.apache.org/licenses/LICENSE-2.0
     9  //
    10  // Unless required by applicable law or agreed to in writing, software
    11  // distributed under the License is distributed on an "AS IS" BASIS,
    12  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  // See the License for the specific language governing permissions and
    14  // limitations under the License.
    15  //
    16  
    17  // Code generated by "mkasm_amd64.py", DO NOT EDIT.
    18  
    19  package x86_64
    20  
    21  // ADCB performs "Add with Carry".
    22  //
    23  // Mnemonic        : ADC
    24  // Supported forms : (6 forms)
    25  //
    26  //    * ADCB imm8, al
    27  //    * ADCB imm8, r8
    28  //    * ADCB r8, r8
    29  //    * ADCB m8, r8
    30  //    * ADCB imm8, m8
    31  //    * ADCB r8, m8
    32  //
    33  func (self *Program) ADCB(v0 interface{}, v1 interface{}) *Instruction {
    34      p := self.alloc("ADCB", 2, Operands { v0, v1 })
        // Each operand form that matches registers one candidate encoder via
        // p.add; if no form matches (p.len == 0) the operands are rejected below.
    35      // ADCB imm8, al
    36      if isImm8(v0) && v1 == AL {
    37          p.domain = DomainGeneric
    38          p.add(0, func(m *_Encoding, v []interface{}) {
        // 0x14 ib — short accumulator form, no ModRM byte needed.
    39              m.emit(0x14)
    40              m.imm1(toImmAny(v[0]))
    41          })
    42      }
    43      // ADCB imm8, r8
    44      if isImm8(v0) && isReg8(v1) {
    45          p.domain = DomainGeneric
    46          p.add(0, func(m *_Encoding, v []interface{}) {
        // NOTE(review): isReg8REX presumably forces a REX prefix for the
        // byte registers only addressable with REX (SPL/BPL/SIL/DIL) —
        // confirm against the register definitions.
    47              m.rexo(0, v[1], isReg8REX(v[1]))
    48              m.emit(0x80)
    49              m.emit(0xd0 | lcode(v[1]))
    50              m.imm1(toImmAny(v[0]))
    51          })
    52      }
    53      // ADCB r8, r8
    54      if isReg8(v0) && isReg8(v1) {
    55          p.domain = DomainGeneric
        // Register-register ADC has two equivalent encodings (0x10 with the
        // source in ModRM.reg, and 0x12 with the operands swapped); both are
        // registered so the encoder may pick either.
    56          p.add(0, func(m *_Encoding, v []interface{}) {
    57              m.rexo(hcode(v[0]), v[1], isReg8REX(v[0]) || isReg8REX(v[1]))
    58              m.emit(0x10)
    59              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
    60          })
    61          p.add(0, func(m *_Encoding, v []interface{}) {
    62              m.rexo(hcode(v[1]), v[0], isReg8REX(v[0]) || isReg8REX(v[1]))
    63              m.emit(0x12)
    64              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
    65          })
    66      }
    67      // ADCB m8, r8
    68      if isM8(v0) && isReg8(v1) {
    69          p.domain = DomainGeneric
    70          p.add(0, func(m *_Encoding, v []interface{}) {
    71              m.rexo(hcode(v[1]), addr(v[0]), isReg8REX(v[1]))
    72              m.emit(0x12)
    73              m.mrsd(lcode(v[1]), addr(v[0]), 1)
    74          })
    75      }
    76      // ADCB imm8, m8
    77      if isImm8(v0) && isM8(v1) {
    78          p.domain = DomainGeneric
    79          p.add(0, func(m *_Encoding, v []interface{}) {
    80              m.rexo(0, addr(v[1]), false)
    81              m.emit(0x80)
        // ModRM.reg = 2 selects ADC within the 0x80 immediate group (/2).
    82              m.mrsd(2, addr(v[1]), 1)
    83              m.imm1(toImmAny(v[0]))
    84          })
    85      }
    86      // ADCB r8, m8
    87      if isReg8(v0) && isM8(v1) {
    88          p.domain = DomainGeneric
    89          p.add(0, func(m *_Encoding, v []interface{}) {
    90              m.rexo(hcode(v[0]), addr(v[1]), isReg8REX(v[0]))
    91              m.emit(0x10)
    92              m.mrsd(lcode(v[0]), addr(v[1]), 1)
    93          })
    94      }
        // No form matched: the operand combination is not encodable as ADCB.
    95      if p.len == 0 {
    96          panic("invalid operands for ADCB")
    97      }
    98      return p
    99  }
   100  
   101  // ADCL performs "Add with Carry".
   102  //
   103  // Mnemonic        : ADC
   104  // Supported forms : (8 forms)
   105  //
   106  //    * ADCL imm32, eax
   107  //    * ADCL imm8, r32
   108  //    * ADCL imm32, r32
   109  //    * ADCL r32, r32
   110  //    * ADCL m32, r32
   111  //    * ADCL imm8, m32
   112  //    * ADCL imm32, m32
   113  //    * ADCL r32, m32
   114  //
   115  func (self *Program) ADCL(v0 interface{}, v1 interface{}) *Instruction {
   116      p := self.alloc("ADCL", 2, Operands { v0, v1 })
        // Each matching form registers a candidate encoder via p.add; if no
        // form matches (p.len == 0) the operands are rejected below.
   117      // ADCL imm32, eax
   118      if isImm32(v0) && v1 == EAX {
   119          p.domain = DomainGeneric
   120          p.add(0, func(m *_Encoding, v []interface{}) {
        // 0x15 id — short accumulator form, no ModRM byte needed.
   121              m.emit(0x15)
   122              m.imm4(toImmAny(v[0]))
   123          })
   124      }
   125      // ADCL imm8, r32
        // 0x83 /2 ib — imm8 sign-extended to 32 bits; isImm8Ext presumably
        // checks the value survives that sign-extension (TODO confirm).
   126      if isImm8Ext(v0, 4) && isReg32(v1) {
   127          p.domain = DomainGeneric
   128          p.add(0, func(m *_Encoding, v []interface{}) {
   129              m.rexo(0, v[1], false)
   130              m.emit(0x83)
   131              m.emit(0xd0 | lcode(v[1]))
   132              m.imm1(toImmAny(v[0]))
   133          })
   134      }
   135      // ADCL imm32, r32
   136      if isImm32(v0) && isReg32(v1) {
   137          p.domain = DomainGeneric
   138          p.add(0, func(m *_Encoding, v []interface{}) {
   139              m.rexo(0, v[1], false)
   140              m.emit(0x81)
   141              m.emit(0xd0 | lcode(v[1]))
   142              m.imm4(toImmAny(v[0]))
   143          })
   144      }
   145      // ADCL r32, r32
   146      if isReg32(v0) && isReg32(v1) {
   147          p.domain = DomainGeneric
        // Two equivalent encodings (0x11 /r and the operand-swapped 0x13 /r);
        // both are registered so the encoder may pick either.
   148          p.add(0, func(m *_Encoding, v []interface{}) {
   149              m.rexo(hcode(v[0]), v[1], false)
   150              m.emit(0x11)
   151              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
   152          })
   153          p.add(0, func(m *_Encoding, v []interface{}) {
   154              m.rexo(hcode(v[1]), v[0], false)
   155              m.emit(0x13)
   156              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
   157          })
   158      }
   159      // ADCL m32, r32
   160      if isM32(v0) && isReg32(v1) {
   161          p.domain = DomainGeneric
   162          p.add(0, func(m *_Encoding, v []interface{}) {
   163              m.rexo(hcode(v[1]), addr(v[0]), false)
   164              m.emit(0x13)
   165              m.mrsd(lcode(v[1]), addr(v[0]), 1)
   166          })
   167      }
   168      // ADCL imm8, m32
   169      if isImm8Ext(v0, 4) && isM32(v1) {
   170          p.domain = DomainGeneric
   171          p.add(0, func(m *_Encoding, v []interface{}) {
   172              m.rexo(0, addr(v[1]), false)
   173              m.emit(0x83)
        // ModRM.reg = 2 selects ADC within the immediate group (/2).
   174              m.mrsd(2, addr(v[1]), 1)
   175              m.imm1(toImmAny(v[0]))
   176          })
   177      }
   178      // ADCL imm32, m32
   179      if isImm32(v0) && isM32(v1) {
   180          p.domain = DomainGeneric
   181          p.add(0, func(m *_Encoding, v []interface{}) {
   182              m.rexo(0, addr(v[1]), false)
   183              m.emit(0x81)
   184              m.mrsd(2, addr(v[1]), 1)
   185              m.imm4(toImmAny(v[0]))
   186          })
   187      }
   188      // ADCL r32, m32
   189      if isReg32(v0) && isM32(v1) {
   190          p.domain = DomainGeneric
   191          p.add(0, func(m *_Encoding, v []interface{}) {
   192              m.rexo(hcode(v[0]), addr(v[1]), false)
   193              m.emit(0x11)
   194              m.mrsd(lcode(v[0]), addr(v[1]), 1)
   195          })
   196      }
        // No form matched: the operand combination is not encodable as ADCL.
   197      if p.len == 0 {
   198          panic("invalid operands for ADCL")
   199      }
   200      return p
   201  }
   202  
   203  // ADCQ performs "Add with Carry".
   204  //
   205  // Mnemonic        : ADC
   206  // Supported forms : (8 forms)
   207  //
   208  //    * ADCQ imm32, rax
   209  //    * ADCQ imm8, r64
   210  //    * ADCQ imm32, r64
   211  //    * ADCQ r64, r64
   212  //    * ADCQ m64, r64
   213  //    * ADCQ imm8, m64
   214  //    * ADCQ imm32, m64
   215  //    * ADCQ r64, m64
   216  //
   217  func (self *Program) ADCQ(v0 interface{}, v1 interface{}) *Instruction {
   218      p := self.alloc("ADCQ", 2, Operands { v0, v1 })
        // 64-bit forms: every register encoding below emits a mandatory REX
        // prefix with W set (base byte 0x48), OR-ing in the operands' high
        // register bits via hcode; memory forms use m.rexm(1, ...) instead.
   219      // ADCQ imm32, rax
   220      if isImm32(v0) && v1 == RAX {
   221          p.domain = DomainGeneric
   222          p.add(0, func(m *_Encoding, v []interface{}) {
        // 0x48 0x15 id — short accumulator form (imm32 sign-extended to 64).
   223              m.emit(0x48)
   224              m.emit(0x15)
   225              m.imm4(toImmAny(v[0]))
   226          })
   227      }
   228      // ADCQ imm8, r64
   229      if isImm8Ext(v0, 8) && isReg64(v1) {
   230          p.domain = DomainGeneric
   231          p.add(0, func(m *_Encoding, v []interface{}) {
   232              m.emit(0x48 | hcode(v[1]))
   233              m.emit(0x83)
   234              m.emit(0xd0 | lcode(v[1]))
   235              m.imm1(toImmAny(v[0]))
   236          })
   237      }
   238      // ADCQ imm32, r64
   239      if isImm32Ext(v0, 8) && isReg64(v1) {
   240          p.domain = DomainGeneric
   241          p.add(0, func(m *_Encoding, v []interface{}) {
   242              m.emit(0x48 | hcode(v[1]))
   243              m.emit(0x81)
   244              m.emit(0xd0 | lcode(v[1]))
   245              m.imm4(toImmAny(v[0]))
   246          })
   247      }
   248      // ADCQ r64, r64
   249      if isReg64(v0) && isReg64(v1) {
   250          p.domain = DomainGeneric
        // Two equivalent encodings (0x11 /r and the operand-swapped 0x13 /r).
   251          p.add(0, func(m *_Encoding, v []interface{}) {
   252              m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1]))
   253              m.emit(0x11)
   254              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
   255          })
   256          p.add(0, func(m *_Encoding, v []interface{}) {
   257              m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
   258              m.emit(0x13)
   259              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
   260          })
   261      }
   262      // ADCQ m64, r64
   263      if isM64(v0) && isReg64(v1) {
   264          p.domain = DomainGeneric
   265          p.add(0, func(m *_Encoding, v []interface{}) {
   266              m.rexm(1, hcode(v[1]), addr(v[0]))
   267              m.emit(0x13)
   268              m.mrsd(lcode(v[1]), addr(v[0]), 1)
   269          })
   270      }
   271      // ADCQ imm8, m64
   272      if isImm8Ext(v0, 8) && isM64(v1) {
   273          p.domain = DomainGeneric
   274          p.add(0, func(m *_Encoding, v []interface{}) {
   275              m.rexm(1, 0, addr(v[1]))
   276              m.emit(0x83)
        // ModRM.reg = 2 selects ADC within the immediate group (/2).
   277              m.mrsd(2, addr(v[1]), 1)
   278              m.imm1(toImmAny(v[0]))
   279          })
   280      }
   281      // ADCQ imm32, m64
   282      if isImm32Ext(v0, 8) && isM64(v1) {
   283          p.domain = DomainGeneric
   284          p.add(0, func(m *_Encoding, v []interface{}) {
   285              m.rexm(1, 0, addr(v[1]))
   286              m.emit(0x81)
   287              m.mrsd(2, addr(v[1]), 1)
   288              m.imm4(toImmAny(v[0]))
   289          })
   290      }
   291      // ADCQ r64, m64
   292      if isReg64(v0) && isM64(v1) {
   293          p.domain = DomainGeneric
   294          p.add(0, func(m *_Encoding, v []interface{}) {
   295              m.rexm(1, hcode(v[0]), addr(v[1]))
   296              m.emit(0x11)
   297              m.mrsd(lcode(v[0]), addr(v[1]), 1)
   298          })
   299      }
        // No form matched: the operand combination is not encodable as ADCQ.
   300      if p.len == 0 {
   301          panic("invalid operands for ADCQ")
   302      }
   303      return p
   304  }
   305  
   306  // ADCW performs "Add with Carry".
   307  //
   308  // Mnemonic        : ADC
   309  // Supported forms : (8 forms)
   310  //
   311  //    * ADCW imm16, ax
   312  //    * ADCW imm8, r16
   313  //    * ADCW imm16, r16
   314  //    * ADCW r16, r16
   315  //    * ADCW m16, r16
   316  //    * ADCW imm8, m16
   317  //    * ADCW imm16, m16
   318  //    * ADCW r16, m16
   319  //
   320  func (self *Program) ADCW(v0 interface{}, v1 interface{}) *Instruction {
   321      p := self.alloc("ADCW", 2, Operands { v0, v1 })
        // 16-bit forms: every encoding below starts with the 0x66
        // operand-size override prefix to select 16-bit operands.
   322      // ADCW imm16, ax
   323      if isImm16(v0) && v1 == AX {
   324          p.domain = DomainGeneric
   325          p.add(0, func(m *_Encoding, v []interface{}) {
        // 0x66 0x15 iw — short accumulator form, no ModRM byte needed.
   326              m.emit(0x66)
   327              m.emit(0x15)
   328              m.imm2(toImmAny(v[0]))
   329          })
   330      }
   331      // ADCW imm8, r16
   332      if isImm8Ext(v0, 2) && isReg16(v1) {
   333          p.domain = DomainGeneric
   334          p.add(0, func(m *_Encoding, v []interface{}) {
   335              m.emit(0x66)
   336              m.rexo(0, v[1], false)
   337              m.emit(0x83)
   338              m.emit(0xd0 | lcode(v[1]))
   339              m.imm1(toImmAny(v[0]))
   340          })
   341      }
   342      // ADCW imm16, r16
   343      if isImm16(v0) && isReg16(v1) {
   344          p.domain = DomainGeneric
   345          p.add(0, func(m *_Encoding, v []interface{}) {
   346              m.emit(0x66)
   347              m.rexo(0, v[1], false)
   348              m.emit(0x81)
   349              m.emit(0xd0 | lcode(v[1]))
   350              m.imm2(toImmAny(v[0]))
   351          })
   352      }
   353      // ADCW r16, r16
   354      if isReg16(v0) && isReg16(v1) {
   355          p.domain = DomainGeneric
        // Two equivalent encodings (0x11 /r and the operand-swapped 0x13 /r).
   356          p.add(0, func(m *_Encoding, v []interface{}) {
   357              m.emit(0x66)
   358              m.rexo(hcode(v[0]), v[1], false)
   359              m.emit(0x11)
   360              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
   361          })
   362          p.add(0, func(m *_Encoding, v []interface{}) {
   363              m.emit(0x66)
   364              m.rexo(hcode(v[1]), v[0], false)
   365              m.emit(0x13)
   366              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
   367          })
   368      }
   369      // ADCW m16, r16
   370      if isM16(v0) && isReg16(v1) {
   371          p.domain = DomainGeneric
   372          p.add(0, func(m *_Encoding, v []interface{}) {
   373              m.emit(0x66)
   374              m.rexo(hcode(v[1]), addr(v[0]), false)
   375              m.emit(0x13)
   376              m.mrsd(lcode(v[1]), addr(v[0]), 1)
   377          })
   378      }
   379      // ADCW imm8, m16
   380      if isImm8Ext(v0, 2) && isM16(v1) {
   381          p.domain = DomainGeneric
   382          p.add(0, func(m *_Encoding, v []interface{}) {
   383              m.emit(0x66)
   384              m.rexo(0, addr(v[1]), false)
   385              m.emit(0x83)
        // ModRM.reg = 2 selects ADC within the immediate group (/2).
   386              m.mrsd(2, addr(v[1]), 1)
   387              m.imm1(toImmAny(v[0]))
   388          })
   389      }
   390      // ADCW imm16, m16
   391      if isImm16(v0) && isM16(v1) {
   392          p.domain = DomainGeneric
   393          p.add(0, func(m *_Encoding, v []interface{}) {
   394              m.emit(0x66)
   395              m.rexo(0, addr(v[1]), false)
   396              m.emit(0x81)
   397              m.mrsd(2, addr(v[1]), 1)
   398              m.imm2(toImmAny(v[0]))
   399          })
   400      }
   401      // ADCW r16, m16
   402      if isReg16(v0) && isM16(v1) {
   403          p.domain = DomainGeneric
   404          p.add(0, func(m *_Encoding, v []interface{}) {
   405              m.emit(0x66)
   406              m.rexo(hcode(v[0]), addr(v[1]), false)
   407              m.emit(0x11)
   408              m.mrsd(lcode(v[0]), addr(v[1]), 1)
   409          })
   410      }
        // No form matched: the operand combination is not encodable as ADCW.
   411      if p.len == 0 {
   412          panic("invalid operands for ADCW")
   413      }
   414      return p
   415  }
   416  
   417  // ADCXL performs "Unsigned Integer Addition of Two Operands with Carry Flag".
   418  //
   419  // Mnemonic        : ADCX
   420  // Supported forms : (2 forms)
   421  //
   422  //    * ADCXL r32, r32    [ADX]
   423  //    * ADCXL m32, r32    [ADX]
   424  //
   425  func (self *Program) ADCXL(v0 interface{}, v1 interface{}) *Instruction {
   426      p := self.alloc("ADCXL", 2, Operands { v0, v1 })
        // Both forms require the ADX ISA extension (self.require below) and
        // share the 0x66 0x0F 0x38 0xF6 opcode sequence; only the final
        // ModRM/SIB encoding differs between the register and memory forms.
   427      // ADCXL r32, r32
   428      if isReg32(v0) && isReg32(v1) {
   429          self.require(ISA_ADX)
   430          p.domain = DomainGeneric
   431          p.add(0, func(m *_Encoding, v []interface{}) {
   432              m.emit(0x66)
   433              m.rexo(hcode(v[1]), v[0], false)
   434              m.emit(0x0f)
   435              m.emit(0x38)
   436              m.emit(0xf6)
   437              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
   438          })
   439      }
   440      // ADCXL m32, r32
   441      if isM32(v0) && isReg32(v1) {
   442          self.require(ISA_ADX)
   443          p.domain = DomainGeneric
   444          p.add(0, func(m *_Encoding, v []interface{}) {
   445              m.emit(0x66)
   446              m.rexo(hcode(v[1]), addr(v[0]), false)
   447              m.emit(0x0f)
   448              m.emit(0x38)
   449              m.emit(0xf6)
   450              m.mrsd(lcode(v[1]), addr(v[0]), 1)
   451          })
   452      }
        // No form matched: the operand combination is not encodable as ADCXL.
   453      if p.len == 0 {
   454          panic("invalid operands for ADCXL")
   455      }
   456      return p
   457  }
   458  
   459  // ADCXQ performs "Unsigned Integer Addition of Two Operands with Carry Flag".
   460  //
   461  // Mnemonic        : ADCX
   462  // Supported forms : (2 forms)
   463  //
   464  //    * ADCXQ r64, r64    [ADX]
   465  //    * ADCXQ m64, r64    [ADX]
   466  //
   467  func (self *Program) ADCXQ(v0 interface{}, v1 interface{}) *Instruction {
   468      p := self.alloc("ADCXQ", 2, Operands { v0, v1 })
        // 64-bit ADCX: both forms require the ADX ISA extension and emit
        // 0x66 + REX.W + 0x0F 0x38 0xF6 (register form builds the REX byte
        // from 0x48 directly; the memory form goes through m.rexm(1, ...)).
   469      // ADCXQ r64, r64
   470      if isReg64(v0) && isReg64(v1) {
   471          self.require(ISA_ADX)
   472          p.domain = DomainGeneric
   473          p.add(0, func(m *_Encoding, v []interface{}) {
   474              m.emit(0x66)
   475              m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
   476              m.emit(0x0f)
   477              m.emit(0x38)
   478              m.emit(0xf6)
   479              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
   480          })
   481      }
   482      // ADCXQ m64, r64
   483      if isM64(v0) && isReg64(v1) {
   484          self.require(ISA_ADX)
   485          p.domain = DomainGeneric
   486          p.add(0, func(m *_Encoding, v []interface{}) {
   487              m.emit(0x66)
   488              m.rexm(1, hcode(v[1]), addr(v[0]))
   489              m.emit(0x0f)
   490              m.emit(0x38)
   491              m.emit(0xf6)
   492              m.mrsd(lcode(v[1]), addr(v[0]), 1)
   493          })
   494      }
        // No form matched: the operand combination is not encodable as ADCXQ.
   495      if p.len == 0 {
   496          panic("invalid operands for ADCXQ")
   497      }
   498      return p
   499  }
   500  
   501  // ADDB performs "Add".
   502  //
   503  // Mnemonic        : ADD
   504  // Supported forms : (6 forms)
   505  //
   506  //    * ADDB imm8, al
   507  //    * ADDB imm8, r8
   508  //    * ADDB r8, r8
   509  //    * ADDB m8, r8
   510  //    * ADDB imm8, m8
   511  //    * ADDB r8, m8
   512  //
   513  func (self *Program) ADDB(v0 interface{}, v1 interface{}) *Instruction {
   514      p := self.alloc("ADDB", 2, Operands { v0, v1 })
        // Mirrors ADCB above but with the ADD opcodes (0x00/0x02/0x04 and
        // immediate-group selector /0 instead of /2).
   515      // ADDB imm8, al
   516      if isImm8(v0) && v1 == AL {
   517          p.domain = DomainGeneric
   518          p.add(0, func(m *_Encoding, v []interface{}) {
        // 0x04 ib — short accumulator form, no ModRM byte needed.
   519              m.emit(0x04)
   520              m.imm1(toImmAny(v[0]))
   521          })
   522      }
   523      // ADDB imm8, r8
   524      if isImm8(v0) && isReg8(v1) {
   525          p.domain = DomainGeneric
   526          p.add(0, func(m *_Encoding, v []interface{}) {
   527              m.rexo(0, v[1], isReg8REX(v[1]))
   528              m.emit(0x80)
   529              m.emit(0xc0 | lcode(v[1]))
   530              m.imm1(toImmAny(v[0]))
   531          })
   532      }
   533      // ADDB r8, r8
   534      if isReg8(v0) && isReg8(v1) {
   535          p.domain = DomainGeneric
        // Two equivalent encodings (0x00 /r and the operand-swapped 0x02 /r).
   536          p.add(0, func(m *_Encoding, v []interface{}) {
   537              m.rexo(hcode(v[0]), v[1], isReg8REX(v[0]) || isReg8REX(v[1]))
   538              m.emit(0x00)
   539              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
   540          })
   541          p.add(0, func(m *_Encoding, v []interface{}) {
   542              m.rexo(hcode(v[1]), v[0], isReg8REX(v[0]) || isReg8REX(v[1]))
   543              m.emit(0x02)
   544              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
   545          })
   546      }
   547      // ADDB m8, r8
   548      if isM8(v0) && isReg8(v1) {
   549          p.domain = DomainGeneric
   550          p.add(0, func(m *_Encoding, v []interface{}) {
   551              m.rexo(hcode(v[1]), addr(v[0]), isReg8REX(v[1]))
   552              m.emit(0x02)
   553              m.mrsd(lcode(v[1]), addr(v[0]), 1)
   554          })
   555      }
   556      // ADDB imm8, m8
   557      if isImm8(v0) && isM8(v1) {
   558          p.domain = DomainGeneric
   559          p.add(0, func(m *_Encoding, v []interface{}) {
   560              m.rexo(0, addr(v[1]), false)
   561              m.emit(0x80)
        // ModRM.reg = 0 selects ADD within the 0x80 immediate group (/0).
   562              m.mrsd(0, addr(v[1]), 1)
   563              m.imm1(toImmAny(v[0]))
   564          })
   565      }
   566      // ADDB r8, m8
   567      if isReg8(v0) && isM8(v1) {
   568          p.domain = DomainGeneric
   569          p.add(0, func(m *_Encoding, v []interface{}) {
   570              m.rexo(hcode(v[0]), addr(v[1]), isReg8REX(v[0]))
   571              m.emit(0x00)
   572              m.mrsd(lcode(v[0]), addr(v[1]), 1)
   573          })
   574      }
        // No form matched: the operand combination is not encodable as ADDB.
   575      if p.len == 0 {
   576          panic("invalid operands for ADDB")
   577      }
   578      return p
   579  }
   580  
   581  // ADDL performs "Add".
   582  //
   583  // Mnemonic        : ADD
   584  // Supported forms : (8 forms)
   585  //
   586  //    * ADDL imm32, eax
   587  //    * ADDL imm8, r32
   588  //    * ADDL imm32, r32
   589  //    * ADDL r32, r32
   590  //    * ADDL m32, r32
   591  //    * ADDL imm8, m32
   592  //    * ADDL imm32, m32
   593  //    * ADDL r32, m32
   594  //
   595  func (self *Program) ADDL(v0 interface{}, v1 interface{}) *Instruction {
   596      p := self.alloc("ADDL", 2, Operands { v0, v1 })
        // Mirrors ADCL above but with the ADD opcodes (0x01/0x03/0x05 and
        // immediate-group selector /0 instead of /2).
   597      // ADDL imm32, eax
   598      if isImm32(v0) && v1 == EAX {
   599          p.domain = DomainGeneric
   600          p.add(0, func(m *_Encoding, v []interface{}) {
        // 0x05 id — short accumulator form, no ModRM byte needed.
   601              m.emit(0x05)
   602              m.imm4(toImmAny(v[0]))
   603          })
   604      }
   605      // ADDL imm8, r32
   606      if isImm8Ext(v0, 4) && isReg32(v1) {
   607          p.domain = DomainGeneric
   608          p.add(0, func(m *_Encoding, v []interface{}) {
   609              m.rexo(0, v[1], false)
   610              m.emit(0x83)
   611              m.emit(0xc0 | lcode(v[1]))
   612              m.imm1(toImmAny(v[0]))
   613          })
   614      }
   615      // ADDL imm32, r32
   616      if isImm32(v0) && isReg32(v1) {
   617          p.domain = DomainGeneric
   618          p.add(0, func(m *_Encoding, v []interface{}) {
   619              m.rexo(0, v[1], false)
   620              m.emit(0x81)
   621              m.emit(0xc0 | lcode(v[1]))
   622              m.imm4(toImmAny(v[0]))
   623          })
   624      }
   625      // ADDL r32, r32
   626      if isReg32(v0) && isReg32(v1) {
   627          p.domain = DomainGeneric
        // Two equivalent encodings (0x01 /r and the operand-swapped 0x03 /r).
   628          p.add(0, func(m *_Encoding, v []interface{}) {
   629              m.rexo(hcode(v[0]), v[1], false)
   630              m.emit(0x01)
   631              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
   632          })
   633          p.add(0, func(m *_Encoding, v []interface{}) {
   634              m.rexo(hcode(v[1]), v[0], false)
   635              m.emit(0x03)
   636              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
   637          })
   638      }
   639      // ADDL m32, r32
   640      if isM32(v0) && isReg32(v1) {
   641          p.domain = DomainGeneric
   642          p.add(0, func(m *_Encoding, v []interface{}) {
   643              m.rexo(hcode(v[1]), addr(v[0]), false)
   644              m.emit(0x03)
   645              m.mrsd(lcode(v[1]), addr(v[0]), 1)
   646          })
   647      }
   648      // ADDL imm8, m32
   649      if isImm8Ext(v0, 4) && isM32(v1) {
   650          p.domain = DomainGeneric
   651          p.add(0, func(m *_Encoding, v []interface{}) {
   652              m.rexo(0, addr(v[1]), false)
   653              m.emit(0x83)
        // ModRM.reg = 0 selects ADD within the immediate group (/0).
   654              m.mrsd(0, addr(v[1]), 1)
   655              m.imm1(toImmAny(v[0]))
   656          })
   657      }
   658      // ADDL imm32, m32
   659      if isImm32(v0) && isM32(v1) {
   660          p.domain = DomainGeneric
   661          p.add(0, func(m *_Encoding, v []interface{}) {
   662              m.rexo(0, addr(v[1]), false)
   663              m.emit(0x81)
   664              m.mrsd(0, addr(v[1]), 1)
   665              m.imm4(toImmAny(v[0]))
   666          })
   667      }
   668      // ADDL r32, m32
   669      if isReg32(v0) && isM32(v1) {
   670          p.domain = DomainGeneric
   671          p.add(0, func(m *_Encoding, v []interface{}) {
   672              m.rexo(hcode(v[0]), addr(v[1]), false)
   673              m.emit(0x01)
   674              m.mrsd(lcode(v[0]), addr(v[1]), 1)
   675          })
   676      }
        // No form matched: the operand combination is not encodable as ADDL.
   677      if p.len == 0 {
   678          panic("invalid operands for ADDL")
   679      }
   680      return p
   681  }
   682  
   683  // ADDPD performs "Add Packed Double-Precision Floating-Point Values".
   684  //
   685  // Mnemonic        : ADDPD
   686  // Supported forms : (2 forms)
   687  //
   688  //    * ADDPD xmm, xmm     [SSE2]
   689  //    * ADDPD m128, xmm    [SSE2]
   690  //
   691  func (self *Program) ADDPD(v0 interface{}, v1 interface{}) *Instruction {
   692      p := self.alloc("ADDPD", 2, Operands { v0, v1 })
        // Both forms require SSE2 and emit 0x66 0x0F 0x58; only the final
        // ModRM/SIB encoding differs between the register and memory forms.
   693      // ADDPD xmm, xmm
   694      if isXMM(v0) && isXMM(v1) {
   695          self.require(ISA_SSE2)
   696          p.domain = DomainMMXSSE
   697          p.add(0, func(m *_Encoding, v []interface{}) {
   698              m.emit(0x66)
   699              m.rexo(hcode(v[1]), v[0], false)
   700              m.emit(0x0f)
   701              m.emit(0x58)
   702              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
   703          })
   704      }
   705      // ADDPD m128, xmm
   706      if isM128(v0) && isXMM(v1) {
   707          self.require(ISA_SSE2)
   708          p.domain = DomainMMXSSE
   709          p.add(0, func(m *_Encoding, v []interface{}) {
   710              m.emit(0x66)
   711              m.rexo(hcode(v[1]), addr(v[0]), false)
   712              m.emit(0x0f)
   713              m.emit(0x58)
   714              m.mrsd(lcode(v[1]), addr(v[0]), 1)
   715          })
   716      }
        // No form matched: the operand combination is not encodable as ADDPD.
   717      if p.len == 0 {
   718          panic("invalid operands for ADDPD")
   719      }
   720      return p
   721  }
   722  
   723  // ADDPS performs "Add Packed Single-Precision Floating-Point Values".
   724  //
   725  // Mnemonic        : ADDPS
   726  // Supported forms : (2 forms)
   727  //
   728  //    * ADDPS xmm, xmm     [SSE]
   729  //    * ADDPS m128, xmm    [SSE]
   730  //
   731  func (self *Program) ADDPS(v0 interface{}, v1 interface{}) *Instruction {
   732      p := self.alloc("ADDPS", 2, Operands { v0, v1 })
        // Both forms require SSE and emit 0x0F 0x58 (same opcode as ADDPD but
        // without the 0x66 prefix, which is what distinguishes the two).
   733      // ADDPS xmm, xmm
   734      if isXMM(v0) && isXMM(v1) {
   735          self.require(ISA_SSE)
   736          p.domain = DomainMMXSSE
   737          p.add(0, func(m *_Encoding, v []interface{}) {
   738              m.rexo(hcode(v[1]), v[0], false)
   739              m.emit(0x0f)
   740              m.emit(0x58)
   741              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
   742          })
   743      }
   744      // ADDPS m128, xmm
   745      if isM128(v0) && isXMM(v1) {
   746          self.require(ISA_SSE)
   747          p.domain = DomainMMXSSE
   748          p.add(0, func(m *_Encoding, v []interface{}) {
   749              m.rexo(hcode(v[1]), addr(v[0]), false)
   750              m.emit(0x0f)
   751              m.emit(0x58)
   752              m.mrsd(lcode(v[1]), addr(v[0]), 1)
   753          })
   754      }
        // No form matched: the operand combination is not encodable as ADDPS.
   755      if p.len == 0 {
   756          panic("invalid operands for ADDPS")
   757      }
   758      return p
   759  }
   760  
   761  // ADDQ performs "Add".
   762  //
   763  // Mnemonic        : ADD
   764  // Supported forms : (8 forms)
   765  //
   766  //    * ADDQ imm32, rax
   767  //    * ADDQ imm8, r64
   768  //    * ADDQ imm32, r64
   769  //    * ADDQ r64, r64
   770  //    * ADDQ m64, r64
   771  //    * ADDQ imm8, m64
   772  //    * ADDQ imm32, m64
   773  //    * ADDQ r64, m64
   774  //
   775  func (self *Program) ADDQ(v0 interface{}, v1 interface{}) *Instruction {
   776      p := self.alloc("ADDQ", 2, Operands { v0, v1 })
        // Mirrors ADCQ above but with the ADD opcodes (0x01/0x03/0x05 and
        // immediate-group selector /0); all register forms carry a REX.W
        // prefix (base byte 0x48), memory forms go through m.rexm(1, ...).
   777      // ADDQ imm32, rax
   778      if isImm32(v0) && v1 == RAX {
   779          p.domain = DomainGeneric
   780          p.add(0, func(m *_Encoding, v []interface{}) {
        // 0x48 0x05 id — short accumulator form (imm32 sign-extended to 64).
   781              m.emit(0x48)
   782              m.emit(0x05)
   783              m.imm4(toImmAny(v[0]))
   784          })
   785      }
   786      // ADDQ imm8, r64
   787      if isImm8Ext(v0, 8) && isReg64(v1) {
   788          p.domain = DomainGeneric
   789          p.add(0, func(m *_Encoding, v []interface{}) {
   790              m.emit(0x48 | hcode(v[1]))
   791              m.emit(0x83)
   792              m.emit(0xc0 | lcode(v[1]))
   793              m.imm1(toImmAny(v[0]))
   794          })
   795      }
   796      // ADDQ imm32, r64
   797      if isImm32Ext(v0, 8) && isReg64(v1) {
   798          p.domain = DomainGeneric
   799          p.add(0, func(m *_Encoding, v []interface{}) {
   800              m.emit(0x48 | hcode(v[1]))
   801              m.emit(0x81)
   802              m.emit(0xc0 | lcode(v[1]))
   803              m.imm4(toImmAny(v[0]))
   804          })
   805      }
   806      // ADDQ r64, r64
   807      if isReg64(v0) && isReg64(v1) {
   808          p.domain = DomainGeneric
        // Two equivalent encodings (0x01 /r and the operand-swapped 0x03 /r).
   809          p.add(0, func(m *_Encoding, v []interface{}) {
   810              m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1]))
   811              m.emit(0x01)
   812              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
   813          })
   814          p.add(0, func(m *_Encoding, v []interface{}) {
   815              m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
   816              m.emit(0x03)
   817              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
   818          })
   819      }
   820      // ADDQ m64, r64
   821      if isM64(v0) && isReg64(v1) {
   822          p.domain = DomainGeneric
   823          p.add(0, func(m *_Encoding, v []interface{}) {
   824              m.rexm(1, hcode(v[1]), addr(v[0]))
   825              m.emit(0x03)
   826              m.mrsd(lcode(v[1]), addr(v[0]), 1)
   827          })
   828      }
   829      // ADDQ imm8, m64
   830      if isImm8Ext(v0, 8) && isM64(v1) {
   831          p.domain = DomainGeneric
   832          p.add(0, func(m *_Encoding, v []interface{}) {
   833              m.rexm(1, 0, addr(v[1]))
   834              m.emit(0x83)
        // ModRM.reg = 0 selects ADD within the immediate group (/0).
   835              m.mrsd(0, addr(v[1]), 1)
   836              m.imm1(toImmAny(v[0]))
   837          })
   838      }
   839      // ADDQ imm32, m64
   840      if isImm32Ext(v0, 8) && isM64(v1) {
   841          p.domain = DomainGeneric
   842          p.add(0, func(m *_Encoding, v []interface{}) {
   843              m.rexm(1, 0, addr(v[1]))
   844              m.emit(0x81)
   845              m.mrsd(0, addr(v[1]), 1)
   846              m.imm4(toImmAny(v[0]))
   847          })
   848      }
   849      // ADDQ r64, m64
   850      if isReg64(v0) && isM64(v1) {
   851          p.domain = DomainGeneric
   852          p.add(0, func(m *_Encoding, v []interface{}) {
   853              m.rexm(1, hcode(v[0]), addr(v[1]))
   854              m.emit(0x01)
   855              m.mrsd(lcode(v[0]), addr(v[1]), 1)
   856          })
   857      }
        // No form matched: the operand combination is not encodable as ADDQ.
   858      if p.len == 0 {
   859          panic("invalid operands for ADDQ")
   860      }
   861      return p
   862  }
   863  
// ADDSD performs "Add Scalar Double-Precision Floating-Point Values".
//
// Mnemonic        : ADDSD
// Supported forms : (2 forms)
//
//    * ADDSD xmm, xmm    [SSE2]
//    * ADDSD m64, xmm    [SSE2]
//
// Operands are source-first, destination-last. Each matching form registers
// a candidate byte encoding on the returned *Instruction; if no form matches
// the operands, this panics.
func (self *Program) ADDSD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ADDSD", 2, Operands { v0, v1 })
    // ADDSD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)                                  // F2 mandatory prefix
            m.rexo(hcode(v[1]), v[0], false)              // optional REX for extended registers
            m.emit(0x0f)
            m.emit(0x58)                                  // opcode 0F 58
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM, register-direct: reg=dst, rm=src
        })
    }
    // ADDSD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)                                  // F2 mandatory prefix
            m.rexo(hcode(v[1]), addr(v[0]), false)        // optional REX for the memory operand
            m.emit(0x0f)
            m.emit(0x58)                                  // opcode 0F 58
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the memory source
        })
    }
    if p.len == 0 {
        panic("invalid operands for ADDSD")
    }
    return p
}
   903  
// ADDSS performs "Add Scalar Single-Precision Floating-Point Values".
//
// Mnemonic        : ADDSS
// Supported forms : (2 forms)
//
//    * ADDSS xmm, xmm    [SSE]
//    * ADDSS m32, xmm    [SSE]
//
// Operands are source-first, destination-last. Each matching form registers
// a candidate byte encoding; if no form matches, this panics.
func (self *Program) ADDSS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ADDSS", 2, Operands { v0, v1 })
    // ADDSS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)                                  // F3 mandatory prefix
            m.rexo(hcode(v[1]), v[0], false)              // optional REX for extended registers
            m.emit(0x0f)
            m.emit(0x58)                                  // opcode 0F 58
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM, register-direct: reg=dst, rm=src
        })
    }
    // ADDSS m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)                                  // F3 mandatory prefix
            m.rexo(hcode(v[1]), addr(v[0]), false)        // optional REX for the memory operand
            m.emit(0x0f)
            m.emit(0x58)                                  // opcode 0F 58
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the memory source
        })
    }
    if p.len == 0 {
        panic("invalid operands for ADDSS")
    }
    return p
}
   943  
// ADDSUBPD performs "Packed Double-FP Add/Subtract".
//
// Mnemonic        : ADDSUBPD
// Supported forms : (2 forms)
//
//    * ADDSUBPD xmm, xmm     [SSE3]
//    * ADDSUBPD m128, xmm    [SSE3]
//
// Operands are source-first, destination-last. Each matching form registers
// a candidate byte encoding; if no form matches, this panics.
func (self *Program) ADDSUBPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ADDSUBPD", 2, Operands { v0, v1 })
    // ADDSUBPD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // 66 mandatory prefix
            m.rexo(hcode(v[1]), v[0], false)              // optional REX for extended registers
            m.emit(0x0f)
            m.emit(0xd0)                                  // opcode 0F D0
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM, register-direct: reg=dst, rm=src
        })
    }
    // ADDSUBPD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // 66 mandatory prefix
            m.rexo(hcode(v[1]), addr(v[0]), false)        // optional REX for the memory operand
            m.emit(0x0f)
            m.emit(0xd0)                                  // opcode 0F D0
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the memory source
        })
    }
    if p.len == 0 {
        panic("invalid operands for ADDSUBPD")
    }
    return p
}
   983  
// ADDSUBPS performs "Packed Single-FP Add/Subtract".
//
// Mnemonic        : ADDSUBPS
// Supported forms : (2 forms)
//
//    * ADDSUBPS xmm, xmm     [SSE3]
//    * ADDSUBPS m128, xmm    [SSE3]
//
// Operands are source-first, destination-last. Each matching form registers
// a candidate byte encoding; if no form matches, this panics.
func (self *Program) ADDSUBPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ADDSUBPS", 2, Operands { v0, v1 })
    // ADDSUBPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)                                  // F2 mandatory prefix
            m.rexo(hcode(v[1]), v[0], false)              // optional REX for extended registers
            m.emit(0x0f)
            m.emit(0xd0)                                  // opcode 0F D0
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM, register-direct: reg=dst, rm=src
        })
    }
    // ADDSUBPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)                                  // F2 mandatory prefix
            m.rexo(hcode(v[1]), addr(v[0]), false)        // optional REX for the memory operand
            m.emit(0x0f)
            m.emit(0xd0)                                  // opcode 0F D0
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the memory source
        })
    }
    if p.len == 0 {
        panic("invalid operands for ADDSUBPS")
    }
    return p
}
  1023  
// ADDW performs "Add".
//
// Mnemonic        : ADD
// Supported forms : (8 forms)
//
//    * ADDW imm16, ax
//    * ADDW imm8, r16
//    * ADDW imm16, r16
//    * ADDW r16, r16
//    * ADDW m16, r16
//    * ADDW imm8, m16
//    * ADDW imm16, m16
//    * ADDW r16, m16
//
// Operands are source-first, destination-last. The form checks below are
// intentionally not mutually exclusive: an operand pair may match several
// forms (e.g. a small immediate can satisfy both the imm8 and imm16 tests),
// and every matching encoding is registered as a candidate on the returned
// *Instruction — the final selection happens later, outside this function.
// If no form matches, this panics.
func (self *Program) ADDW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ADDW", 2, Operands { v0, v1 })
    // ADDW imm16, ax
    if isImm16(v0) && v1 == AX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)              // 66 operand-size prefix (16-bit)
            m.emit(0x05)              // opcode 05 (AX-specific short form)
            m.imm2(toImmAny(v[0]))    // 16-bit immediate
        })
    }
    // ADDW imm8, r16
    if isImm8Ext(v0, 2) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)              // 66 operand-size prefix (16-bit)
            m.rexo(0, v[1], false)    // optional REX for extended registers
            m.emit(0x83)              // opcode 83 /0 (sign-extended imm8)
            m.emit(0xc0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))    // 8-bit immediate
        })
    }
    // ADDW imm16, r16
    if isImm16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)              // 66 operand-size prefix (16-bit)
            m.rexo(0, v[1], false)    // optional REX for extended registers
            m.emit(0x81)              // opcode 81 /0 (full imm16)
            m.emit(0xc0 | lcode(v[1]))
            m.imm2(toImmAny(v[0]))    // 16-bit immediate
        })
    }
    // ADDW r16, r16 — two equivalent encodings with swapped direction (01 vs 03)
    if isReg16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x01)              // opcode 01: reg field = source
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x03)              // opcode 03: reg field = destination
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // ADDW m16, r16
    if isM16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x03)              // opcode 03 (load direction)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // ADDW imm8, m16
    if isImm8Ext(v0, 2) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0x83)              // opcode 83 /0 (sign-extended imm8)
            m.mrsd(0, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // ADDW imm16, m16
    if isImm16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0x81)              // opcode 81 /0 (full imm16)
            m.mrsd(0, addr(v[1]), 1)
            m.imm2(toImmAny(v[0]))
        })
    }
    // ADDW r16, m16
    if isReg16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x01)              // opcode 01 (store direction)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for ADDW")
    }
    return p
}
  1134  
// ADOXL performs "Unsigned Integer Addition of Two Operands with Overflow Flag".
//
// Mnemonic        : ADOX
// Supported forms : (2 forms)
//
//    * ADOXL r32, r32    [ADX]
//    * ADOXL m32, r32    [ADX]
//
// Operands are source-first, destination-last. Requires the ADX extension.
// Each matching form registers a candidate byte encoding; if no form
// matches, this panics.
func (self *Program) ADOXL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ADOXL", 2, Operands { v0, v1 })
    // ADOXL r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_ADX)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)                                  // F3 mandatory prefix
            m.rexo(hcode(v[1]), v[0], false)              // optional REX for extended registers
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xf6)                                  // opcode 0F 38 F6
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM, register-direct: reg=dst, rm=src
        })
    }
    // ADOXL m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_ADX)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)                                  // F3 mandatory prefix
            m.rexo(hcode(v[1]), addr(v[0]), false)        // optional REX for the memory operand
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xf6)                                  // opcode 0F 38 F6
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the memory source
        })
    }
    if p.len == 0 {
        panic("invalid operands for ADOXL")
    }
    return p
}
  1176  
// ADOXQ performs "Unsigned Integer Addition of Two Operands with Overflow Flag".
//
// Mnemonic        : ADOX
// Supported forms : (2 forms)
//
//    * ADOXQ r64, r64    [ADX]
//    * ADOXQ m64, r64    [ADX]
//
// Operands are source-first, destination-last. Requires the ADX extension.
// 64-bit operand size is selected via a REX.W prefix. If no form matches,
// this panics.
func (self *Program) ADOXQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ADOXQ", 2, Operands { v0, v1 })
    // ADOXQ r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_ADX)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)                                    // F3 mandatory prefix
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))   // REX.W + high register bits
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xf6)                                    // opcode 0F 38 F6
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM, register-direct: reg=dst, rm=src
        })
    }
    // ADOXQ m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_ADX)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)                                    // F3 mandatory prefix
            m.rexm(1, hcode(v[1]), addr(v[0]))              // REX.W + memory-operand register bits
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xf6)                                    // opcode 0F 38 F6
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/displacement for the memory source
        })
    }
    if p.len == 0 {
        panic("invalid operands for ADOXQ")
    }
    return p
}
  1218  
// AESDEC performs "Perform One Round of an AES Decryption Flow".
//
// Mnemonic        : AESDEC
// Supported forms : (2 forms)
//
//    * AESDEC xmm, xmm     [AES]
//    * AESDEC m128, xmm    [AES]
//
// Operands are source-first, destination-last. Requires the AES-NI
// extension. If no form matches, this panics.
func (self *Program) AESDEC(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("AESDEC", 2, Operands { v0, v1 })
    // AESDEC xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AES)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // 66 mandatory prefix
            m.rexo(hcode(v[1]), v[0], false)              // optional REX for extended registers
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xde)                                  // opcode 0F 38 DE
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM, register-direct: reg=dst, rm=src
        })
    }
    // AESDEC m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AES)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // 66 mandatory prefix
            m.rexo(hcode(v[1]), addr(v[0]), false)        // optional REX for the memory operand
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xde)                                  // opcode 0F 38 DE
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the memory source
        })
    }
    if p.len == 0 {
        panic("invalid operands for AESDEC")
    }
    return p
}
  1260  
// AESDECLAST performs "Perform Last Round of an AES Decryption Flow".
//
// Mnemonic        : AESDECLAST
// Supported forms : (2 forms)
//
//    * AESDECLAST xmm, xmm     [AES]
//    * AESDECLAST m128, xmm    [AES]
//
// Operands are source-first, destination-last. Requires the AES-NI
// extension. If no form matches, this panics.
func (self *Program) AESDECLAST(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("AESDECLAST", 2, Operands { v0, v1 })
    // AESDECLAST xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AES)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // 66 mandatory prefix
            m.rexo(hcode(v[1]), v[0], false)              // optional REX for extended registers
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xdf)                                  // opcode 0F 38 DF
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM, register-direct: reg=dst, rm=src
        })
    }
    // AESDECLAST m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AES)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // 66 mandatory prefix
            m.rexo(hcode(v[1]), addr(v[0]), false)        // optional REX for the memory operand
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xdf)                                  // opcode 0F 38 DF
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the memory source
        })
    }
    if p.len == 0 {
        panic("invalid operands for AESDECLAST")
    }
    return p
}
  1302  
// AESENC performs "Perform One Round of an AES Encryption Flow".
//
// Mnemonic        : AESENC
// Supported forms : (2 forms)
//
//    * AESENC xmm, xmm     [AES]
//    * AESENC m128, xmm    [AES]
//
// Operands are source-first, destination-last. Requires the AES-NI
// extension. If no form matches, this panics.
func (self *Program) AESENC(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("AESENC", 2, Operands { v0, v1 })
    // AESENC xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AES)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // 66 mandatory prefix
            m.rexo(hcode(v[1]), v[0], false)              // optional REX for extended registers
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xdc)                                  // opcode 0F 38 DC
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM, register-direct: reg=dst, rm=src
        })
    }
    // AESENC m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AES)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // 66 mandatory prefix
            m.rexo(hcode(v[1]), addr(v[0]), false)        // optional REX for the memory operand
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xdc)                                  // opcode 0F 38 DC
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the memory source
        })
    }
    if p.len == 0 {
        panic("invalid operands for AESENC")
    }
    return p
}
  1344  
// AESENCLAST performs "Perform Last Round of an AES Encryption Flow".
//
// Mnemonic        : AESENCLAST
// Supported forms : (2 forms)
//
//    * AESENCLAST xmm, xmm     [AES]
//    * AESENCLAST m128, xmm    [AES]
//
// Operands are source-first, destination-last. Requires the AES-NI
// extension. If no form matches, this panics.
func (self *Program) AESENCLAST(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("AESENCLAST", 2, Operands { v0, v1 })
    // AESENCLAST xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AES)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // 66 mandatory prefix
            m.rexo(hcode(v[1]), v[0], false)              // optional REX for extended registers
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xdd)                                  // opcode 0F 38 DD
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM, register-direct: reg=dst, rm=src
        })
    }
    // AESENCLAST m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AES)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // 66 mandatory prefix
            m.rexo(hcode(v[1]), addr(v[0]), false)        // optional REX for the memory operand
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xdd)                                  // opcode 0F 38 DD
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the memory source
        })
    }
    if p.len == 0 {
        panic("invalid operands for AESENCLAST")
    }
    return p
}
  1386  
// AESIMC performs "Perform the AES InvMixColumn Transformation".
//
// Mnemonic        : AESIMC
// Supported forms : (2 forms)
//
//    * AESIMC xmm, xmm     [AES]
//    * AESIMC m128, xmm    [AES]
//
// Operands are source-first, destination-last. Requires the AES-NI
// extension. If no form matches, this panics.
func (self *Program) AESIMC(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("AESIMC", 2, Operands { v0, v1 })
    // AESIMC xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AES)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // 66 mandatory prefix
            m.rexo(hcode(v[1]), v[0], false)              // optional REX for extended registers
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xdb)                                  // opcode 0F 38 DB
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM, register-direct: reg=dst, rm=src
        })
    }
    // AESIMC m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AES)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // 66 mandatory prefix
            m.rexo(hcode(v[1]), addr(v[0]), false)        // optional REX for the memory operand
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xdb)                                  // opcode 0F 38 DB
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the memory source
        })
    }
    if p.len == 0 {
        panic("invalid operands for AESIMC")
    }
    return p
}
  1428  
// AESKEYGENASSIST performs "AES Round Key Generation Assist".
//
// Mnemonic        : AESKEYGENASSIST
// Supported forms : (2 forms)
//
//    * AESKEYGENASSIST imm8, xmm, xmm     [AES]
//    * AESKEYGENASSIST imm8, m128, xmm    [AES]
//
// Operands are source-first, destination-last: v0 is the round-constant
// immediate, v1 the source, v2 the destination. Requires the AES-NI
// extension. If no form matches, this panics.
func (self *Program) AESKEYGENASSIST(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("AESKEYGENASSIST", 3, Operands { v0, v1, v2 })
    // AESKEYGENASSIST imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AES)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // 66 mandatory prefix
            m.rexo(hcode(v[2]), v[1], false)              // optional REX for extended registers
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0xdf)                                  // opcode 0F 3A DF
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1])) // ModRM, register-direct: reg=dst, rm=src
            m.imm1(toImmAny(v[0]))                        // 8-bit round-constant immediate
        })
    }
    // AESKEYGENASSIST imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_AES)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // 66 mandatory prefix
            m.rexo(hcode(v[2]), addr(v[1]), false)        // optional REX for the memory operand
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0xdf)                                  // opcode 0F 3A DF
            m.mrsd(lcode(v[2]), addr(v[1]), 1)            // ModRM/SIB/displacement for the memory source
            m.imm1(toImmAny(v[0]))                        // 8-bit round-constant immediate
        })
    }
    if p.len == 0 {
        panic("invalid operands for AESKEYGENASSIST")
    }
    return p
}
  1472  
// ANDB performs "Logical AND".
//
// Mnemonic        : AND
// Supported forms : (6 forms)
//
//    * ANDB imm8, al
//    * ANDB imm8, r8
//    * ANDB r8, r8
//    * ANDB m8, r8
//    * ANDB imm8, m8
//    * ANDB r8, m8
//
// Operands are source-first, destination-last. The form checks are not
// mutually exclusive (imm8/al also matches imm8/r8); every matching
// encoding is registered as a candidate and the final selection happens
// elsewhere. If no form matches, this panics.
func (self *Program) ANDB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ANDB", 2, Operands { v0, v1 })
    // ANDB imm8, al
    if isImm8(v0) && v1 == AL {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x24)              // opcode 24 (AL-specific short form)
            m.imm1(toImmAny(v[0]))    // 8-bit immediate
        })
    }
    // ANDB imm8, r8
    if isImm8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))    // REX forced for registers that require it (isReg8REX)
            m.emit(0x80)                        // opcode 80 /4
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))              // 8-bit immediate
        })
    }
    // ANDB r8, r8 — two equivalent encodings with swapped direction (20 vs 22)
    if isReg8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], isReg8REX(v[0]) || isReg8REX(v[1]))
            m.emit(0x20)              // opcode 20: reg field = source
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], isReg8REX(v[0]) || isReg8REX(v[1]))
            m.emit(0x22)              // opcode 22: reg field = destination
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // ANDB m8, r8
    if isM8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), isReg8REX(v[1]))
            m.emit(0x22)              // opcode 22 (load direction)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // ANDB imm8, m8
    if isImm8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0x80)              // opcode 80 /4
            m.mrsd(4, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))    // 8-bit immediate
        })
    }
    // ANDB r8, m8
    if isReg8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), isReg8REX(v[0]))
            m.emit(0x20)              // opcode 20 (store direction)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for ANDB")
    }
    return p
}
  1552  
// ANDL performs "Logical AND".
//
// Mnemonic        : AND
// Supported forms : (8 forms)
//
//    * ANDL imm32, eax
//    * ANDL imm8, r32
//    * ANDL imm32, r32
//    * ANDL r32, r32
//    * ANDL m32, r32
//    * ANDL imm8, m32
//    * ANDL imm32, m32
//    * ANDL r32, m32
//
// Operands are source-first, destination-last. The form checks are not
// mutually exclusive (a small immediate may match both imm8 and imm32
// forms); every matching encoding is registered as a candidate and the
// final selection happens elsewhere. If no form matches, this panics.
func (self *Program) ANDL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ANDL", 2, Operands { v0, v1 })
    // ANDL imm32, eax
    if isImm32(v0) && v1 == EAX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x25)              // opcode 25 (EAX-specific short form)
            m.imm4(toImmAny(v[0]))    // 32-bit immediate
        })
    }
    // ANDL imm8, r32
    if isImm8Ext(v0, 4) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)    // optional REX for extended registers
            m.emit(0x83)              // opcode 83 /4 (sign-extended imm8)
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))    // 8-bit immediate
        })
    }
    // ANDL imm32, r32
    if isImm32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)    // optional REX for extended registers
            m.emit(0x81)              // opcode 81 /4 (full imm32)
            m.emit(0xe0 | lcode(v[1]))
            m.imm4(toImmAny(v[0]))    // 32-bit immediate
        })
    }
    // ANDL r32, r32 — two equivalent encodings with swapped direction (21 vs 23)
    if isReg32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x21)              // opcode 21: reg field = source
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x23)              // opcode 23: reg field = destination
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // ANDL m32, r32
    if isM32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x23)              // opcode 23 (load direction)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // ANDL imm8, m32
    if isImm8Ext(v0, 4) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0x83)              // opcode 83 /4 (sign-extended imm8)
            m.mrsd(4, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // ANDL imm32, m32
    if isImm32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0x81)              // opcode 81 /4 (full imm32)
            m.mrsd(4, addr(v[1]), 1)
            m.imm4(toImmAny(v[0]))
        })
    }
    // ANDL r32, m32
    if isReg32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x21)              // opcode 21 (store direction)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for ANDL")
    }
    return p
}
  1654  
// ANDNL performs "Logical AND NOT".
//
// Mnemonic        : ANDN
// Supported forms : (2 forms)
//
//    * ANDNL r32, r32, r32    [BMI]
//    * ANDNL m32, r32, r32    [BMI]
//
// Operands are source-first, destination-last: v2 receives the result.
// Requires the BMI extension; the instruction is VEX-encoded. If no form
// matches, this panics.
func (self *Program) ANDNL(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("ANDNL", 3, Operands { v0, v1, v2 })
    // ANDNL r32, r32, r32
    if isReg32(v0) && isReg32(v1) && isReg32(v2) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                  // 3-byte VEX prefix, byte 0
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))    // VEX byte 1: map select + inverted R/B bits
            m.emit(0x78 ^ (hlcode(v[1]) << 3))            // VEX byte 2: inverted vvvv encodes v1
            m.emit(0xf2)                                  // opcode F2
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM, register-direct: reg=dst, rm=src
        })
    }
    // ANDNL m32, r32, r32
    if isM32(v0) && isReg32(v1) && isReg32(v2) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x00, hcode(v[2]), addr(v[0]), hlcode(v[1]))    // 3-byte VEX via helper
            m.emit(0xf2)                                  // opcode F2
            m.mrsd(lcode(v[2]), addr(v[0]), 1)            // ModRM/SIB/displacement for the memory source
        })
    }
    if p.len == 0 {
        panic("invalid operands for ANDNL")
    }
    return p
}
  1692  
// ANDNPD performs "Bitwise Logical AND NOT of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : ANDNPD
// Supported forms : (2 forms)
//
//    * ANDNPD xmm, xmm     [SSE2]
//    * ANDNPD m128, xmm    [SSE2]
//
// Operands are source-first, destination-last. Each matching form registers
// a candidate byte encoding; if no form matches, this panics.
func (self *Program) ANDNPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ANDNPD", 2, Operands { v0, v1 })
    // ANDNPD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // 66 mandatory prefix
            m.rexo(hcode(v[1]), v[0], false)              // optional REX for extended registers
            m.emit(0x0f)
            m.emit(0x55)                                  // opcode 0F 55
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM, register-direct: reg=dst, rm=src
        })
    }
    // ANDNPD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // 66 mandatory prefix
            m.rexo(hcode(v[1]), addr(v[0]), false)        // optional REX for the memory operand
            m.emit(0x0f)
            m.emit(0x55)                                  // opcode 0F 55
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the memory source
        })
    }
    if p.len == 0 {
        panic("invalid operands for ANDNPD")
    }
    return p
}
  1732  
// ANDNPS performs "Bitwise Logical AND NOT of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : ANDNPS
// Supported forms : (2 forms)
//
//    * ANDNPS xmm, xmm     [SSE]
//    * ANDNPS m128, xmm    [SSE]
//
func (self *Program) ANDNPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ANDNPS", 2, Operands { v0, v1 })
    // ANDNPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)              // optional REX for extended XMM registers
            m.emit(0x0f)                                  // two-byte opcode escape
            m.emit(0x55)                                  // opcode: ANDNPS (no 66 prefix = packed-single form)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst xmm, rm=src xmm
        })
    }
    // ANDNPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)        // optional REX (also covers memory base/index registers)
            m.emit(0x0f)
            m.emit(0x55)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the m128 source
        })
    }
    // no supported operand combination matched
    if p.len == 0 {
        panic("invalid operands for ANDNPS")
    }
    return p
}
  1770  
// ANDNQ performs "Logical AND NOT".
//
// Mnemonic        : ANDN
// Supported forms : (2 forms)
//
//    * ANDNQ r64, r64, r64    [BMI]
//    * ANDNQ m64, r64, r64    [BMI]
//
// Operand order is (src1, src2, dst): dst = ^src1 & src2.
func (self *Program) ANDNQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("ANDNQ", 3, Operands { v0, v1, v2 })
    // ANDNQ r64, r64, r64
    if isReg64(v0) && isReg64(v1) && isReg64(v2) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                             // 3-byte VEX prefix, byte 0
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))   // VEX byte 1: ~R/~B for dst/rm + opcode map (0F38)
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))                       // VEX byte 2: W=1 (64-bit), ~vvvv = second source
            m.emit(0xf2)                                             // opcode: ANDN
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))            // ModRM: mod=11, reg=dst, rm=first source
        })
    }
    // ANDNQ m64, r64, r64
    if isM64(v0) && isReg64(v1) && isReg64(v2) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x80, hcode(v[2]), addr(v[0]), hlcode(v[1]))  // 3-byte VEX via helper (0x80 sets W for 64-bit)
            m.emit(0xf2)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                       // ModRM/SIB/displacement for the m64 source
        })
    }
    // no supported operand combination matched
    if p.len == 0 {
        panic("invalid operands for ANDNQ")
    }
    return p
}
  1808  
// ANDPD performs "Bitwise Logical AND of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : ANDPD
// Supported forms : (2 forms)
//
//    * ANDPD xmm, xmm     [SSE2]
//    * ANDPD m128, xmm    [SSE2]
//
func (self *Program) ANDPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ANDPD", 2, Operands { v0, v1 })
    // ANDPD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // mandatory 66 prefix: selects the packed-double form
            m.rexo(hcode(v[1]), v[0], false)              // optional REX for extended XMM registers
            m.emit(0x0f)                                  // two-byte opcode escape
            m.emit(0x54)                                  // opcode: ANDPD
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst xmm, rm=src xmm
        })
    }
    // ANDPD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)        // optional REX (also covers memory base/index registers)
            m.emit(0x0f)
            m.emit(0x54)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the m128 source
        })
    }
    // no supported operand combination matched
    if p.len == 0 {
        panic("invalid operands for ANDPD")
    }
    return p
}
  1848  
// ANDPS performs "Bitwise Logical AND of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : ANDPS
// Supported forms : (2 forms)
//
//    * ANDPS xmm, xmm     [SSE]
//    * ANDPS m128, xmm    [SSE]
//
func (self *Program) ANDPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ANDPS", 2, Operands { v0, v1 })
    // ANDPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)              // optional REX for extended XMM registers
            m.emit(0x0f)                                  // two-byte opcode escape
            m.emit(0x54)                                  // opcode: ANDPS (no 66 prefix = packed-single form)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst xmm, rm=src xmm
        })
    }
    // ANDPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)        // optional REX (also covers memory base/index registers)
            m.emit(0x0f)
            m.emit(0x54)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the m128 source
        })
    }
    // no supported operand combination matched
    if p.len == 0 {
        panic("invalid operands for ANDPS")
    }
    return p
}
  1886  
// ANDQ performs "Logical AND".
//
// Mnemonic        : AND
// Supported forms : (8 forms)
//
//    * ANDQ imm32, rax
//    * ANDQ imm8, r64
//    * ANDQ imm32, r64
//    * ANDQ r64, r64
//    * ANDQ m64, r64
//    * ANDQ imm8, m64
//    * ANDQ imm32, m64
//    * ANDQ r64, m64
//
func (self *Program) ANDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ANDQ", 2, Operands { v0, v1 })
    // ANDQ imm32, rax
    if isImm32(v0) && v1 == RAX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48)                    // REX.W: 64-bit operand size
            m.emit(0x25)                    // opcode: AND rax, imm32 (short accumulator form)
            m.imm4(toImmAny(v[0]))          // 4-byte immediate
        })
    }
    // ANDQ imm8, r64
    if isImm8Ext(v0, 8) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))      // REX.W plus B-bit for extended destination register
            m.emit(0x83)                    // opcode: group-1 with sign-extended imm8
            m.emit(0xe0 | lcode(v[1]))      // ModRM: mod=11, reg=/4 (AND), rm=dst
            m.imm1(toImmAny(v[0]))          // 1-byte immediate (sign-extended to 64 bits)
        })
    }
    // ANDQ imm32, r64
    if isImm32Ext(v0, 8) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0x81)                    // opcode: group-1 with imm32
            m.emit(0xe0 | lcode(v[1]))      // ModRM: mod=11, reg=/4 (AND), rm=dst
            m.imm4(toImmAny(v[0]))          // 4-byte immediate (sign-extended to 64 bits)
        })
    }
    // ANDQ r64, r64
    if isReg64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1]))  // REX.W + R/B bits
            m.emit(0x21)                                   // opcode: AND r/m64, r64 (MR form)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))  // ModRM: reg=src, rm=dst
        })
        // alternative, equally valid RM-form encoding of the same instruction
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
            m.emit(0x23)                                   // opcode: AND r64, r/m64 (RM form)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: reg=dst, rm=src
        })
    }
    // ANDQ m64, r64
    if isM64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))  // REX.W for a memory operand (1 = wide)
            m.emit(0x23)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM/SIB/displacement for the m64 source
        })
    }
    // ANDQ imm8, m64
    if isImm8Ext(v0, 8) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0x83)
            m.mrsd(4, addr(v[1]), 1)            // reg field = /4 (AND opcode extension)
            m.imm1(toImmAny(v[0]))
        })
    }
    // ANDQ imm32, m64
    if isImm32Ext(v0, 8) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0x81)
            m.mrsd(4, addr(v[1]), 1)            // reg field = /4 (AND opcode extension)
            m.imm4(toImmAny(v[0]))
        })
    }
    // ANDQ r64, m64
    if isReg64(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[0]), addr(v[1]))
            m.emit(0x21)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)  // ModRM/SIB/displacement for the m64 destination
        })
    }
    // no supported operand combination matched
    if p.len == 0 {
        panic("invalid operands for ANDQ")
    }
    return p
}
  1989  
// ANDW performs "Logical AND".
//
// Mnemonic        : AND
// Supported forms : (8 forms)
//
//    * ANDW imm16, ax
//    * ANDW imm8, r16
//    * ANDW imm16, r16
//    * ANDW r16, r16
//    * ANDW m16, r16
//    * ANDW imm8, m16
//    * ANDW imm16, m16
//    * ANDW r16, m16
//
// All forms carry the 0x66 operand-size prefix to select 16-bit operands.
func (self *Program) ANDW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ANDW", 2, Operands { v0, v1 })
    // ANDW imm16, ax
    if isImm16(v0) && v1 == AX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                    // operand-size override: 16-bit
            m.emit(0x25)                    // opcode: AND ax, imm16 (short accumulator form)
            m.imm2(toImmAny(v[0]))          // 2-byte immediate
        })
    }
    // ANDW imm8, r16
    if isImm8Ext(v0, 2) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)          // optional REX for extended destination register
            m.emit(0x83)                    // opcode: group-1 with sign-extended imm8
            m.emit(0xe0 | lcode(v[1]))      // ModRM: mod=11, reg=/4 (AND), rm=dst
            m.imm1(toImmAny(v[0]))
        })
    }
    // ANDW imm16, r16
    if isImm16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0x81)                    // opcode: group-1 with imm16
            m.emit(0xe0 | lcode(v[1]))      // ModRM: mod=11, reg=/4 (AND), rm=dst
            m.imm2(toImmAny(v[0]))
        })
    }
    // ANDW r16, r16
    if isReg16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x21)                                   // opcode: AND r/m16, r16 (MR form)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))  // ModRM: reg=src, rm=dst
        })
        // alternative, equally valid RM-form encoding of the same instruction
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x23)                                   // opcode: AND r16, r/m16 (RM form)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: reg=dst, rm=src
        })
    }
    // ANDW m16, r16
    if isM16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x23)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM/SIB/displacement for the m16 source
        })
    }
    // ANDW imm8, m16
    if isImm8Ext(v0, 2) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0x83)
            m.mrsd(4, addr(v[1]), 1)            // reg field = /4 (AND opcode extension)
            m.imm1(toImmAny(v[0]))
        })
    }
    // ANDW imm16, m16
    if isImm16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0x81)
            m.mrsd(4, addr(v[1]), 1)            // reg field = /4 (AND opcode extension)
            m.imm2(toImmAny(v[0]))
        })
    }
    // ANDW r16, m16
    if isReg16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x21)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)  // ModRM/SIB/displacement for the m16 destination
        })
    }
    // no supported operand combination matched
    if p.len == 0 {
        panic("invalid operands for ANDW")
    }
    return p
}
  2100  
// BEXTR performs "Bit Field Extract".
//
// Mnemonic        : BEXTR
// Supported forms : (8 forms)
//
//    * BEXTR imm32, r32, r32    [TBM]
//    * BEXTR imm32, m32, r32    [TBM]
//    * BEXTR imm32, r64, r64    [TBM]
//    * BEXTR imm32, m64, r64    [TBM]
//    * BEXTR r32, r32, r32      [BMI]
//    * BEXTR r32, m32, r32      [BMI]
//    * BEXTR r64, r64, r64      [BMI]
//    * BEXTR r64, m64, r64      [BMI]
//
// The TBM forms take the start/length control as an immediate (AMD XOP encoding);
// the BMI forms take it in a register (VEX encoding).
func (self *Program) BEXTR(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("BEXTR", 3, Operands { v0, v1, v2 })
    // BEXTR imm32, r32, r32
    if isImm32(v0) && isReg32(v1) && isReg32(v2) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                             // 3-byte XOP prefix, byte 0
            m.emit(0xea ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))   // XOP byte 1: ~R/~B + map select (0A)
            m.emit(0x78)                                             // XOP byte 2: W=0 (32-bit), vvvv unused
            m.emit(0x10)                                             // opcode: BEXTR (immediate form)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))            // ModRM: mod=11, reg=dst, rm=src
            m.imm4(toImmAny(v[0]))                                   // 4-byte control immediate
        })
    }
    // BEXTR imm32, m32, r32
    if isImm32(v0) && isM32(v1) && isReg32(v2) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1010, 0x00, hcode(v[2]), addr(v[1]), 0)   // XOP prefix via helper (map 0A, W=0)
            m.emit(0x10)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)                       // ModRM/SIB/displacement for the m32 source
            m.imm4(toImmAny(v[0]))
        })
    }
    // BEXTR imm32, r64, r64
    if isImm32(v0) && isReg64(v1) && isReg64(v2) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xea ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0xf8)                                             // XOP byte 2: W=1 (64-bit operands)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm4(toImmAny(v[0]))
        })
    }
    // BEXTR imm32, m64, r64
    if isImm32(v0) && isM64(v1) && isReg64(v2) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1010, 0x80, hcode(v[2]), addr(v[1]), 0)   // XOP prefix via helper (0x80 sets W for 64-bit)
            m.emit(0x10)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm4(toImmAny(v[0]))
        })
    }
    // BEXTR r32, r32, r32
    if isReg32(v0) && isReg32(v1) && isReg32(v2) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                             // 3-byte VEX prefix, byte 0
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))   // VEX byte 1: ~R/~B + opcode map (0F38)
            m.emit(0x78 ^ (hlcode(v[0]) << 3))                       // VEX byte 2: W=0, ~vvvv = control register
            m.emit(0xf7)                                             // opcode: BEXTR (register form)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))            // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // BEXTR r32, m32, r32
    if isReg32(v0) && isM32(v1) && isReg32(v2) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x00, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0xf7)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // BEXTR r64, r64, r64
    if isReg64(v0) && isReg64(v1) && isReg64(v2) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0xf8 ^ (hlcode(v[0]) << 3))                       // VEX byte 2: W=1 (64-bit operands)
            m.emit(0xf7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // BEXTR r64, m64, r64
    if isReg64(v0) && isM64(v1) && isReg64(v2) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x80, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0xf7)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // no supported operand combination matched
    if p.len == 0 {
        panic("invalid operands for BEXTR")
    }
    return p
}
  2214  
// BLCFILL performs "Fill From Lowest Clear Bit".
//
// Mnemonic        : BLCFILL
// Supported forms : (4 forms)
//
//    * BLCFILL r32, r32    [TBM]
//    * BLCFILL m32, r32    [TBM]
//    * BLCFILL r64, r64    [TBM]
//    * BLCFILL m64, r64    [TBM]
//
// AMD TBM instruction, XOP-encoded (map 9, opcode 0x01, reg field /1);
// the destination register travels in XOP.vvvv.
func (self *Program) BLCFILL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BLCFILL", 2, Operands { v0, v1 })
    // BLCFILL r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                            // 3-byte XOP prefix, byte 0
            m.emit(0xe9 ^ (hcode(v[0]) << 5))       // XOP byte 1: ~B for rm + map select (9)
            m.emit(0x78 ^ (hlcode(v[1]) << 3))      // XOP byte 2: W=0 (32-bit), ~vvvv = destination
            m.emit(0x01)                            // opcode
            m.emit(0xc8 | lcode(v[0]))              // ModRM: mod=11, reg=/1 (opcode extension), rm=src
        })
    }
    // BLCFILL m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, 0, addr(v[0]), hlcode(v[1]))  // XOP prefix via helper (map 9, W=0)
            m.emit(0x01)
            m.mrsd(1, addr(v[0]), 1)                // reg field = /1; ModRM/SIB/displacement for the m32 source
        })
    }
    // BLCFILL r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))      // XOP byte 2: W=1 (64-bit operands)
            m.emit(0x01)
            m.emit(0xc8 | lcode(v[0]))
        })
    }
    // BLCFILL m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x80, 0, addr(v[0]), hlcode(v[1]))  // 0x80 sets W for 64-bit
            m.emit(0x01)
            m.mrsd(1, addr(v[0]), 1)
        })
    }
    // no supported operand combination matched
    if p.len == 0 {
        panic("invalid operands for BLCFILL")
    }
    return p
}
  2276  
// BLCI performs "Isolate Lowest Clear Bit".
//
// Mnemonic        : BLCI
// Supported forms : (4 forms)
//
//    * BLCI r32, r32    [TBM]
//    * BLCI m32, r32    [TBM]
//    * BLCI r64, r64    [TBM]
//    * BLCI m64, r64    [TBM]
//
// AMD TBM instruction, XOP-encoded (map 9, opcode 0x02, reg field /6);
// the destination register travels in XOP.vvvv.
func (self *Program) BLCI(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BLCI", 2, Operands { v0, v1 })
    // BLCI r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                            // 3-byte XOP prefix, byte 0
            m.emit(0xe9 ^ (hcode(v[0]) << 5))       // XOP byte 1: ~B for rm + map select (9)
            m.emit(0x78 ^ (hlcode(v[1]) << 3))      // XOP byte 2: W=0 (32-bit), ~vvvv = destination
            m.emit(0x02)                            // opcode
            m.emit(0xf0 | lcode(v[0]))              // ModRM: mod=11, reg=/6 (opcode extension), rm=src
        })
    }
    // BLCI m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, 0, addr(v[0]), hlcode(v[1]))  // XOP prefix via helper (map 9, W=0)
            m.emit(0x02)
            m.mrsd(6, addr(v[0]), 1)                // reg field = /6; ModRM/SIB/displacement for the m32 source
        })
    }
    // BLCI r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))      // XOP byte 2: W=1 (64-bit operands)
            m.emit(0x02)
            m.emit(0xf0 | lcode(v[0]))
        })
    }
    // BLCI m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x80, 0, addr(v[0]), hlcode(v[1]))  // 0x80 sets W for 64-bit
            m.emit(0x02)
            m.mrsd(6, addr(v[0]), 1)
        })
    }
    // no supported operand combination matched
    if p.len == 0 {
        panic("invalid operands for BLCI")
    }
    return p
}
  2338  
// BLCIC performs "Isolate Lowest Set Bit and Complement".
//
// Mnemonic        : BLCIC
// Supported forms : (4 forms)
//
//    * BLCIC r32, r32    [TBM]
//    * BLCIC m32, r32    [TBM]
//    * BLCIC r64, r64    [TBM]
//    * BLCIC m64, r64    [TBM]
//
// AMD TBM instruction, XOP-encoded (map 9, opcode 0x01, reg field /5);
// the destination register travels in XOP.vvvv.
func (self *Program) BLCIC(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BLCIC", 2, Operands { v0, v1 })
    // BLCIC r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                            // 3-byte XOP prefix, byte 0
            m.emit(0xe9 ^ (hcode(v[0]) << 5))       // XOP byte 1: ~B for rm + map select (9)
            m.emit(0x78 ^ (hlcode(v[1]) << 3))      // XOP byte 2: W=0 (32-bit), ~vvvv = destination
            m.emit(0x01)                            // opcode
            m.emit(0xe8 | lcode(v[0]))              // ModRM: mod=11, reg=/5 (opcode extension), rm=src
        })
    }
    // BLCIC m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, 0, addr(v[0]), hlcode(v[1]))  // XOP prefix via helper (map 9, W=0)
            m.emit(0x01)
            m.mrsd(5, addr(v[0]), 1)                // reg field = /5; ModRM/SIB/displacement for the m32 source
        })
    }
    // BLCIC r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))      // XOP byte 2: W=1 (64-bit operands)
            m.emit(0x01)
            m.emit(0xe8 | lcode(v[0]))
        })
    }
    // BLCIC m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x80, 0, addr(v[0]), hlcode(v[1]))  // 0x80 sets W for 64-bit
            m.emit(0x01)
            m.mrsd(5, addr(v[0]), 1)
        })
    }
    // no supported operand combination matched
    if p.len == 0 {
        panic("invalid operands for BLCIC")
    }
    return p
}
  2400  
// BLCMSK performs "Mask From Lowest Clear Bit".
//
// Mnemonic        : BLCMSK
// Supported forms : (4 forms)
//
//    * BLCMSK r32, r32    [TBM]
//    * BLCMSK m32, r32    [TBM]
//    * BLCMSK r64, r64    [TBM]
//    * BLCMSK m64, r64    [TBM]
//
// AMD TBM instruction, XOP-encoded (map 9, opcode 0x02, reg field /1);
// the destination register travels in XOP.vvvv.
func (self *Program) BLCMSK(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BLCMSK", 2, Operands { v0, v1 })
    // BLCMSK r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                            // 3-byte XOP prefix, byte 0
            m.emit(0xe9 ^ (hcode(v[0]) << 5))       // XOP byte 1: ~B for rm + map select (9)
            m.emit(0x78 ^ (hlcode(v[1]) << 3))      // XOP byte 2: W=0 (32-bit), ~vvvv = destination
            m.emit(0x02)                            // opcode
            m.emit(0xc8 | lcode(v[0]))              // ModRM: mod=11, reg=/1 (opcode extension), rm=src
        })
    }
    // BLCMSK m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, 0, addr(v[0]), hlcode(v[1]))  // XOP prefix via helper (map 9, W=0)
            m.emit(0x02)
            m.mrsd(1, addr(v[0]), 1)                // reg field = /1; ModRM/SIB/displacement for the m32 source
        })
    }
    // BLCMSK r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))      // XOP byte 2: W=1 (64-bit operands)
            m.emit(0x02)
            m.emit(0xc8 | lcode(v[0]))
        })
    }
    // BLCMSK m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x80, 0, addr(v[0]), hlcode(v[1]))  // 0x80 sets W for 64-bit
            m.emit(0x02)
            m.mrsd(1, addr(v[0]), 1)
        })
    }
    // no supported operand combination matched
    if p.len == 0 {
        panic("invalid operands for BLCMSK")
    }
    return p
}
  2462  
// BLCS performs "Set Lowest Clear Bit".
//
// Mnemonic        : BLCS
// Supported forms : (4 forms)
//
//    * BLCS r32, r32    [TBM]
//    * BLCS m32, r32    [TBM]
//    * BLCS r64, r64    [TBM]
//    * BLCS m64, r64    [TBM]
//
// AMD TBM instruction, XOP-encoded (map 9, opcode 0x01, reg field /3);
// the destination register travels in XOP.vvvv.
func (self *Program) BLCS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BLCS", 2, Operands { v0, v1 })
    // BLCS r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                            // 3-byte XOP prefix, byte 0
            m.emit(0xe9 ^ (hcode(v[0]) << 5))       // XOP byte 1: ~B for rm + map select (9)
            m.emit(0x78 ^ (hlcode(v[1]) << 3))      // XOP byte 2: W=0 (32-bit), ~vvvv = destination
            m.emit(0x01)                            // opcode
            m.emit(0xd8 | lcode(v[0]))              // ModRM: mod=11, reg=/3 (opcode extension), rm=src
        })
    }
    // BLCS m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, 0, addr(v[0]), hlcode(v[1]))  // XOP prefix via helper (map 9, W=0)
            m.emit(0x01)
            m.mrsd(3, addr(v[0]), 1)                // reg field = /3; ModRM/SIB/displacement for the m32 source
        })
    }
    // BLCS r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))      // XOP byte 2: W=1 (64-bit operands)
            m.emit(0x01)
            m.emit(0xd8 | lcode(v[0]))
        })
    }
    // BLCS m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x80, 0, addr(v[0]), hlcode(v[1]))  // 0x80 sets W for 64-bit
            m.emit(0x01)
            m.mrsd(3, addr(v[0]), 1)
        })
    }
    // no supported operand combination matched
    if p.len == 0 {
        panic("invalid operands for BLCS")
    }
    return p
}
  2524  
// BLENDPD performs "Blend Packed Double Precision Floating-Point Values".
//
// Mnemonic        : BLENDPD
// Supported forms : (2 forms)
//
//    * BLENDPD imm8, xmm, xmm     [SSE4.1]
//    * BLENDPD imm8, m128, xmm    [SSE4.1]
//
func (self *Program) BLENDPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("BLENDPD", 3, Operands { v0, v1, v2 })
    // BLENDPD imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // mandatory 66 prefix for this SSE4.1 form
            m.rexo(hcode(v[2]), v[1], false)              // optional REX for extended XMM registers
            m.emit(0x0f)                                  // three-byte opcode escape: 0F 3A
            m.emit(0x3a)
            m.emit(0x0d)                                  // opcode: BLENDPD
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1])) // ModRM: mod=11, reg=dst xmm, rm=src xmm
            m.imm1(toImmAny(v[0]))                        // 1-byte blend-control immediate
        })
    }
    // BLENDPD imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)        // optional REX (also covers memory base/index registers)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x0d)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)            // ModRM/SIB/displacement for the m128 source
            m.imm1(toImmAny(v[0]))
        })
    }
    // no supported operand combination matched
    if p.len == 0 {
        panic("invalid operands for BLENDPD")
    }
    return p
}
  2568  
// BLENDPS performs "Blend Packed Single Precision Floating-Point Values".
//
// Each operand form that matches registers one candidate encoding on the
// returned instruction; if no form matches, this method panics.
//
// Mnemonic        : BLENDPS
// Supported forms : (2 forms)
//
//    * BLENDPS imm8, xmm, xmm     [SSE4.1]
//    * BLENDPS imm8, m128, xmm    [SSE4.1]
//
func (self *Program) BLENDPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("BLENDPS", 3, Operands { v0, v1, v2 })
    // BLENDPS imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // mandatory 66 prefix for this SSE4.1 form
            m.rexo(hcode(v[2]), v[1], false)                // optional REX when xmm8-xmm15 are involved
            m.emit(0x0f)                                    // three-byte opcode: 0F 3A 0C
            m.emit(0x3a)
            m.emit(0x0c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))   // ModRM: mod=11 (register-direct), reg=dst, rm=src
            m.imm1(toImmAny(v[0]))                          // trailing imm8 blend-control mask
        })
    }
    // BLENDPS imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x0c)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)              // ModRM + SIB/disp for the memory source
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for BLENDPS")
    }
    return p
}
  2612  
// BLENDVPD performs "Variable Blend Packed Double Precision Floating-Point Values".
//
// The implicit selector register must be passed explicitly as v0 and must be
// exactly XMM0; no other register is accepted.
//
// Mnemonic        : BLENDVPD
// Supported forms : (2 forms)
//
//    * BLENDVPD xmm0, xmm, xmm     [SSE4.1]
//    * BLENDVPD xmm0, m128, xmm    [SSE4.1]
//
func (self *Program) BLENDVPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("BLENDVPD", 3, Operands { v0, v1, v2 })
    // BLENDVPD xmm0, xmm, xmm
    if v0 == XMM0 && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // mandatory 66 prefix for this SSE4.1 form
            m.rexo(hcode(v[2]), v[1], false)                // optional REX when xmm8-xmm15 are involved
            m.emit(0x0f)                                    // three-byte opcode: 0F 38 15
            m.emit(0x38)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))   // ModRM: mod=11 (register-direct), reg=dst, rm=src
        })
    }
    // BLENDVPD xmm0, m128, xmm
    if v0 == XMM0 && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x15)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)              // ModRM + SIB/disp for the memory source
        })
    }
    if p.len == 0 {
        panic("invalid operands for BLENDVPD")
    }
    return p
}
  2654  
// BLENDVPS performs "Variable Blend Packed Single Precision Floating-Point Values".
//
// The implicit selector register must be passed explicitly as v0 and must be
// exactly XMM0; no other register is accepted.
//
// Mnemonic        : BLENDVPS
// Supported forms : (2 forms)
//
//    * BLENDVPS xmm0, xmm, xmm     [SSE4.1]
//    * BLENDVPS xmm0, m128, xmm    [SSE4.1]
//
func (self *Program) BLENDVPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("BLENDVPS", 3, Operands { v0, v1, v2 })
    // BLENDVPS xmm0, xmm, xmm
    if v0 == XMM0 && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // mandatory 66 prefix for this SSE4.1 form
            m.rexo(hcode(v[2]), v[1], false)                // optional REX when xmm8-xmm15 are involved
            m.emit(0x0f)                                    // three-byte opcode: 0F 38 14
            m.emit(0x38)
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))   // ModRM: mod=11 (register-direct), reg=dst, rm=src
        })
    }
    // BLENDVPS xmm0, m128, xmm
    if v0 == XMM0 && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x14)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)              // ModRM + SIB/disp for the memory source
        })
    }
    if p.len == 0 {
        panic("invalid operands for BLENDVPS")
    }
    return p
}
  2696  
// BLSFILL performs "Fill From Lowest Set Bit".
//
// BLSFILL is an AMD TBM instruction and uses the XOP encoding (escape byte
// 0x8F, opcode map 9) with ModRM reg-field extension /2.
//
// Mnemonic        : BLSFILL
// Supported forms : (4 forms)
//
//    * BLSFILL r32, r32    [TBM]
//    * BLSFILL m32, r32    [TBM]
//    * BLSFILL r64, r64    [TBM]
//    * BLSFILL m64, r64    [TBM]
//
func (self *Program) BLSFILL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BLSFILL", 2, Operands { v0, v1 })
    // BLSFILL r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                            // XOP escape byte
            m.emit(0xe9 ^ (hcode(v[0]) << 5))       // XOP byte 1: map 9; XOR folds extended-register bit of the source into ~B
            m.emit(0x78 ^ (hlcode(v[1]) << 3))      // XOP byte 2: W=0 (32-bit), vvvv = inverted destination
            m.emit(0x01)                            // opcode
            m.emit(0xd0 | lcode(v[0]))              // ModRM: mod=11, reg=/2 opcode extension, rm=source
        })
    }
    // BLSFILL m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, 0, addr(v[0]), hlcode(v[1]))   // XOP map 9, W=0
            m.emit(0x01)
            m.mrsd(2, addr(v[0]), 1)                // /2 with memory operand
        })
    }
    // BLSFILL r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))      // XOP byte 2: W=1 selects the 64-bit form
            m.emit(0x01)
            m.emit(0xd0 | lcode(v[0]))
        })
    }
    // BLSFILL m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x80, 0, addr(v[0]), hlcode(v[1]))   // XOP map 9, W=1
            m.emit(0x01)
            m.mrsd(2, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for BLSFILL")
    }
    return p
}
  2758  
// BLSI performs "Isolate Lowest Set Bit".
//
// BLSI is a BMI1 instruction and uses the VEX encoding (escape byte 0xC4,
// opcode map 2, opcode 0xF3) with ModRM reg-field extension /3.
//
// Mnemonic        : BLSI
// Supported forms : (4 forms)
//
//    * BLSI r32, r32    [BMI]
//    * BLSI m32, r32    [BMI]
//    * BLSI r64, r64    [BMI]
//    * BLSI m64, r64    [BMI]
//
func (self *Program) BLSI(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BLSI", 2, Operands { v0, v1 })
    // BLSI r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                            // VEX three-byte escape
            m.emit(0xe2 ^ (hcode(v[0]) << 5))       // VEX byte 1: map 2; XOR folds extended-register bit of the source into ~B
            m.emit(0x78 ^ (hlcode(v[1]) << 3))      // VEX byte 2: W=0 (32-bit), vvvv = inverted destination
            m.emit(0xf3)                            // opcode (not a legacy F3 prefix here)
            m.emit(0xd8 | lcode(v[0]))              // ModRM: mod=11, reg=/3 opcode extension, rm=source
        })
    }
    // BLSI m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x00, 0, addr(v[0]), hlcode(v[1]))   // VEX map 2, W=0
            m.emit(0xf3)
            m.mrsd(3, addr(v[0]), 1)                // /3 with memory operand
        })
    }
    // BLSI r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))      // VEX byte 2: W=1 selects the 64-bit form
            m.emit(0xf3)
            m.emit(0xd8 | lcode(v[0]))
        })
    }
    // BLSI m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x80, 0, addr(v[0]), hlcode(v[1]))   // VEX map 2, W=1
            m.emit(0xf3)
            m.mrsd(3, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for BLSI")
    }
    return p
}
  2820  
// BLSIC performs "Isolate Lowest Set Bit and Complement".
//
// BLSIC is an AMD TBM instruction and uses the XOP encoding (escape byte
// 0x8F, opcode map 9) with ModRM reg-field extension /6.
//
// Mnemonic        : BLSIC
// Supported forms : (4 forms)
//
//    * BLSIC r32, r32    [TBM]
//    * BLSIC m32, r32    [TBM]
//    * BLSIC r64, r64    [TBM]
//    * BLSIC m64, r64    [TBM]
//
func (self *Program) BLSIC(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BLSIC", 2, Operands { v0, v1 })
    // BLSIC r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                            // XOP escape byte
            m.emit(0xe9 ^ (hcode(v[0]) << 5))       // XOP byte 1: map 9; XOR folds extended-register bit of the source into ~B
            m.emit(0x78 ^ (hlcode(v[1]) << 3))      // XOP byte 2: W=0 (32-bit), vvvv = inverted destination
            m.emit(0x01)                            // opcode
            m.emit(0xf0 | lcode(v[0]))              // ModRM: mod=11, reg=/6 opcode extension, rm=source
        })
    }
    // BLSIC m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, 0, addr(v[0]), hlcode(v[1]))   // XOP map 9, W=0
            m.emit(0x01)
            m.mrsd(6, addr(v[0]), 1)                // /6 with memory operand
        })
    }
    // BLSIC r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))      // XOP byte 2: W=1 selects the 64-bit form
            m.emit(0x01)
            m.emit(0xf0 | lcode(v[0]))
        })
    }
    // BLSIC m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x80, 0, addr(v[0]), hlcode(v[1]))   // XOP map 9, W=1
            m.emit(0x01)
            m.mrsd(6, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for BLSIC")
    }
    return p
}
  2882  
// BLSMSK performs "Mask From Lowest Set Bit".
//
// BLSMSK is a BMI1 instruction and uses the VEX encoding (escape byte 0xC4,
// opcode map 2, opcode 0xF3) with ModRM reg-field extension /2.
//
// Mnemonic        : BLSMSK
// Supported forms : (4 forms)
//
//    * BLSMSK r32, r32    [BMI]
//    * BLSMSK m32, r32    [BMI]
//    * BLSMSK r64, r64    [BMI]
//    * BLSMSK m64, r64    [BMI]
//
func (self *Program) BLSMSK(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BLSMSK", 2, Operands { v0, v1 })
    // BLSMSK r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                            // VEX three-byte escape
            m.emit(0xe2 ^ (hcode(v[0]) << 5))       // VEX byte 1: map 2; XOR folds extended-register bit of the source into ~B
            m.emit(0x78 ^ (hlcode(v[1]) << 3))      // VEX byte 2: W=0 (32-bit), vvvv = inverted destination
            m.emit(0xf3)                            // opcode (not a legacy F3 prefix here)
            m.emit(0xd0 | lcode(v[0]))              // ModRM: mod=11, reg=/2 opcode extension, rm=source
        })
    }
    // BLSMSK m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x00, 0, addr(v[0]), hlcode(v[1]))   // VEX map 2, W=0
            m.emit(0xf3)
            m.mrsd(2, addr(v[0]), 1)                // /2 with memory operand
        })
    }
    // BLSMSK r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))      // VEX byte 2: W=1 selects the 64-bit form
            m.emit(0xf3)
            m.emit(0xd0 | lcode(v[0]))
        })
    }
    // BLSMSK m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x80, 0, addr(v[0]), hlcode(v[1]))   // VEX map 2, W=1
            m.emit(0xf3)
            m.mrsd(2, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for BLSMSK")
    }
    return p
}
  2944  
// BLSR performs "Reset Lowest Set Bit".
//
// BLSR is a BMI1 instruction and uses the VEX encoding (escape byte 0xC4,
// opcode map 2, opcode 0xF3) with ModRM reg-field extension /1.
//
// Mnemonic        : BLSR
// Supported forms : (4 forms)
//
//    * BLSR r32, r32    [BMI]
//    * BLSR m32, r32    [BMI]
//    * BLSR r64, r64    [BMI]
//    * BLSR m64, r64    [BMI]
//
func (self *Program) BLSR(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BLSR", 2, Operands { v0, v1 })
    // BLSR r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                            // VEX three-byte escape
            m.emit(0xe2 ^ (hcode(v[0]) << 5))       // VEX byte 1: map 2; XOR folds extended-register bit of the source into ~B
            m.emit(0x78 ^ (hlcode(v[1]) << 3))      // VEX byte 2: W=0 (32-bit), vvvv = inverted destination
            m.emit(0xf3)                            // opcode (not a legacy F3 prefix here)
            m.emit(0xc8 | lcode(v[0]))              // ModRM: mod=11, reg=/1 opcode extension, rm=source
        })
    }
    // BLSR m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x00, 0, addr(v[0]), hlcode(v[1]))   // VEX map 2, W=0
            m.emit(0xf3)
            m.mrsd(1, addr(v[0]), 1)                // /1 with memory operand
        })
    }
    // BLSR r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))      // VEX byte 2: W=1 selects the 64-bit form
            m.emit(0xf3)
            m.emit(0xc8 | lcode(v[0]))
        })
    }
    // BLSR m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x80, 0, addr(v[0]), hlcode(v[1]))   // VEX map 2, W=1
            m.emit(0xf3)
            m.mrsd(1, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for BLSR")
    }
    return p
}
  3006  
// BSFL performs "Bit Scan Forward".
//
// 32-bit variant; encoded as 0F BC /r with an optional REX prefix for
// extended (r8d-r15d) registers.
//
// Mnemonic        : BSF
// Supported forms : (2 forms)
//
//    * BSFL r32, r32
//    * BSFL m32, r32
//
func (self *Program) BSFL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BSFL", 2, Operands { v0, v1 })
    // BSFL r32, r32
    if isReg32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                // optional REX: R from destination, B/X from source
            m.emit(0x0f)                                    // two-byte opcode: 0F BC
            m.emit(0xbc)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // BSFL m32, r32
    if isM32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xbc)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM + SIB/disp for the memory source
        })
    }
    if p.len == 0 {
        panic("invalid operands for BSFL")
    }
    return p
}
  3042  
// BSFQ performs "Bit Scan Forward".
//
// 64-bit variant; encoded as REX.W + 0F BC /r (the REX.W prefix is always
// emitted to select 64-bit operand size).
//
// Mnemonic        : BSF
// Supported forms : (2 forms)
//
//    * BSFQ r64, r64
//    * BSFQ m64, r64
//
func (self *Program) BSFQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BSFQ", 2, Operands { v0, v1 })
    // BSFQ r64, r64
    if isReg64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))   // REX.W with R (dst) and B (src) extension bits
            m.emit(0x0f)                                    // two-byte opcode: 0F BC
            m.emit(0xbc)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // BSFQ m64, r64
    if isM64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))              // REX.W derived from the memory operand
            m.emit(0x0f)
            m.emit(0xbc)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM + SIB/disp for the memory source
        })
    }
    if p.len == 0 {
        panic("invalid operands for BSFQ")
    }
    return p
}
  3078  
// BSFW performs "Bit Scan Forward".
//
// 16-bit variant; encoded as 66 0F BC /r (the 66 prefix selects 16-bit
// operand size).
//
// Mnemonic        : BSF
// Supported forms : (2 forms)
//
//    * BSFW r16, r16
//    * BSFW m16, r16
//
func (self *Program) BSFW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BSFW", 2, Operands { v0, v1 })
    // BSFW r16, r16
    if isReg16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // operand-size override prefix (16-bit)
            m.rexo(hcode(v[1]), v[0], false)                // optional REX for extended registers
            m.emit(0x0f)                                    // two-byte opcode: 0F BC
            m.emit(0xbc)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // BSFW m16, r16
    if isM16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xbc)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM + SIB/disp for the memory source
        })
    }
    if p.len == 0 {
        panic("invalid operands for BSFW")
    }
    return p
}
  3116  
// BSRL performs "Bit Scan Reverse".
//
// 32-bit variant; encoded as 0F BD /r with an optional REX prefix for
// extended (r8d-r15d) registers.
//
// Mnemonic        : BSR
// Supported forms : (2 forms)
//
//    * BSRL r32, r32
//    * BSRL m32, r32
//
func (self *Program) BSRL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BSRL", 2, Operands { v0, v1 })
    // BSRL r32, r32
    if isReg32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                // optional REX: R from destination, B/X from source
            m.emit(0x0f)                                    // two-byte opcode: 0F BD
            m.emit(0xbd)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // BSRL m32, r32
    if isM32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xbd)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM + SIB/disp for the memory source
        })
    }
    if p.len == 0 {
        panic("invalid operands for BSRL")
    }
    return p
}
  3152  
// BSRQ performs "Bit Scan Reverse".
//
// 64-bit variant; encoded as REX.W + 0F BD /r (the REX.W prefix is always
// emitted to select 64-bit operand size).
//
// Mnemonic        : BSR
// Supported forms : (2 forms)
//
//    * BSRQ r64, r64
//    * BSRQ m64, r64
//
func (self *Program) BSRQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BSRQ", 2, Operands { v0, v1 })
    // BSRQ r64, r64
    if isReg64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))   // REX.W with R (dst) and B (src) extension bits
            m.emit(0x0f)                                    // two-byte opcode: 0F BD
            m.emit(0xbd)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // BSRQ m64, r64
    if isM64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))              // REX.W derived from the memory operand
            m.emit(0x0f)
            m.emit(0xbd)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM + SIB/disp for the memory source
        })
    }
    if p.len == 0 {
        panic("invalid operands for BSRQ")
    }
    return p
}
  3188  
// BSRW performs "Bit Scan Reverse".
//
// 16-bit variant; encoded as 66 0F BD /r (the 66 prefix selects 16-bit
// operand size).
//
// Mnemonic        : BSR
// Supported forms : (2 forms)
//
//    * BSRW r16, r16
//    * BSRW m16, r16
//
func (self *Program) BSRW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BSRW", 2, Operands { v0, v1 })
    // BSRW r16, r16
    if isReg16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // operand-size override prefix (16-bit)
            m.rexo(hcode(v[1]), v[0], false)                // optional REX for extended registers
            m.emit(0x0f)                                    // two-byte opcode: 0F BD
            m.emit(0xbd)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // BSRW m16, r16
    if isM16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xbd)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM + SIB/disp for the memory source
        })
    }
    if p.len == 0 {
        panic("invalid operands for BSRW")
    }
    return p
}
  3226  
// BSWAPL performs "Byte Swap".
//
// 32-bit variant; the register is encoded directly into the opcode byte
// (0F C8+rd), so there is no ModRM byte.
//
// Mnemonic        : BSWAP
// Supported forms : (1 form)
//
//    * BSWAPL r32
//
func (self *Program) BSWAPL(v0 interface{}) *Instruction {
    p := self.alloc("BSWAPL", 1, Operands { v0 })
    // BSWAPL r32
    if isReg32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], false)          // optional REX.B for r8d-r15d
            m.emit(0x0f)                    // opcode: 0F C8+rd
            m.emit(0xc8 | lcode(v[0]))      // low 3 register bits folded into the opcode byte
        })
    }
    if p.len == 0 {
        panic("invalid operands for BSWAPL")
    }
    return p
}
  3250  
// BSWAPQ performs "Byte Swap".
//
// 64-bit variant; encoded as REX.W + 0F C8+rd — the register is folded
// into the opcode byte, so there is no ModRM byte.
//
// Mnemonic        : BSWAP
// Supported forms : (1 form)
//
//    * BSWAPQ r64
//
func (self *Program) BSWAPQ(v0 interface{}) *Instruction {
    p := self.alloc("BSWAPQ", 1, Operands { v0 })
    // BSWAPQ r64
    if isReg64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0]))      // REX.W plus B extension bit for r8-r15
            m.emit(0x0f)                    // opcode: 0F C8+rd
            m.emit(0xc8 | lcode(v[0]))      // low 3 register bits folded into the opcode byte
        })
    }
    if p.len == 0 {
        panic("invalid operands for BSWAPQ")
    }
    return p
}
  3274  
// BTCL performs "Bit Test and Complement".
//
// 32-bit variant. Immediate forms encode as 0F BA /7 ib; register-source
// forms encode as 0F BB /r.
//
// Mnemonic        : BTC
// Supported forms : (4 forms)
//
//    * BTCL imm8, r32
//    * BTCL r32, r32
//    * BTCL imm8, m32
//    * BTCL r32, m32
//
func (self *Program) BTCL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BTCL", 2, Operands { v0, v1 })
    // BTCL imm8, r32
    if isImm8(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)                          // optional REX for extended destination
            m.emit(0x0f)                                    // opcode: 0F BA /7
            m.emit(0xba)
            m.emit(0xf8 | lcode(v[1]))                      // ModRM: mod=11, reg=/7 opcode extension, rm=dst
            m.imm1(toImmAny(v[0]))                          // imm8 bit index
        })
    }
    // BTCL r32, r32
    if isReg32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)                                    // opcode: 0F BB /r
            m.emit(0xbb)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))   // ModRM: mod=11, reg=bit-index src, rm=dst
        })
    }
    // BTCL imm8, m32
    if isImm8(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xba)
            m.mrsd(7, addr(v[1]), 1)                        // /7 with memory destination
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTCL r32, m32
    if isReg32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xbb)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)              // ModRM + SIB/disp for the memory destination
        })
    }
    if p.len == 0 {
        panic("invalid operands for BTCL")
    }
    return p
}
  3334  
// BTCQ performs "Bit Test and Complement".
//
// 64-bit variant (REX.W always emitted). Immediate forms encode as
// REX.W 0F BA /7 ib; register-source forms encode as REX.W 0F BB /r.
//
// Mnemonic        : BTC
// Supported forms : (4 forms)
//
//    * BTCQ imm8, r64
//    * BTCQ r64, r64
//    * BTCQ imm8, m64
//    * BTCQ r64, m64
//
func (self *Program) BTCQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BTCQ", 2, Operands { v0, v1 })
    // BTCQ imm8, r64
    if isImm8(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))                      // REX.W plus B extension bit for the destination
            m.emit(0x0f)                                    // opcode: 0F BA /7
            m.emit(0xba)
            m.emit(0xf8 | lcode(v[1]))                      // ModRM: mod=11, reg=/7 opcode extension, rm=dst
            m.imm1(toImmAny(v[0]))                          // imm8 bit index
        })
    }
    // BTCQ r64, r64
    if isReg64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1]))   // REX.W with R (src) and B (dst) extension bits
            m.emit(0x0f)                                    // opcode: 0F BB /r
            m.emit(0xbb)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))   // ModRM: mod=11, reg=bit-index src, rm=dst
        })
    }
    // BTCQ imm8, m64
    if isImm8(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))                        // REX.W derived from the memory operand
            m.emit(0x0f)
            m.emit(0xba)
            m.mrsd(7, addr(v[1]), 1)                        // /7 with memory destination
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTCQ r64, m64
    if isReg64(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[0]), addr(v[1]))
            m.emit(0x0f)
            m.emit(0xbb)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)              // ModRM + SIB/disp for the memory destination
        })
    }
    if p.len == 0 {
        panic("invalid operands for BTCQ")
    }
    return p
}
  3394  
// BTCW performs "Bit Test and Complement".
//
// 16-bit variant (66 prefix always emitted). Immediate forms encode as
// 66 0F BA /7 ib; register-source forms encode as 66 0F BB /r.
//
// Mnemonic        : BTC
// Supported forms : (4 forms)
//
//    * BTCW imm8, r16
//    * BTCW r16, r16
//    * BTCW imm8, m16
//    * BTCW r16, m16
//
func (self *Program) BTCW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BTCW", 2, Operands { v0, v1 })
    // BTCW imm8, r16
    if isImm8(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // operand-size override prefix (16-bit)
            m.rexo(0, v[1], false)                          // optional REX for extended destination
            m.emit(0x0f)                                    // opcode: 0F BA /7
            m.emit(0xba)
            m.emit(0xf8 | lcode(v[1]))                      // ModRM: mod=11, reg=/7 opcode extension, rm=dst
            m.imm1(toImmAny(v[0]))                          // imm8 bit index
        })
    }
    // BTCW r16, r16
    if isReg16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)                                    // opcode: 0F BB /r
            m.emit(0xbb)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))   // ModRM: mod=11, reg=bit-index src, rm=dst
        })
    }
    // BTCW imm8, m16
    if isImm8(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xba)
            m.mrsd(7, addr(v[1]), 1)                        // /7 with memory destination
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTCW r16, m16
    if isReg16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xbb)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)              // ModRM + SIB/disp for the memory destination
        })
    }
    if p.len == 0 {
        panic("invalid operands for BTCW")
    }
    return p
}
  3458  
// BTL performs "Bit Test".
//
// Mnemonic        : BT
// Supported forms : (4 forms)
//
//    * BTL imm8, r32
//    * BTL r32, r32
//    * BTL imm8, m32
//    * BTL r32, m32
//
func (self *Program) BTL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BTL", 2, Operands { v0, v1 })
    // BTL imm8, r32
    // Encoding: 0F BA /4 ib (/4 is the BT opcode extension)
    if isImm8(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0x0f)
            m.emit(0xba)
            m.emit(0xe0 | lcode(v[1]))      // ModRM 11/4/rm
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTL r32, r32
    // Encoding: 0F A3 /r
    if isReg32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0xa3)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // BTL imm8, m32
    // Encoding: 0F BA /4 ib with a memory ModRM/SIB
    if isImm8(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xba)
            m.mrsd(4, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTL r32, m32
    // Encoding: 0F A3 /r with a memory ModRM/SIB
    if isReg32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xa3)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for BTL")
    }
    return p
}
  3518  
// BTQ performs "Bit Test".
//
// Mnemonic        : BT
// Supported forms : (4 forms)
//
//    * BTQ imm8, r64
//    * BTQ r64, r64
//    * BTQ imm8, m64
//    * BTQ r64, m64
//
func (self *Program) BTQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BTQ", 2, Operands { v0, v1 })
    // BTQ imm8, r64
    // Encoding: REX.W + 0F BA /4 ib
    if isImm8(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))      // REX.W plus REX.B for r8-r15
            m.emit(0x0f)
            m.emit(0xba)
            m.emit(0xe0 | lcode(v[1]))      // ModRM 11/4/rm
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTQ r64, r64
    // Encoding: REX.W + 0F A3 /r
    if isReg64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1]))   // REX.W | REX.R | REX.B
            m.emit(0x0f)
            m.emit(0xa3)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // BTQ imm8, m64
    // Encoding: REX.W + 0F BA /4 ib with a memory ModRM/SIB
    if isImm8(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0x0f)
            m.emit(0xba)
            m.mrsd(4, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTQ r64, m64
    // Encoding: REX.W + 0F A3 /r with a memory ModRM/SIB
    if isReg64(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[0]), addr(v[1]))
            m.emit(0x0f)
            m.emit(0xa3)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for BTQ")
    }
    return p
}
  3578  
// BTRL performs "Bit Test and Reset".
//
// Mnemonic        : BTR
// Supported forms : (4 forms)
//
//    * BTRL imm8, r32
//    * BTRL r32, r32
//    * BTRL imm8, m32
//    * BTRL r32, m32
//
func (self *Program) BTRL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BTRL", 2, Operands { v0, v1 })
    // BTRL imm8, r32
    // Encoding: 0F BA /6 ib (/6 is the BTR opcode extension)
    if isImm8(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0x0f)
            m.emit(0xba)
            m.emit(0xf0 | lcode(v[1]))      // ModRM 11/6/rm
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTRL r32, r32
    // Encoding: 0F B3 /r
    if isReg32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0xb3)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // BTRL imm8, m32
    // Encoding: 0F BA /6 ib with a memory ModRM/SIB
    if isImm8(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xba)
            m.mrsd(6, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTRL r32, m32
    // Encoding: 0F B3 /r with a memory ModRM/SIB
    if isReg32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xb3)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for BTRL")
    }
    return p
}
  3638  
// BTRQ performs "Bit Test and Reset".
//
// Mnemonic        : BTR
// Supported forms : (4 forms)
//
//    * BTRQ imm8, r64
//    * BTRQ r64, r64
//    * BTRQ imm8, m64
//    * BTRQ r64, m64
//
func (self *Program) BTRQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BTRQ", 2, Operands { v0, v1 })
    // BTRQ imm8, r64
    // Encoding: REX.W + 0F BA /6 ib
    if isImm8(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))      // REX.W plus REX.B for r8-r15
            m.emit(0x0f)
            m.emit(0xba)
            m.emit(0xf0 | lcode(v[1]))      // ModRM 11/6/rm
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTRQ r64, r64
    // Encoding: REX.W + 0F B3 /r
    if isReg64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1]))   // REX.W | REX.R | REX.B
            m.emit(0x0f)
            m.emit(0xb3)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // BTRQ imm8, m64
    // Encoding: REX.W + 0F BA /6 ib with a memory ModRM/SIB
    if isImm8(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0x0f)
            m.emit(0xba)
            m.mrsd(6, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTRQ r64, m64
    // Encoding: REX.W + 0F B3 /r with a memory ModRM/SIB
    if isReg64(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[0]), addr(v[1]))
            m.emit(0x0f)
            m.emit(0xb3)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for BTRQ")
    }
    return p
}
  3698  
// BTRW performs "Bit Test and Reset".
//
// Mnemonic        : BTR
// Supported forms : (4 forms)
//
//    * BTRW imm8, r16
//    * BTRW r16, r16
//    * BTRW imm8, m16
//    * BTRW r16, m16
//
func (self *Program) BTRW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BTRW", 2, Operands { v0, v1 })
    // BTRW imm8, r16
    // Encoding: 66 0F BA /6 ib (the 0x66 prefix selects 16-bit operand size)
    if isImm8(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0x0f)
            m.emit(0xba)
            m.emit(0xf0 | lcode(v[1]))      // ModRM 11/6/rm: /6 is the BTR opcode extension
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTRW r16, r16
    // Encoding: 66 0F B3 /r
    if isReg16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0xb3)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // BTRW imm8, m16
    // Encoding: 66 0F BA /6 ib with a memory ModRM/SIB
    if isImm8(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xba)
            m.mrsd(6, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTRW r16, m16
    // Encoding: 66 0F B3 /r with a memory ModRM/SIB
    if isReg16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xb3)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for BTRW")
    }
    return p
}
  3762  
// BTSL performs "Bit Test and Set".
//
// Mnemonic        : BTS
// Supported forms : (4 forms)
//
//    * BTSL imm8, r32
//    * BTSL r32, r32
//    * BTSL imm8, m32
//    * BTSL r32, m32
//
func (self *Program) BTSL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BTSL", 2, Operands { v0, v1 })
    // BTSL imm8, r32
    // Encoding: 0F BA /5 ib (/5 is the BTS opcode extension)
    if isImm8(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0x0f)
            m.emit(0xba)
            m.emit(0xe8 | lcode(v[1]))      // ModRM 11/5/rm
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTSL r32, r32
    // Encoding: 0F AB /r
    if isReg32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0xab)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // BTSL imm8, m32
    // Encoding: 0F BA /5 ib with a memory ModRM/SIB
    if isImm8(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xba)
            m.mrsd(5, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTSL r32, m32
    // Encoding: 0F AB /r with a memory ModRM/SIB
    if isReg32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xab)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for BTSL")
    }
    return p
}
  3822  
// BTSQ performs "Bit Test and Set".
//
// Mnemonic        : BTS
// Supported forms : (4 forms)
//
//    * BTSQ imm8, r64
//    * BTSQ r64, r64
//    * BTSQ imm8, m64
//    * BTSQ r64, m64
//
func (self *Program) BTSQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BTSQ", 2, Operands { v0, v1 })
    // BTSQ imm8, r64
    // Encoding: REX.W + 0F BA /5 ib
    if isImm8(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))      // REX.W plus REX.B for r8-r15
            m.emit(0x0f)
            m.emit(0xba)
            m.emit(0xe8 | lcode(v[1]))      // ModRM 11/5/rm
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTSQ r64, r64
    // Encoding: REX.W + 0F AB /r
    if isReg64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1]))   // REX.W | REX.R | REX.B
            m.emit(0x0f)
            m.emit(0xab)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // BTSQ imm8, m64
    // Encoding: REX.W + 0F BA /5 ib with a memory ModRM/SIB
    if isImm8(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0x0f)
            m.emit(0xba)
            m.mrsd(5, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTSQ r64, m64
    // Encoding: REX.W + 0F AB /r with a memory ModRM/SIB
    if isReg64(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[0]), addr(v[1]))
            m.emit(0x0f)
            m.emit(0xab)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for BTSQ")
    }
    return p
}
  3882  
// BTSW performs "Bit Test and Set".
//
// Mnemonic        : BTS
// Supported forms : (4 forms)
//
//    * BTSW imm8, r16
//    * BTSW r16, r16
//    * BTSW imm8, m16
//    * BTSW r16, m16
//
func (self *Program) BTSW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BTSW", 2, Operands { v0, v1 })
    // BTSW imm8, r16
    // Encoding: 66 0F BA /5 ib (the 0x66 prefix selects 16-bit operand size)
    if isImm8(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0x0f)
            m.emit(0xba)
            m.emit(0xe8 | lcode(v[1]))      // ModRM 11/5/rm: /5 is the BTS opcode extension
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTSW r16, r16
    // Encoding: 66 0F AB /r
    if isReg16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0xab)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // BTSW imm8, m16
    // Encoding: 66 0F BA /5 ib with a memory ModRM/SIB
    if isImm8(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xba)
            m.mrsd(5, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTSW r16, m16
    // Encoding: 66 0F AB /r with a memory ModRM/SIB
    if isReg16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xab)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for BTSW")
    }
    return p
}
  3946  
// BTW performs "Bit Test".
//
// Mnemonic        : BT
// Supported forms : (4 forms)
//
//    * BTW imm8, r16
//    * BTW r16, r16
//    * BTW imm8, m16
//    * BTW r16, m16
//
func (self *Program) BTW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("BTW", 2, Operands { v0, v1 })
    // BTW imm8, r16
    // Encoding: 66 0F BA /4 ib (the 0x66 prefix selects 16-bit operand size)
    if isImm8(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0x0f)
            m.emit(0xba)
            m.emit(0xe0 | lcode(v[1]))      // ModRM 11/4/rm: /4 is the BT opcode extension
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTW r16, r16
    // Encoding: 66 0F A3 /r
    if isReg16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0xa3)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // BTW imm8, m16
    // Encoding: 66 0F BA /4 ib with a memory ModRM/SIB
    if isImm8(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xba)
            m.mrsd(4, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // BTW r16, m16
    // Encoding: 66 0F A3 /r with a memory ModRM/SIB
    if isReg16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xa3)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for BTW")
    }
    return p
}
  4010  
// BZHI performs "Zero High Bits Starting with Specified Bit Position".
//
// Mnemonic        : BZHI
// Supported forms : (4 forms)
//
//    * BZHI r32, r32, r32    [BMI2]
//    * BZHI r32, m32, r32    [BMI2]
//    * BZHI r64, r64, r64    [BMI2]
//    * BZHI r64, m64, r64    [BMI2]
//
func (self *Program) BZHI(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("BZHI", 3, Operands { v0, v1, v2 })
    // BZHI r32, r32, r32
    // Encoding: VEX.LZ.0F38.W0 F5 /r — the bit index (v0) goes in VEX.vvvv
    if isReg32(v0) && isReg32(v1) && isReg32(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                        // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))  // ~R, ~B, map 0F38
            m.emit(0x78 ^ (hlcode(v[0]) << 3))                  // W=0, ~vvvv = v0
            m.emit(0xf5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // BZHI r32, m32, r32
    // Encoding: VEX.LZ.0F38.W0 F5 /r with a memory ModRM/SIB
    if isReg32(v0) && isM32(v1) && isReg32(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x00, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0xf5)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // BZHI r64, r64, r64
    // Encoding: VEX.LZ.0F38.W1 F5 /r (VEX.W=1 selects 64-bit operand size)
    if isReg64(v0) && isReg64(v1) && isReg64(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0xf8 ^ (hlcode(v[0]) << 3))                  // W=1, ~vvvv = v0
            m.emit(0xf5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // BZHI r64, m64, r64
    // Encoding: VEX.LZ.0F38.W1 F5 /r with a memory ModRM/SIB
    if isReg64(v0) && isM64(v1) && isReg64(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x80, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0xf5)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for BZHI")
    }
    return p
}
  4072  
// CALL performs "Call Procedure".
//
// Mnemonic        : CALL
// Supported forms : (1 form)
//
//    * CALL rel32
//
// In addition to the rel32 form, a Label operand is accepted; it is
// encoded as rel32 once the label offset is resolved (_F_rel4 fixup).
func (self *Program) CALL(v0 interface{}) *Instruction {
    p := self.alloc("CALL", 1, Operands { v0 })
    // CALL rel32
    // Encoding: E8 cd (near call, relative 32-bit displacement)
    if isRel32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xe8)
            m.imm4(relv(v[0]))
        })
    }
    // CALL label
    // Same E8 cd encoding; displacement filled in when the label is resolved.
    if isLabel(v0) {
        p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
            m.emit(0xe8)
            m.imm4(relv(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for CALL")
    }
    return p
}
  4102  
// CALLQ performs "Call Procedure".
//
// Mnemonic        : CALL
// Supported forms : (2 forms)
//
//    * CALLQ r64
//    * CALLQ m64
//
func (self *Program) CALLQ(v0 interface{}) *Instruction {
    p := self.alloc("CALLQ", 1, Operands { v0 })
    // CALLQ r64
    // Encoding: FF /2 (indirect near call; 64-bit default in long mode, no REX.W needed)
    if isReg64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], false)
            m.emit(0xff)
            m.emit(0xd0 | lcode(v[0]))      // ModRM 11/2/rm
        })
    }
    // CALLQ m64
    // Encoding: FF /2 with a memory ModRM/SIB
    if isM64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)
            m.emit(0xff)
            m.mrsd(2, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for CALLQ")
    }
    return p
}
  4136  
// CBTW performs "Convert Byte to Word".
//
// Mnemonic        : CBW
// Supported forms : (1 form)
//
//    * CBTW
//
func (self *Program) CBTW() *Instruction {
    p := self.alloc("CBTW", 0, Operands {  })
    // CBTW
    // Encoding: 66 98 (CBW; the 0x66 prefix selects the 16-bit form of opcode 98)
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0x66)
        m.emit(0x98)
    })
    return p
}
  4154  
// CLC performs "Clear Carry Flag".
//
// Mnemonic        : CLC
// Supported forms : (1 form)
//
//    * CLC
//
func (self *Program) CLC() *Instruction {
    p := self.alloc("CLC", 0, Operands {  })
    // CLC
    // Encoding: F8 (single-byte opcode, no operands)
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0xf8)
    })
    return p
}
  4171  
// CLD performs "Clear Direction Flag".
//
// Mnemonic        : CLD
// Supported forms : (1 form)
//
//    * CLD
//
func (self *Program) CLD() *Instruction {
    p := self.alloc("CLD", 0, Operands {  })
    // CLD
    // Encoding: FC (single-byte opcode, no operands)
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0xfc)
    })
    return p
}
  4188  
// CLFLUSH performs "Flush Cache Line".
//
// Mnemonic        : CLFLUSH
// Supported forms : (1 form)
//
//    * CLFLUSH m8    [CLFLUSH]
//
func (self *Program) CLFLUSH(v0 interface{}) *Instruction {
    p := self.alloc("CLFLUSH", 1, Operands { v0 })
    // CLFLUSH m8
    // Encoding: 0F AE /7 (memory operand only)
    if isM8(v0) {
        self.require(ISA_CLFLUSH)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xae)
            m.mrsd(7, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for CLFLUSH")
    }
    return p
}
  4214  
// CLFLUSHOPT performs "Flush Cache Line Optimized".
//
// Mnemonic        : CLFLUSHOPT
// Supported forms : (1 form)
//
//    * CLFLUSHOPT m8    [CLFLUSHOPT]
//
func (self *Program) CLFLUSHOPT(v0 interface{}) *Instruction {
    p := self.alloc("CLFLUSHOPT", 1, Operands { v0 })
    // CLFLUSHOPT m8
    // Encoding: 66 0F AE /7 (the 0x66 prefix distinguishes it from CLFLUSH)
    if isM8(v0) {
        self.require(ISA_CLFLUSHOPT)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xae)
            m.mrsd(7, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for CLFLUSHOPT")
    }
    return p
}
  4241  
// CLTD performs "Convert Doubleword to Quadword".
//
// Mnemonic        : CDQ
// Supported forms : (1 form)
//
//    * CLTD
//
func (self *Program) CLTD() *Instruction {
    p := self.alloc("CLTD", 0, Operands {  })
    // CLTD
    // Encoding: 99 (CDQ; sign-extends EAX into EDX:EAX)
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0x99)
    })
    return p
}
  4258  
// CLTQ performs "Convert Doubleword to Quadword".
//
// Mnemonic        : CDQE
// Supported forms : (1 form)
//
//    * CLTQ
//
func (self *Program) CLTQ() *Instruction {
    p := self.alloc("CLTQ", 0, Operands {  })
    // CLTQ
    // Encoding: REX.W + 98 (CDQE; sign-extends EAX into RAX)
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0x48)
        m.emit(0x98)
    })
    return p
}
  4276  
// CLWB performs "Cache Line Write Back".
//
// Mnemonic        : CLWB
// Supported forms : (1 form)
//
//    * CLWB m8    [CLWB]
//
func (self *Program) CLWB(v0 interface{}) *Instruction {
    p := self.alloc("CLWB", 1, Operands { v0 })
    // CLWB m8
    // Encoding: 66 0F AE /6 (/6 distinguishes it from CLFLUSHOPT's /7)
    if isM8(v0) {
        self.require(ISA_CLWB)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xae)
            m.mrsd(6, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for CLWB")
    }
    return p
}
  4303  
// CLZERO performs "Zero-out 64-bit Cache Line".
//
// Mnemonic        : CLZERO
// Supported forms : (1 form)
//
//    * CLZERO    [CLZERO]
//
func (self *Program) CLZERO() *Instruction {
    p := self.alloc("CLZERO", 0, Operands {  })
    // CLZERO
    // Encoding: 0F 01 FC (AMD-specific; implicit operand is the line addressed by RAX)
    self.require(ISA_CLZERO)
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0x0f)
        m.emit(0x01)
        m.emit(0xfc)
    })
    return p
}
  4323  
// CMC performs "Complement Carry Flag".
//
// Mnemonic        : CMC
// Supported forms : (1 form)
//
//    * CMC
//
func (self *Program) CMC() *Instruction {
    p := self.alloc("CMC", 0, Operands {  })
    // CMC
    // Encoding: F5 (single-byte opcode, no operands)
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0xf5)
    })
    return p
}
  4340  
// CMOVA performs "Move if above (CF == 0 and ZF == 0)".
//
// Mnemonic        : CMOVA
// Supported forms : (6 forms)
//
//    * CMOVA r16, r16    [CMOV]
//    * CMOVA m16, r16    [CMOV]
//    * CMOVA r32, r32    [CMOV]
//    * CMOVA m32, r32    [CMOV]
//    * CMOVA r64, r64    [CMOV]
//    * CMOVA m64, r64    [CMOV]
//
// Encoding: 0F 47 /r; the 16-bit forms add a 0x66 operand-size prefix
// and the 64-bit forms a REX.W prefix.
//
func (self *Program) CMOVA(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVA", 2, Operands { v0, v1 })
    // CMOVA r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)  // operand-size override prefix (16-bit operands)
            m.rexo(hcode(v[1]), v[0], false)  // optional REX prefix for extended registers
            m.emit(0x0f)  // two-byte opcode escape
            m.emit(0x47)  // CMOVA opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=v1 (dst), rm=v0 (src)
        })
    }
    // CMOVA m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x47)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM + SIB + displacement for the memory source
        })
    }
    // CMOVA r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x47)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVA m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x47)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVA r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))  // mandatory REX.W prefix (64-bit operands)
            m.emit(0x0f)
            m.emit(0x47)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVA m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))  // REX.W prefix covering the memory operand
            m.emit(0x0f)
            m.emit(0x47)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // none of the forms above matched the operand types
    if p.len == 0 {
        panic("invalid operands for CMOVA")
    }
    return p
}
  4428  
// CMOVAE performs "Move if above or equal (CF == 0)".
//
// Mnemonic        : CMOVAE
// Supported forms : (6 forms)
//
//    * CMOVAE r16, r16    [CMOV]
//    * CMOVAE m16, r16    [CMOV]
//    * CMOVAE r32, r32    [CMOV]
//    * CMOVAE m32, r32    [CMOV]
//    * CMOVAE r64, r64    [CMOV]
//    * CMOVAE m64, r64    [CMOV]
//
// Encoding: 0F 43 /r; the 16-bit forms add a 0x66 operand-size prefix
// and the 64-bit forms a REX.W prefix.
//
func (self *Program) CMOVAE(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVAE", 2, Operands { v0, v1 })
    // CMOVAE r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)  // operand-size override prefix (16-bit operands)
            m.rexo(hcode(v[1]), v[0], false)  // optional REX prefix for extended registers
            m.emit(0x0f)  // two-byte opcode escape
            m.emit(0x43)  // CMOVAE opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=v1 (dst), rm=v0 (src)
        })
    }
    // CMOVAE m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x43)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM + SIB + displacement for the memory source
        })
    }
    // CMOVAE r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x43)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVAE m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x43)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVAE r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))  // mandatory REX.W prefix (64-bit operands)
            m.emit(0x0f)
            m.emit(0x43)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVAE m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))  // REX.W prefix covering the memory operand
            m.emit(0x0f)
            m.emit(0x43)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // none of the forms above matched the operand types
    if p.len == 0 {
        panic("invalid operands for CMOVAE")
    }
    return p
}
  4516  
// CMOVB performs "Move if below (CF == 1)".
//
// Mnemonic        : CMOVB
// Supported forms : (6 forms)
//
//    * CMOVB r16, r16    [CMOV]
//    * CMOVB m16, r16    [CMOV]
//    * CMOVB r32, r32    [CMOV]
//    * CMOVB m32, r32    [CMOV]
//    * CMOVB r64, r64    [CMOV]
//    * CMOVB m64, r64    [CMOV]
//
// Encoding: 0F 42 /r; the 16-bit forms add a 0x66 operand-size prefix
// and the 64-bit forms a REX.W prefix.
//
func (self *Program) CMOVB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVB", 2, Operands { v0, v1 })
    // CMOVB r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)  // operand-size override prefix (16-bit operands)
            m.rexo(hcode(v[1]), v[0], false)  // optional REX prefix for extended registers
            m.emit(0x0f)  // two-byte opcode escape
            m.emit(0x42)  // CMOVB opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=v1 (dst), rm=v0 (src)
        })
    }
    // CMOVB m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x42)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM + SIB + displacement for the memory source
        })
    }
    // CMOVB r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVB m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x42)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVB r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))  // mandatory REX.W prefix (64-bit operands)
            m.emit(0x0f)
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVB m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))  // REX.W prefix covering the memory operand
            m.emit(0x0f)
            m.emit(0x42)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // none of the forms above matched the operand types
    if p.len == 0 {
        panic("invalid operands for CMOVB")
    }
    return p
}
  4604  
// CMOVBE performs "Move if below or equal (CF == 1 or ZF == 1)".
//
// Mnemonic        : CMOVBE
// Supported forms : (6 forms)
//
//    * CMOVBE r16, r16    [CMOV]
//    * CMOVBE m16, r16    [CMOV]
//    * CMOVBE r32, r32    [CMOV]
//    * CMOVBE m32, r32    [CMOV]
//    * CMOVBE r64, r64    [CMOV]
//    * CMOVBE m64, r64    [CMOV]
//
// Encoding: 0F 46 /r; the 16-bit forms add a 0x66 operand-size prefix
// and the 64-bit forms a REX.W prefix.
//
func (self *Program) CMOVBE(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVBE", 2, Operands { v0, v1 })
    // CMOVBE r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)  // operand-size override prefix (16-bit operands)
            m.rexo(hcode(v[1]), v[0], false)  // optional REX prefix for extended registers
            m.emit(0x0f)  // two-byte opcode escape
            m.emit(0x46)  // CMOVBE opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=v1 (dst), rm=v0 (src)
        })
    }
    // CMOVBE m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x46)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM + SIB + displacement for the memory source
        })
    }
    // CMOVBE r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x46)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVBE m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x46)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVBE r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))  // mandatory REX.W prefix (64-bit operands)
            m.emit(0x0f)
            m.emit(0x46)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVBE m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))  // REX.W prefix covering the memory operand
            m.emit(0x0f)
            m.emit(0x46)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // none of the forms above matched the operand types
    if p.len == 0 {
        panic("invalid operands for CMOVBE")
    }
    return p
}
  4692  
// CMOVC performs "Move if carry (CF == 1)".
//
// Mnemonic        : CMOVC
// Supported forms : (6 forms)
//
//    * CMOVC r16, r16    [CMOV]
//    * CMOVC m16, r16    [CMOV]
//    * CMOVC r32, r32    [CMOV]
//    * CMOVC m32, r32    [CMOV]
//    * CMOVC r64, r64    [CMOV]
//    * CMOVC m64, r64    [CMOV]
//
// Encoding: 0F 42 /r — CMOVC is a mnemonic alias of CMOVB (same opcode
// byte below); the 16-bit forms add a 0x66 operand-size prefix and the
// 64-bit forms a REX.W prefix.
//
func (self *Program) CMOVC(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVC", 2, Operands { v0, v1 })
    // CMOVC r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)  // operand-size override prefix (16-bit operands)
            m.rexo(hcode(v[1]), v[0], false)  // optional REX prefix for extended registers
            m.emit(0x0f)  // two-byte opcode escape
            m.emit(0x42)  // CMOVC opcode (same as CMOVB)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=v1 (dst), rm=v0 (src)
        })
    }
    // CMOVC m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x42)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM + SIB + displacement for the memory source
        })
    }
    // CMOVC r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVC m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x42)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVC r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))  // mandatory REX.W prefix (64-bit operands)
            m.emit(0x0f)
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVC m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))  // REX.W prefix covering the memory operand
            m.emit(0x0f)
            m.emit(0x42)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // none of the forms above matched the operand types
    if p.len == 0 {
        panic("invalid operands for CMOVC")
    }
    return p
}
  4780  
// CMOVE performs "Move if equal (ZF == 1)".
//
// Mnemonic        : CMOVE
// Supported forms : (6 forms)
//
//    * CMOVE r16, r16    [CMOV]
//    * CMOVE m16, r16    [CMOV]
//    * CMOVE r32, r32    [CMOV]
//    * CMOVE m32, r32    [CMOV]
//    * CMOVE r64, r64    [CMOV]
//    * CMOVE m64, r64    [CMOV]
//
// Encoding: 0F 44 /r; the 16-bit forms add a 0x66 operand-size prefix
// and the 64-bit forms a REX.W prefix.
//
func (self *Program) CMOVE(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVE", 2, Operands { v0, v1 })
    // CMOVE r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)  // operand-size override prefix (16-bit operands)
            m.rexo(hcode(v[1]), v[0], false)  // optional REX prefix for extended registers
            m.emit(0x0f)  // two-byte opcode escape
            m.emit(0x44)  // CMOVE opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=v1 (dst), rm=v0 (src)
        })
    }
    // CMOVE m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x44)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM + SIB + displacement for the memory source
        })
    }
    // CMOVE r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x44)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVE m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x44)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVE r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))  // mandatory REX.W prefix (64-bit operands)
            m.emit(0x0f)
            m.emit(0x44)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVE m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))  // REX.W prefix covering the memory operand
            m.emit(0x0f)
            m.emit(0x44)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // none of the forms above matched the operand types
    if p.len == 0 {
        panic("invalid operands for CMOVE")
    }
    return p
}
  4868  
// CMOVG performs "Move if greater (ZF == 0 and SF == OF)".
//
// Mnemonic        : CMOVG
// Supported forms : (6 forms)
//
//    * CMOVG r16, r16    [CMOV]
//    * CMOVG m16, r16    [CMOV]
//    * CMOVG r32, r32    [CMOV]
//    * CMOVG m32, r32    [CMOV]
//    * CMOVG r64, r64    [CMOV]
//    * CMOVG m64, r64    [CMOV]
//
// Encoding: 0F 4F /r; the 16-bit forms add a 0x66 operand-size prefix
// and the 64-bit forms a REX.W prefix.
//
func (self *Program) CMOVG(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVG", 2, Operands { v0, v1 })
    // CMOVG r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)  // operand-size override prefix (16-bit operands)
            m.rexo(hcode(v[1]), v[0], false)  // optional REX prefix for extended registers
            m.emit(0x0f)  // two-byte opcode escape
            m.emit(0x4f)  // CMOVG opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=v1 (dst), rm=v0 (src)
        })
    }
    // CMOVG m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM + SIB + displacement for the memory source
        })
    }
    // CMOVG r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x4f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVG m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVG r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))  // mandatory REX.W prefix (64-bit operands)
            m.emit(0x0f)
            m.emit(0x4f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVG m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))  // REX.W prefix covering the memory operand
            m.emit(0x0f)
            m.emit(0x4f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // none of the forms above matched the operand types
    if p.len == 0 {
        panic("invalid operands for CMOVG")
    }
    return p
}
  4956  
// CMOVGE performs "Move if greater or equal (SF == OF)".
//
// Mnemonic        : CMOVGE
// Supported forms : (6 forms)
//
//    * CMOVGE r16, r16    [CMOV]
//    * CMOVGE m16, r16    [CMOV]
//    * CMOVGE r32, r32    [CMOV]
//    * CMOVGE m32, r32    [CMOV]
//    * CMOVGE r64, r64    [CMOV]
//    * CMOVGE m64, r64    [CMOV]
//
// Encoding: 0F 4D /r; the 16-bit forms add a 0x66 operand-size prefix
// and the 64-bit forms a REX.W prefix.
//
func (self *Program) CMOVGE(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVGE", 2, Operands { v0, v1 })
    // CMOVGE r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)  // operand-size override prefix (16-bit operands)
            m.rexo(hcode(v[1]), v[0], false)  // optional REX prefix for extended registers
            m.emit(0x0f)  // two-byte opcode escape
            m.emit(0x4d)  // CMOVGE opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=v1 (dst), rm=v0 (src)
        })
    }
    // CMOVGE m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM + SIB + displacement for the memory source
        })
    }
    // CMOVGE r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x4d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVGE m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVGE r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))  // mandatory REX.W prefix (64-bit operands)
            m.emit(0x0f)
            m.emit(0x4d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVGE m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))  // REX.W prefix covering the memory operand
            m.emit(0x0f)
            m.emit(0x4d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // none of the forms above matched the operand types
    if p.len == 0 {
        panic("invalid operands for CMOVGE")
    }
    return p
}
  5044  
// CMOVL performs "Move if less (SF != OF)".
//
// Mnemonic        : CMOVL
// Supported forms : (6 forms)
//
//    * CMOVL r16, r16    [CMOV]
//    * CMOVL m16, r16    [CMOV]
//    * CMOVL r32, r32    [CMOV]
//    * CMOVL m32, r32    [CMOV]
//    * CMOVL r64, r64    [CMOV]
//    * CMOVL m64, r64    [CMOV]
//
// Encoding: 0F 4C /r; the 16-bit forms add a 0x66 operand-size prefix
// and the 64-bit forms a REX.W prefix.
//
func (self *Program) CMOVL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVL", 2, Operands { v0, v1 })
    // CMOVL r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)  // operand-size override prefix (16-bit operands)
            m.rexo(hcode(v[1]), v[0], false)  // optional REX prefix for extended registers
            m.emit(0x0f)  // two-byte opcode escape
            m.emit(0x4c)  // CMOVL opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=v1 (dst), rm=v0 (src)
        })
    }
    // CMOVL m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM + SIB + displacement for the memory source
        })
    }
    // CMOVL r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x4c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVL m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVL r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))  // mandatory REX.W prefix (64-bit operands)
            m.emit(0x0f)
            m.emit(0x4c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVL m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))  // REX.W prefix covering the memory operand
            m.emit(0x0f)
            m.emit(0x4c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // none of the forms above matched the operand types
    if p.len == 0 {
        panic("invalid operands for CMOVL")
    }
    return p
}
  5132  
// CMOVLE performs "Move if less or equal (ZF == 1 or SF != OF)".
//
// Mnemonic        : CMOVLE
// Supported forms : (6 forms)
//
//    * CMOVLE r16, r16    [CMOV]
//    * CMOVLE m16, r16    [CMOV]
//    * CMOVLE r32, r32    [CMOV]
//    * CMOVLE m32, r32    [CMOV]
//    * CMOVLE r64, r64    [CMOV]
//    * CMOVLE m64, r64    [CMOV]
//
// Encoding: 0F 4E /r; the 16-bit forms add a 0x66 operand-size prefix
// and the 64-bit forms a REX.W prefix.
//
func (self *Program) CMOVLE(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVLE", 2, Operands { v0, v1 })
    // CMOVLE r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)  // operand-size override prefix (16-bit operands)
            m.rexo(hcode(v[1]), v[0], false)  // optional REX prefix for extended registers
            m.emit(0x0f)  // two-byte opcode escape
            m.emit(0x4e)  // CMOVLE opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=v1 (dst), rm=v0 (src)
        })
    }
    // CMOVLE m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM + SIB + displacement for the memory source
        })
    }
    // CMOVLE r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x4e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVLE m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVLE r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))  // mandatory REX.W prefix (64-bit operands)
            m.emit(0x0f)
            m.emit(0x4e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVLE m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))  // REX.W prefix covering the memory operand
            m.emit(0x0f)
            m.emit(0x4e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // none of the forms above matched the operand types
    if p.len == 0 {
        panic("invalid operands for CMOVLE")
    }
    return p
}
  5220  
// CMOVNA performs "Move if not above (CF == 1 or ZF == 1)".
//
// Mnemonic        : CMOVNA
// Supported forms : (6 forms)
//
//    * CMOVNA r16, r16    [CMOV]
//    * CMOVNA m16, r16    [CMOV]
//    * CMOVNA r32, r32    [CMOV]
//    * CMOVNA m32, r32    [CMOV]
//    * CMOVNA r64, r64    [CMOV]
//    * CMOVNA m64, r64    [CMOV]
//
func (self *Program) CMOVNA(v0 interface{}, v1 interface{}) *Instruction {
    // Each matching operand form registers one candidate encoder on the
    // instruction; every form requires the CMOV ISA extension.
    p := self.alloc("CMOVNA", 2, Operands { v0, v1 })
    // CMOVNA r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // operand-size override prefix: 16-bit
            m.rexo(hcode(v[1]), v[0], false)              // REX prefix, emitted only when extended registers are used
            m.emit(0x0f)
            m.emit(0x46)                                  // opcode 0F 46: condition code NA (same as CMOVBE)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (register-direct), reg=dst, rm=src
        })
    }
    // CMOVNA m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x46)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM + SIB/displacement for the memory source
        })
    }
    // CMOVNA r32, r32 — no prefix: 32-bit is the default operand size
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x46)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNA m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x46)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVNA r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0])) // REX.W plus R/B register-extension bits
            m.emit(0x0f)
            m.emit(0x46)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNA m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))            // REX with w=1 (REX.W) for the 64-bit operand size
            m.emit(0x0f)
            m.emit(0x46)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for CMOVNA") // no supported form matched the operand types
    }
    return p
}
  5308  
// CMOVNAE performs "Move if not above or equal (CF == 1)".
//
// Mnemonic        : CMOVNAE
// Supported forms : (6 forms)
//
//    * CMOVNAE r16, r16    [CMOV]
//    * CMOVNAE m16, r16    [CMOV]
//    * CMOVNAE r32, r32    [CMOV]
//    * CMOVNAE m32, r32    [CMOV]
//    * CMOVNAE r64, r64    [CMOV]
//    * CMOVNAE m64, r64    [CMOV]
//
func (self *Program) CMOVNAE(v0 interface{}, v1 interface{}) *Instruction {
    // Each matching operand form registers one candidate encoder on the
    // instruction; every form requires the CMOV ISA extension.
    p := self.alloc("CMOVNAE", 2, Operands { v0, v1 })
    // CMOVNAE r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // operand-size override prefix: 16-bit
            m.rexo(hcode(v[1]), v[0], false)              // REX prefix, emitted only when extended registers are used
            m.emit(0x0f)
            m.emit(0x42)                                  // opcode 0F 42: condition code NAE (same as CMOVB/CMOVC)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (register-direct), reg=dst, rm=src
        })
    }
    // CMOVNAE m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x42)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM + SIB/displacement for the memory source
        })
    }
    // CMOVNAE r32, r32 — no prefix: 32-bit is the default operand size
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNAE m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x42)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVNAE r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0])) // REX.W plus R/B register-extension bits
            m.emit(0x0f)
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNAE m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))            // REX with w=1 (REX.W) for the 64-bit operand size
            m.emit(0x0f)
            m.emit(0x42)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for CMOVNAE") // no supported form matched the operand types
    }
    return p
}
  5396  
// CMOVNB performs "Move if not below (CF == 0)".
//
// Mnemonic        : CMOVNB
// Supported forms : (6 forms)
//
//    * CMOVNB r16, r16    [CMOV]
//    * CMOVNB m16, r16    [CMOV]
//    * CMOVNB r32, r32    [CMOV]
//    * CMOVNB m32, r32    [CMOV]
//    * CMOVNB r64, r64    [CMOV]
//    * CMOVNB m64, r64    [CMOV]
//
func (self *Program) CMOVNB(v0 interface{}, v1 interface{}) *Instruction {
    // Each matching operand form registers one candidate encoder on the
    // instruction; every form requires the CMOV ISA extension.
    p := self.alloc("CMOVNB", 2, Operands { v0, v1 })
    // CMOVNB r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // operand-size override prefix: 16-bit
            m.rexo(hcode(v[1]), v[0], false)              // REX prefix, emitted only when extended registers are used
            m.emit(0x0f)
            m.emit(0x43)                                  // opcode 0F 43: condition code NB (same as CMOVNC/CMOVAE)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (register-direct), reg=dst, rm=src
        })
    }
    // CMOVNB m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x43)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM + SIB/displacement for the memory source
        })
    }
    // CMOVNB r32, r32 — no prefix: 32-bit is the default operand size
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x43)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNB m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x43)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVNB r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0])) // REX.W plus R/B register-extension bits
            m.emit(0x0f)
            m.emit(0x43)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNB m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))            // REX with w=1 (REX.W) for the 64-bit operand size
            m.emit(0x0f)
            m.emit(0x43)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for CMOVNB") // no supported form matched the operand types
    }
    return p
}
  5484  
// CMOVNBE performs "Move if not below or equal (CF == 0 and ZF == 0)".
//
// Mnemonic        : CMOVNBE
// Supported forms : (6 forms)
//
//    * CMOVNBE r16, r16    [CMOV]
//    * CMOVNBE m16, r16    [CMOV]
//    * CMOVNBE r32, r32    [CMOV]
//    * CMOVNBE m32, r32    [CMOV]
//    * CMOVNBE r64, r64    [CMOV]
//    * CMOVNBE m64, r64    [CMOV]
//
func (self *Program) CMOVNBE(v0 interface{}, v1 interface{}) *Instruction {
    // Each matching operand form registers one candidate encoder on the
    // instruction; every form requires the CMOV ISA extension.
    p := self.alloc("CMOVNBE", 2, Operands { v0, v1 })
    // CMOVNBE r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // operand-size override prefix: 16-bit
            m.rexo(hcode(v[1]), v[0], false)              // REX prefix, emitted only when extended registers are used
            m.emit(0x0f)
            m.emit(0x47)                                  // opcode 0F 47: condition code NBE (same as CMOVA)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (register-direct), reg=dst, rm=src
        })
    }
    // CMOVNBE m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x47)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM + SIB/displacement for the memory source
        })
    }
    // CMOVNBE r32, r32 — no prefix: 32-bit is the default operand size
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x47)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNBE m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x47)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVNBE r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0])) // REX.W plus R/B register-extension bits
            m.emit(0x0f)
            m.emit(0x47)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNBE m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))            // REX with w=1 (REX.W) for the 64-bit operand size
            m.emit(0x0f)
            m.emit(0x47)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for CMOVNBE") // no supported form matched the operand types
    }
    return p
}
  5572  
// CMOVNC performs "Move if not carry (CF == 0)".
//
// Mnemonic        : CMOVNC
// Supported forms : (6 forms)
//
//    * CMOVNC r16, r16    [CMOV]
//    * CMOVNC m16, r16    [CMOV]
//    * CMOVNC r32, r32    [CMOV]
//    * CMOVNC m32, r32    [CMOV]
//    * CMOVNC r64, r64    [CMOV]
//    * CMOVNC m64, r64    [CMOV]
//
func (self *Program) CMOVNC(v0 interface{}, v1 interface{}) *Instruction {
    // Each matching operand form registers one candidate encoder on the
    // instruction; every form requires the CMOV ISA extension.
    p := self.alloc("CMOVNC", 2, Operands { v0, v1 })
    // CMOVNC r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // operand-size override prefix: 16-bit
            m.rexo(hcode(v[1]), v[0], false)              // REX prefix, emitted only when extended registers are used
            m.emit(0x0f)
            m.emit(0x43)                                  // opcode 0F 43: condition code NC (same as CMOVNB/CMOVAE)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (register-direct), reg=dst, rm=src
        })
    }
    // CMOVNC m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x43)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM + SIB/displacement for the memory source
        })
    }
    // CMOVNC r32, r32 — no prefix: 32-bit is the default operand size
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x43)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNC m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x43)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVNC r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0])) // REX.W plus R/B register-extension bits
            m.emit(0x0f)
            m.emit(0x43)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNC m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))            // REX with w=1 (REX.W) for the 64-bit operand size
            m.emit(0x0f)
            m.emit(0x43)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for CMOVNC") // no supported form matched the operand types
    }
    return p
}
  5660  
// CMOVNE performs "Move if not equal (ZF == 0)".
//
// Mnemonic        : CMOVNE
// Supported forms : (6 forms)
//
//    * CMOVNE r16, r16    [CMOV]
//    * CMOVNE m16, r16    [CMOV]
//    * CMOVNE r32, r32    [CMOV]
//    * CMOVNE m32, r32    [CMOV]
//    * CMOVNE r64, r64    [CMOV]
//    * CMOVNE m64, r64    [CMOV]
//
func (self *Program) CMOVNE(v0 interface{}, v1 interface{}) *Instruction {
    // Each matching operand form registers one candidate encoder on the
    // instruction; every form requires the CMOV ISA extension.
    p := self.alloc("CMOVNE", 2, Operands { v0, v1 })
    // CMOVNE r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // operand-size override prefix: 16-bit
            m.rexo(hcode(v[1]), v[0], false)              // REX prefix, emitted only when extended registers are used
            m.emit(0x0f)
            m.emit(0x45)                                  // opcode 0F 45: condition code NE/NZ
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (register-direct), reg=dst, rm=src
        })
    }
    // CMOVNE m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x45)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM + SIB/displacement for the memory source
        })
    }
    // CMOVNE r32, r32 — no prefix: 32-bit is the default operand size
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x45)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNE m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x45)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVNE r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0])) // REX.W plus R/B register-extension bits
            m.emit(0x0f)
            m.emit(0x45)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNE m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))            // REX with w=1 (REX.W) for the 64-bit operand size
            m.emit(0x0f)
            m.emit(0x45)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for CMOVNE") // no supported form matched the operand types
    }
    return p
}
  5748  
// CMOVNG performs "Move if not greater (ZF == 1 or SF != OF)".
//
// Mnemonic        : CMOVNG
// Supported forms : (6 forms)
//
//    * CMOVNG r16, r16    [CMOV]
//    * CMOVNG m16, r16    [CMOV]
//    * CMOVNG r32, r32    [CMOV]
//    * CMOVNG m32, r32    [CMOV]
//    * CMOVNG r64, r64    [CMOV]
//    * CMOVNG m64, r64    [CMOV]
//
func (self *Program) CMOVNG(v0 interface{}, v1 interface{}) *Instruction {
    // Each matching operand form registers one candidate encoder on the
    // instruction; every form requires the CMOV ISA extension.
    p := self.alloc("CMOVNG", 2, Operands { v0, v1 })
    // CMOVNG r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // operand-size override prefix: 16-bit
            m.rexo(hcode(v[1]), v[0], false)              // REX prefix, emitted only when extended registers are used
            m.emit(0x0f)
            m.emit(0x4e)                                  // opcode 0F 4E: condition code NG (same as CMOVLE)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (register-direct), reg=dst, rm=src
        })
    }
    // CMOVNG m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM + SIB/displacement for the memory source
        })
    }
    // CMOVNG r32, r32 — no prefix: 32-bit is the default operand size
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x4e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNG m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVNG r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0])) // REX.W plus R/B register-extension bits
            m.emit(0x0f)
            m.emit(0x4e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNG m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))            // REX with w=1 (REX.W) for the 64-bit operand size
            m.emit(0x0f)
            m.emit(0x4e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for CMOVNG") // no supported form matched the operand types
    }
    return p
}
  5836  
// CMOVNGE performs "Move if not greater or equal (SF != OF)".
//
// Mnemonic        : CMOVNGE
// Supported forms : (6 forms)
//
//    * CMOVNGE r16, r16    [CMOV]
//    * CMOVNGE m16, r16    [CMOV]
//    * CMOVNGE r32, r32    [CMOV]
//    * CMOVNGE m32, r32    [CMOV]
//    * CMOVNGE r64, r64    [CMOV]
//    * CMOVNGE m64, r64    [CMOV]
//
func (self *Program) CMOVNGE(v0 interface{}, v1 interface{}) *Instruction {
    // Each matching operand form registers one candidate encoder on the
    // instruction; every form requires the CMOV ISA extension.
    p := self.alloc("CMOVNGE", 2, Operands { v0, v1 })
    // CMOVNGE r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // operand-size override prefix: 16-bit
            m.rexo(hcode(v[1]), v[0], false)              // REX prefix, emitted only when extended registers are used
            m.emit(0x0f)
            m.emit(0x4c)                                  // opcode 0F 4C: condition code NGE (same as CMOVL)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (register-direct), reg=dst, rm=src
        })
    }
    // CMOVNGE m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM + SIB/displacement for the memory source
        })
    }
    // CMOVNGE r32, r32 — no prefix: 32-bit is the default operand size
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x4c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNGE m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVNGE r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0])) // REX.W plus R/B register-extension bits
            m.emit(0x0f)
            m.emit(0x4c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNGE m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))            // REX with w=1 (REX.W) for the 64-bit operand size
            m.emit(0x0f)
            m.emit(0x4c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for CMOVNGE") // no supported form matched the operand types
    }
    return p
}
  5924  
// CMOVNL performs "Move if not less (SF == OF)".
//
// Mnemonic        : CMOVNL
// Supported forms : (6 forms)
//
//    * CMOVNL r16, r16    [CMOV]
//    * CMOVNL m16, r16    [CMOV]
//    * CMOVNL r32, r32    [CMOV]
//    * CMOVNL m32, r32    [CMOV]
//    * CMOVNL r64, r64    [CMOV]
//    * CMOVNL m64, r64    [CMOV]
//
func (self *Program) CMOVNL(v0 interface{}, v1 interface{}) *Instruction {
    // Each matching operand form registers one candidate encoder on the
    // instruction; every form requires the CMOV ISA extension.
    p := self.alloc("CMOVNL", 2, Operands { v0, v1 })
    // CMOVNL r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // operand-size override prefix: 16-bit
            m.rexo(hcode(v[1]), v[0], false)              // REX prefix, emitted only when extended registers are used
            m.emit(0x0f)
            m.emit(0x4d)                                  // opcode 0F 4D: condition code NL (same as CMOVGE)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (register-direct), reg=dst, rm=src
        })
    }
    // CMOVNL m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM + SIB/displacement for the memory source
        })
    }
    // CMOVNL r32, r32 — no prefix: 32-bit is the default operand size
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x4d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNL m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVNL r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0])) // REX.W plus R/B register-extension bits
            m.emit(0x0f)
            m.emit(0x4d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNL m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))            // REX with w=1 (REX.W) for the 64-bit operand size
            m.emit(0x0f)
            m.emit(0x4d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for CMOVNL") // no supported form matched the operand types
    }
    return p
}
  6012  
// CMOVNLE performs "Move if not less or equal (ZF == 0 and SF == OF)".
//
// Mnemonic        : CMOVNLE
// Supported forms : (6 forms)
//
//    * CMOVNLE r16, r16    [CMOV]
//    * CMOVNLE m16, r16    [CMOV]
//    * CMOVNLE r32, r32    [CMOV]
//    * CMOVNLE m32, r32    [CMOV]
//    * CMOVNLE r64, r64    [CMOV]
//    * CMOVNLE m64, r64    [CMOV]
//
func (self *Program) CMOVNLE(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVNLE", 2, Operands { v0, v1 })
    // Each supported operand form below registers its encoder closure on p.
    // All forms encode the two-byte opcode 0F 4F (CMOVNLE / CMOVG).
    // CMOVNLE r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // operand-size override: selects the 16-bit form
            m.rexo(hcode(v[1]), v[0], false)              // optional REX prefix for extended registers
            m.emit(0x0f)
            m.emit(0x4f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (reg-direct), reg=dst, rm=src
        })
    }
    // CMOVNLE m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the memory operand
        })
    }
    // CMOVNLE r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x4f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNLE m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVNLE r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0])) // REX.W prefix (64-bit operand) with R/B extension bits
            m.emit(0x0f)
            m.emit(0x4f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNLE m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))            // REX prefix with W=1 (64-bit operand size)
            m.emit(0x0f)
            m.emit(0x4f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no supported operand form matched
    if p.len == 0 {
        panic("invalid operands for CMOVNLE")
    }
    return p
}
  6100  
// CMOVNO performs "Move if not overflow (OF == 0)".
//
// Mnemonic        : CMOVNO
// Supported forms : (6 forms)
//
//    * CMOVNO r16, r16    [CMOV]
//    * CMOVNO m16, r16    [CMOV]
//    * CMOVNO r32, r32    [CMOV]
//    * CMOVNO m32, r32    [CMOV]
//    * CMOVNO r64, r64    [CMOV]
//    * CMOVNO m64, r64    [CMOV]
//
func (self *Program) CMOVNO(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVNO", 2, Operands { v0, v1 })
    // Each supported operand form below registers its encoder closure on p.
    // All forms encode the two-byte opcode 0F 41 (CMOVNO).
    // CMOVNO r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // operand-size override: selects the 16-bit form
            m.rexo(hcode(v[1]), v[0], false)              // optional REX prefix for extended registers
            m.emit(0x0f)
            m.emit(0x41)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (reg-direct), reg=dst, rm=src
        })
    }
    // CMOVNO m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x41)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the memory operand
        })
    }
    // CMOVNO r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x41)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNO m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x41)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVNO r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0])) // REX.W prefix (64-bit operand) with R/B extension bits
            m.emit(0x0f)
            m.emit(0x41)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNO m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))            // REX prefix with W=1 (64-bit operand size)
            m.emit(0x0f)
            m.emit(0x41)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no supported operand form matched
    if p.len == 0 {
        panic("invalid operands for CMOVNO")
    }
    return p
}
  6188  
// CMOVNP performs "Move if not parity (PF == 0)".
//
// Mnemonic        : CMOVNP
// Supported forms : (6 forms)
//
//    * CMOVNP r16, r16    [CMOV]
//    * CMOVNP m16, r16    [CMOV]
//    * CMOVNP r32, r32    [CMOV]
//    * CMOVNP m32, r32    [CMOV]
//    * CMOVNP r64, r64    [CMOV]
//    * CMOVNP m64, r64    [CMOV]
//
func (self *Program) CMOVNP(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVNP", 2, Operands { v0, v1 })
    // Each supported operand form below registers its encoder closure on p.
    // All forms encode the two-byte opcode 0F 4B (CMOVNP / CMOVPO).
    // CMOVNP r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // operand-size override: selects the 16-bit form
            m.rexo(hcode(v[1]), v[0], false)              // optional REX prefix for extended registers
            m.emit(0x0f)
            m.emit(0x4b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (reg-direct), reg=dst, rm=src
        })
    }
    // CMOVNP m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the memory operand
        })
    }
    // CMOVNP r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x4b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNP m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVNP r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0])) // REX.W prefix (64-bit operand) with R/B extension bits
            m.emit(0x0f)
            m.emit(0x4b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNP m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))            // REX prefix with W=1 (64-bit operand size)
            m.emit(0x0f)
            m.emit(0x4b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no supported operand form matched
    if p.len == 0 {
        panic("invalid operands for CMOVNP")
    }
    return p
}
  6276  
// CMOVNS performs "Move if not sign (SF == 0)".
//
// Mnemonic        : CMOVNS
// Supported forms : (6 forms)
//
//    * CMOVNS r16, r16    [CMOV]
//    * CMOVNS m16, r16    [CMOV]
//    * CMOVNS r32, r32    [CMOV]
//    * CMOVNS m32, r32    [CMOV]
//    * CMOVNS r64, r64    [CMOV]
//    * CMOVNS m64, r64    [CMOV]
//
func (self *Program) CMOVNS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVNS", 2, Operands { v0, v1 })
    // Each supported operand form below registers its encoder closure on p.
    // All forms encode the two-byte opcode 0F 49 (CMOVNS).
    // CMOVNS r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // operand-size override: selects the 16-bit form
            m.rexo(hcode(v[1]), v[0], false)              // optional REX prefix for extended registers
            m.emit(0x0f)
            m.emit(0x49)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (reg-direct), reg=dst, rm=src
        })
    }
    // CMOVNS m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x49)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the memory operand
        })
    }
    // CMOVNS r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x49)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNS m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x49)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVNS r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0])) // REX.W prefix (64-bit operand) with R/B extension bits
            m.emit(0x0f)
            m.emit(0x49)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNS m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))            // REX prefix with W=1 (64-bit operand size)
            m.emit(0x0f)
            m.emit(0x49)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no supported operand form matched
    if p.len == 0 {
        panic("invalid operands for CMOVNS")
    }
    return p
}
  6364  
// CMOVNZ performs "Move if not zero (ZF == 0)".
//
// Mnemonic        : CMOVNZ
// Supported forms : (6 forms)
//
//    * CMOVNZ r16, r16    [CMOV]
//    * CMOVNZ m16, r16    [CMOV]
//    * CMOVNZ r32, r32    [CMOV]
//    * CMOVNZ m32, r32    [CMOV]
//    * CMOVNZ r64, r64    [CMOV]
//    * CMOVNZ m64, r64    [CMOV]
//
func (self *Program) CMOVNZ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVNZ", 2, Operands { v0, v1 })
    // Each supported operand form below registers its encoder closure on p.
    // All forms encode the two-byte opcode 0F 45 (CMOVNZ / CMOVNE).
    // CMOVNZ r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // operand-size override: selects the 16-bit form
            m.rexo(hcode(v[1]), v[0], false)              // optional REX prefix for extended registers
            m.emit(0x0f)
            m.emit(0x45)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (reg-direct), reg=dst, rm=src
        })
    }
    // CMOVNZ m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x45)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the memory operand
        })
    }
    // CMOVNZ r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x45)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNZ m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x45)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVNZ r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0])) // REX.W prefix (64-bit operand) with R/B extension bits
            m.emit(0x0f)
            m.emit(0x45)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVNZ m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))            // REX prefix with W=1 (64-bit operand size)
            m.emit(0x0f)
            m.emit(0x45)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no supported operand form matched
    if p.len == 0 {
        panic("invalid operands for CMOVNZ")
    }
    return p
}
  6452  
// CMOVO performs "Move if overflow (OF == 1)".
//
// Mnemonic        : CMOVO
// Supported forms : (6 forms)
//
//    * CMOVO r16, r16    [CMOV]
//    * CMOVO m16, r16    [CMOV]
//    * CMOVO r32, r32    [CMOV]
//    * CMOVO m32, r32    [CMOV]
//    * CMOVO r64, r64    [CMOV]
//    * CMOVO m64, r64    [CMOV]
//
func (self *Program) CMOVO(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVO", 2, Operands { v0, v1 })
    // Each supported operand form below registers its encoder closure on p.
    // All forms encode the two-byte opcode 0F 40 (CMOVO).
    // CMOVO r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // operand-size override: selects the 16-bit form
            m.rexo(hcode(v[1]), v[0], false)              // optional REX prefix for extended registers
            m.emit(0x0f)
            m.emit(0x40)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (reg-direct), reg=dst, rm=src
        })
    }
    // CMOVO m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x40)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the memory operand
        })
    }
    // CMOVO r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x40)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVO m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x40)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVO r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0])) // REX.W prefix (64-bit operand) with R/B extension bits
            m.emit(0x0f)
            m.emit(0x40)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVO m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))            // REX prefix with W=1 (64-bit operand size)
            m.emit(0x0f)
            m.emit(0x40)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no supported operand form matched
    if p.len == 0 {
        panic("invalid operands for CMOVO")
    }
    return p
}
  6540  
// CMOVP performs "Move if parity (PF == 1)".
//
// Mnemonic        : CMOVP
// Supported forms : (6 forms)
//
//    * CMOVP r16, r16    [CMOV]
//    * CMOVP m16, r16    [CMOV]
//    * CMOVP r32, r32    [CMOV]
//    * CMOVP m32, r32    [CMOV]
//    * CMOVP r64, r64    [CMOV]
//    * CMOVP m64, r64    [CMOV]
//
func (self *Program) CMOVP(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVP", 2, Operands { v0, v1 })
    // Each supported operand form below registers its encoder closure on p.
    // All forms encode the two-byte opcode 0F 4A (CMOVP / CMOVPE).
    // CMOVP r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // operand-size override: selects the 16-bit form
            m.rexo(hcode(v[1]), v[0], false)              // optional REX prefix for extended registers
            m.emit(0x0f)
            m.emit(0x4a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (reg-direct), reg=dst, rm=src
        })
    }
    // CMOVP m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the memory operand
        })
    }
    // CMOVP r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x4a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVP m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVP r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0])) // REX.W prefix (64-bit operand) with R/B extension bits
            m.emit(0x0f)
            m.emit(0x4a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVP m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))            // REX prefix with W=1 (64-bit operand size)
            m.emit(0x0f)
            m.emit(0x4a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no supported operand form matched
    if p.len == 0 {
        panic("invalid operands for CMOVP")
    }
    return p
}
  6628  
// CMOVPE performs "Move if parity even (PF == 1)".
//
// Mnemonic        : CMOVPE
// Supported forms : (6 forms)
//
//    * CMOVPE r16, r16    [CMOV]
//    * CMOVPE m16, r16    [CMOV]
//    * CMOVPE r32, r32    [CMOV]
//    * CMOVPE m32, r32    [CMOV]
//    * CMOVPE r64, r64    [CMOV]
//    * CMOVPE m64, r64    [CMOV]
//
func (self *Program) CMOVPE(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVPE", 2, Operands { v0, v1 })
    // Each supported operand form below registers its encoder closure on p.
    // All forms encode the two-byte opcode 0F 4A; CMOVPE is an alias of CMOVP.
    // CMOVPE r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // operand-size override: selects the 16-bit form
            m.rexo(hcode(v[1]), v[0], false)              // optional REX prefix for extended registers
            m.emit(0x0f)
            m.emit(0x4a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (reg-direct), reg=dst, rm=src
        })
    }
    // CMOVPE m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the memory operand
        })
    }
    // CMOVPE r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x4a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVPE m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVPE r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0])) // REX.W prefix (64-bit operand) with R/B extension bits
            m.emit(0x0f)
            m.emit(0x4a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVPE m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))            // REX prefix with W=1 (64-bit operand size)
            m.emit(0x0f)
            m.emit(0x4a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no supported operand form matched
    if p.len == 0 {
        panic("invalid operands for CMOVPE")
    }
    return p
}
  6716  
// CMOVPO performs "Move if parity odd (PF == 0)".
//
// Mnemonic        : CMOVPO
// Supported forms : (6 forms)
//
//    * CMOVPO r16, r16    [CMOV]
//    * CMOVPO m16, r16    [CMOV]
//    * CMOVPO r32, r32    [CMOV]
//    * CMOVPO m32, r32    [CMOV]
//    * CMOVPO r64, r64    [CMOV]
//    * CMOVPO m64, r64    [CMOV]
//
func (self *Program) CMOVPO(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVPO", 2, Operands { v0, v1 })
    // Each supported operand form below registers its encoder closure on p.
    // All forms encode the two-byte opcode 0F 4B; CMOVPO is an alias of CMOVNP.
    // CMOVPO r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // operand-size override: selects the 16-bit form
            m.rexo(hcode(v[1]), v[0], false)              // optional REX prefix for extended registers
            m.emit(0x0f)
            m.emit(0x4b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (reg-direct), reg=dst, rm=src
        })
    }
    // CMOVPO m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the memory operand
        })
    }
    // CMOVPO r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x4b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVPO m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x4b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVPO r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0])) // REX.W prefix (64-bit operand) with R/B extension bits
            m.emit(0x0f)
            m.emit(0x4b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVPO m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))            // REX prefix with W=1 (64-bit operand size)
            m.emit(0x0f)
            m.emit(0x4b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no supported operand form matched
    if p.len == 0 {
        panic("invalid operands for CMOVPO")
    }
    return p
}
  6804  
// CMOVS performs "Move if sign (SF == 1)".
//
// Mnemonic        : CMOVS
// Supported forms : (6 forms)
//
//    * CMOVS r16, r16    [CMOV]
//    * CMOVS m16, r16    [CMOV]
//    * CMOVS r32, r32    [CMOV]
//    * CMOVS m32, r32    [CMOV]
//    * CMOVS r64, r64    [CMOV]
//    * CMOVS m64, r64    [CMOV]
//
func (self *Program) CMOVS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVS", 2, Operands { v0, v1 })
    // Each supported operand form below registers its encoder closure on p.
    // All forms encode the two-byte opcode 0F 48 (CMOVS). Note: the 0x48
    // opcode byte is unrelated to the 0x48 REX.W base used in the r64 form.
    // CMOVS r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // operand-size override: selects the 16-bit form
            m.rexo(hcode(v[1]), v[0], false)              // optional REX prefix for extended registers
            m.emit(0x0f)
            m.emit(0x48)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (reg-direct), reg=dst, rm=src
        })
    }
    // CMOVS m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x48)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the memory operand
        })
    }
    // CMOVS r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x48)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVS m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x48)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVS r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0])) // REX.W prefix (64-bit operand) with R/B extension bits
            m.emit(0x0f)
            m.emit(0x48)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVS m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))            // REX prefix with W=1 (64-bit operand size)
            m.emit(0x0f)
            m.emit(0x48)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no supported operand form matched
    if p.len == 0 {
        panic("invalid operands for CMOVS")
    }
    return p
}
  6892  
// CMOVZ performs "Move if zero (ZF == 1)".
//
// Mnemonic        : CMOVZ
// Supported forms : (6 forms)
//
//    * CMOVZ r16, r16    [CMOV]
//    * CMOVZ m16, r16    [CMOV]
//    * CMOVZ r32, r32    [CMOV]
//    * CMOVZ m32, r32    [CMOV]
//    * CMOVZ r64, r64    [CMOV]
//    * CMOVZ m64, r64    [CMOV]
//
func (self *Program) CMOVZ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMOVZ", 2, Operands { v0, v1 })
    // CMOVZ encodes as 0F 44 /r. Each matching operand form registers one encoder;
    // 16-bit forms carry the 0x66 operand-size prefix, 64-bit forms a REX.W prefix.
    // CMOVZ r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x44)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVZ m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x44)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVZ r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x44)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVZ m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x44)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMOVZ r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // REX prefix: 0x48 sets REX.W; hcode(v[1])<<2 is REX.R, hcode(v[0]) is REX.B.
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
            m.emit(0x0f)
            m.emit(0x44)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMOVZ m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_CMOV)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))
            m.emit(0x0f)
            m.emit(0x44)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoder was registered: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for CMOVZ")
    }
    return p
}
  6980  
// CMPB performs "Compare Two Operands".
//
// Mnemonic        : CMP
// Supported forms : (6 forms)
//
//    * CMPB imm8, al
//    * CMPB imm8, r8
//    * CMPB r8, r8
//    * CMPB m8, r8
//    * CMPB imm8, m8
//    * CMPB r8, m8
//
func (self *Program) CMPB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMPB", 2, Operands { v0, v1 })
    // 8-bit CMP. Opcodes used: 3C (imm8 with AL), 80 /7 (imm8 with r/m8),
    // 38 /r (r8 to r/m8) and 3A /r (r/m8 to r8). Register forms add both
    // ModRM orderings so the assembler can pick the shorter/preferred one.
    // CMPB imm8, al
    if isImm8(v0) && v1 == AL {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x3c)
            m.imm1(toImmAny(v[0]))
        })
    }
    // CMPB imm8, r8
    if isImm8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0x80)
            // 0xf8 | reg: ModRM with mod=11 and reg field 7 (the CMP /7 extension).
            m.emit(0xf8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // CMPB r8, r8
    if isReg8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], isReg8REX(v[0]) || isReg8REX(v[1]))
            m.emit(0x38)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], isReg8REX(v[0]) || isReg8REX(v[1]))
            m.emit(0x3a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMPB m8, r8
    if isM8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), isReg8REX(v[1]))
            m.emit(0x3a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMPB imm8, m8
    if isImm8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0x80)
            m.mrsd(7, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // CMPB r8, m8
    if isReg8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), isReg8REX(v[0]))
            m.emit(0x38)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No encoder was registered: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for CMPB")
    }
    return p
}
  7060  
// CMPL performs "Compare Two Operands".
//
// Mnemonic        : CMP
// Supported forms : (8 forms)
//
//    * CMPL imm32, eax
//    * CMPL imm8, r32
//    * CMPL imm32, r32
//    * CMPL r32, r32
//    * CMPL m32, r32
//    * CMPL imm8, m32
//    * CMPL imm32, m32
//    * CMPL r32, m32
//
func (self *Program) CMPL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMPL", 2, Operands { v0, v1 })
    // 32-bit CMP. Opcodes used: 3D (imm32 with EAX), 83 /7 (sign-extended imm8),
    // 81 /7 (imm32), 39 /r and 3B /r (register/memory forms). The register-register
    // form registers both ModRM orderings.
    // CMPL imm32, eax
    if isImm32(v0) && v1 == EAX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x3d)
            m.imm4(toImmAny(v[0]))
        })
    }
    // CMPL imm8, r32
    if isImm8Ext(v0, 4) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0x83)
            // 0xf8 | reg: ModRM with mod=11 and reg field 7 (the CMP /7 extension).
            m.emit(0xf8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // CMPL imm32, r32
    if isImm32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0x81)
            m.emit(0xf8 | lcode(v[1]))
            m.imm4(toImmAny(v[0]))
        })
    }
    // CMPL r32, r32
    if isReg32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x39)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x3b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMPL m32, r32
    if isM32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x3b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMPL imm8, m32
    if isImm8Ext(v0, 4) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0x83)
            m.mrsd(7, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // CMPL imm32, m32
    if isImm32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0x81)
            m.mrsd(7, addr(v[1]), 1)
            m.imm4(toImmAny(v[0]))
        })
    }
    // CMPL r32, m32
    if isReg32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x39)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No encoder was registered: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for CMPL")
    }
    return p
}
  7162  
// CMPPD performs "Compare Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : CMPPD
// Supported forms : (2 forms)
//
//    * CMPPD imm8, xmm, xmm     [SSE2]
//    * CMPPD imm8, m128, xmm    [SSE2]
//
func (self *Program) CMPPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("CMPPD", 3, Operands { v0, v1, v2 })
    // Encodes as 66 0F C2 /r ib; the trailing imm8 (v0) selects the comparison predicate.
    // CMPPD imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // CMPPD imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xc2)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoder was registered: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for CMPPD")
    }
    return p
}
  7204  
// CMPPS performs "Compare Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : CMPPS
// Supported forms : (2 forms)
//
//    * CMPPS imm8, xmm, xmm     [SSE]
//    * CMPPS imm8, m128, xmm    [SSE]
//
func (self *Program) CMPPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("CMPPS", 3, Operands { v0, v1, v2 })
    // Encodes as 0F C2 /r ib (no mandatory prefix, unlike CMPPD); the trailing
    // imm8 (v0) selects the comparison predicate.
    // CMPPS imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // CMPPS imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xc2)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoder was registered: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for CMPPS")
    }
    return p
}
  7244  
// CMPQ performs "Compare Two Operands".
//
// Mnemonic        : CMP
// Supported forms : (8 forms)
//
//    * CMPQ imm32, rax
//    * CMPQ imm8, r64
//    * CMPQ imm32, r64
//    * CMPQ r64, r64
//    * CMPQ m64, r64
//    * CMPQ imm8, m64
//    * CMPQ imm32, m64
//    * CMPQ r64, m64
//
func (self *Program) CMPQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMPQ", 2, Operands { v0, v1 })
    // 64-bit CMP: same opcodes as CMPL (3D, 83 /7, 81 /7, 39 /r, 3B /r) but
    // with a mandatory REX.W prefix (0x48 base, or via rexm for memory forms).
    // Immediates wider than 8 bits are imm32 sign-extended to 64 bits.
    // CMPQ imm32, rax
    if isImm32(v0) && v1 == RAX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48)
            m.emit(0x3d)
            m.imm4(toImmAny(v[0]))
        })
    }
    // CMPQ imm8, r64
    if isImm8Ext(v0, 8) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0x83)
            // 0xf8 | reg: ModRM with mod=11 and reg field 7 (the CMP /7 extension).
            m.emit(0xf8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // CMPQ imm32, r64
    if isImm32Ext(v0, 8) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0x81)
            m.emit(0xf8 | lcode(v[1]))
            m.imm4(toImmAny(v[0]))
        })
    }
    // CMPQ r64, r64
    if isReg64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1]))
            m.emit(0x39)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
            m.emit(0x3b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMPQ m64, r64
    if isM64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))
            m.emit(0x3b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMPQ imm8, m64
    if isImm8Ext(v0, 8) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0x83)
            m.mrsd(7, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // CMPQ imm32, m64
    if isImm32Ext(v0, 8) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0x81)
            m.mrsd(7, addr(v[1]), 1)
            m.imm4(toImmAny(v[0]))
        })
    }
    // CMPQ r64, m64
    if isReg64(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[0]), addr(v[1]))
            m.emit(0x39)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No encoder was registered: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for CMPQ")
    }
    return p
}
  7347  
// CMPSD performs "Compare Scalar Double-Precision Floating-Point Values".
//
// Note: this is the SSE2 scalar compare (F2 0F C2), not the legacy string
// instruction of the same mnemonic.
//
// Mnemonic        : CMPSD
// Supported forms : (2 forms)
//
//    * CMPSD imm8, xmm, xmm    [SSE2]
//    * CMPSD imm8, m64, xmm    [SSE2]
//
func (self *Program) CMPSD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("CMPSD", 3, Operands { v0, v1, v2 })
    // Encodes as F2 0F C2 /r ib; the trailing imm8 (v0) selects the comparison predicate.
    // CMPSD imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // CMPSD imm8, m64, xmm
    if isImm8(v0) && isM64(v1) && isXMM(v2) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xc2)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoder was registered: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for CMPSD")
    }
    return p
}
  7389  
// CMPSS performs "Compare Scalar Single-Precision Floating-Point Values".
//
// Mnemonic        : CMPSS
// Supported forms : (2 forms)
//
//    * CMPSS imm8, xmm, xmm    [SSE]
//    * CMPSS imm8, m32, xmm    [SSE]
//
func (self *Program) CMPSS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("CMPSS", 3, Operands { v0, v1, v2 })
    // Encodes as F3 0F C2 /r ib; the trailing imm8 (v0) selects the comparison predicate.
    // CMPSS imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // CMPSS imm8, m32, xmm
    if isImm8(v0) && isM32(v1) && isXMM(v2) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xc2)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoder was registered: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for CMPSS")
    }
    return p
}
  7431  
// CMPW performs "Compare Two Operands".
//
// Mnemonic        : CMP
// Supported forms : (8 forms)
//
//    * CMPW imm16, ax
//    * CMPW imm8, r16
//    * CMPW imm16, r16
//    * CMPW r16, r16
//    * CMPW m16, r16
//    * CMPW imm8, m16
//    * CMPW imm16, m16
//    * CMPW r16, m16
//
func (self *Program) CMPW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMPW", 2, Operands { v0, v1 })
    // 16-bit CMP: same opcodes as CMPL (3D, 83 /7, 81 /7, 39 /r, 3B /r) but
    // every form is preceded by the 0x66 operand-size override prefix.
    // CMPW imm16, ax
    if isImm16(v0) && v1 == AX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.emit(0x3d)
            m.imm2(toImmAny(v[0]))
        })
    }
    // CMPW imm8, r16
    if isImm8Ext(v0, 2) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0x83)
            // 0xf8 | reg: ModRM with mod=11 and reg field 7 (the CMP /7 extension).
            m.emit(0xf8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // CMPW imm16, r16
    if isImm16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0x81)
            m.emit(0xf8 | lcode(v[1]))
            m.imm2(toImmAny(v[0]))
        })
    }
    // CMPW r16, r16
    if isReg16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x39)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x3b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CMPW m16, r16
    if isM16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x3b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CMPW imm8, m16
    if isImm8Ext(v0, 2) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0x83)
            m.mrsd(7, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // CMPW imm16, m16
    if isImm16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0x81)
            m.mrsd(7, addr(v[1]), 1)
            m.imm2(toImmAny(v[0]))
        })
    }
    // CMPW r16, m16
    if isReg16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x39)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No encoder was registered: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for CMPW")
    }
    return p
}
  7542  
// CMPXCHG16B performs "Compare and Exchange 16 Bytes".
//
// Mnemonic        : CMPXCHG16B
// Supported forms : (1 form)
//
//    * CMPXCHG16B m128
//
func (self *Program) CMPXCHG16B(v0 interface{}) *Instruction {
    p := self.alloc("CMPXCHG16B", 1, Operands { v0 })
    // Encodes as REX.W 0F C7 /1; the REX.W prefix (via rexm with w=1)
    // distinguishes it from the 8-byte CMPXCHG8B form of 0F C7 /1.
    // CMPXCHG16B m128
    if isM128(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[0]))
            m.emit(0x0f)
            m.emit(0xc7)
            m.mrsd(1, addr(v[0]), 1)
        })
    }
    // No encoder was registered: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for CMPXCHG16B")
    }
    return p
}
  7567  
// CMPXCHG8B performs "Compare and Exchange 8 Bytes".
//
// Mnemonic        : CMPXCHG8B
// Supported forms : (1 form)
//
//    * CMPXCHG8B m64
//
func (self *Program) CMPXCHG8B(v0 interface{}) *Instruction {
    p := self.alloc("CMPXCHG8B", 1, Operands { v0 })
    // Encodes as 0F C7 /1 (no REX.W; compare the CMPXCHG16B encoder above).
    // CMPXCHG8B m64
    if isM64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xc7)
            m.mrsd(1, addr(v[0]), 1)
        })
    }
    // No encoder was registered: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for CMPXCHG8B")
    }
    return p
}
  7592  
// CMPXCHGB performs "Compare and Exchange".
//
// Mnemonic        : CMPXCHG
// Supported forms : (2 forms)
//
//    * CMPXCHGB r8, r8
//    * CMPXCHGB r8, m8
//
func (self *Program) CMPXCHGB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMPXCHGB", 2, Operands { v0, v1 })
    // 8-bit CMPXCHG encodes as 0F B0 /r (source register in ModRM.reg,
    // destination in ModRM.r/m). A REX prefix is forced when either register
    // requires REX-only byte encoding (isReg8REX).
    // CMPXCHGB r8, r8
    if isReg8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], isReg8REX(v[0]) || isReg8REX(v[1]))
            m.emit(0x0f)
            m.emit(0xb0)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // CMPXCHGB r8, m8
    if isReg8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), isReg8REX(v[0]))
            m.emit(0x0f)
            m.emit(0xb0)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No encoder was registered: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for CMPXCHGB")
    }
    return p
}
  7628  
// CMPXCHGL performs "Compare and Exchange".
//
// Mnemonic        : CMPXCHG
// Supported forms : (2 forms)
//
//    * CMPXCHGL r32, r32
//    * CMPXCHGL r32, m32
//
func (self *Program) CMPXCHGL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMPXCHGL", 2, Operands { v0, v1 })
    // 32-bit CMPXCHG encodes as 0F B1 /r (source register in ModRM.reg,
    // destination in ModRM.r/m).
    // CMPXCHGL r32, r32
    if isReg32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0xb1)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // CMPXCHGL r32, m32
    if isReg32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xb1)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No encoder was registered: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for CMPXCHGL")
    }
    return p
}
  7664  
// CMPXCHGQ performs "Compare and Exchange".
//
// Mnemonic        : CMPXCHG
// Supported forms : (2 forms)
//
//    * CMPXCHGQ r64, r64
//    * CMPXCHGQ r64, m64
//
func (self *Program) CMPXCHGQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMPXCHGQ", 2, Operands { v0, v1 })
    // 64-bit CMPXCHG encodes as REX.W 0F B1 /r (source register in ModRM.reg,
    // destination in ModRM.r/m).
    // CMPXCHGQ r64, r64
    if isReg64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // REX prefix: 0x48 sets REX.W; hcode(v[0])<<2 is REX.R, hcode(v[1]) is REX.B.
            m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1]))
            m.emit(0x0f)
            m.emit(0xb1)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // CMPXCHGQ r64, m64
    if isReg64(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[0]), addr(v[1]))
            m.emit(0x0f)
            m.emit(0xb1)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No encoder was registered: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for CMPXCHGQ")
    }
    return p
}
  7700  
// CMPXCHGW performs "Compare and Exchange".
//
// Mnemonic        : CMPXCHG
// Supported forms : (2 forms)
//
//    * CMPXCHGW r16, r16
//    * CMPXCHGW r16, m16
//
func (self *Program) CMPXCHGW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CMPXCHGW", 2, Operands { v0, v1 })
    // 16-bit CMPXCHG encodes as 66 0F B1 /r (the 0x66 operand-size prefix
    // selects 16-bit operands; source register in ModRM.reg, destination in ModRM.r/m).
    // CMPXCHGW r16, r16
    if isReg16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0xb1)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // CMPXCHGW r16, m16
    if isReg16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xb1)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No encoder was registered: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for CMPXCHGW")
    }
    return p
}
  7738  
// COMISD performs "Compare Scalar Ordered Double-Precision Floating-Point Values and Set EFLAGS".
//
// Mnemonic        : COMISD
// Supported forms : (2 forms)
//
//    * COMISD xmm, xmm    [SSE2]
//    * COMISD m64, xmm    [SSE2]
//
// Both forms require SSE2; the function panics if the operands match
// no supported form.
func (self *Program) COMISD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("COMISD", 2, Operands { v0, v1 })
    // COMISD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: 66 [REX] 0F 2F /r
            m.emit(0x66)                                    // mandatory 0x66 prefix (selects the double-precision form)
            m.rexo(hcode(v[1]), v[0], false)                // optional REX for extended XMM registers
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0x2f)                                    // COMISD opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg = destination v1, r/m = source v0
        })
    }
    // COMISD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x2f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)   // ModRM + SIB/displacement for the memory source
        })
    }
    if p.len == 0 {
        // no encoder matched any supported form
        panic("invalid operands for COMISD")
    }
    return p
}
  7778  
// COMISS performs "Compare Scalar Ordered Single-Precision Floating-Point Values and Set EFLAGS".
//
// Mnemonic        : COMISS
// Supported forms : (2 forms)
//
//    * COMISS xmm, xmm    [SSE]
//    * COMISS m32, xmm    [SSE]
//
// Both forms require SSE; the function panics if the operands match
// no supported form.
func (self *Program) COMISS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("COMISS", 2, Operands { v0, v1 })
    // COMISS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: [REX] 0F 2F /r (no prefix byte: single-precision form)
            m.rexo(hcode(v[1]), v[0], false)                // optional REX for extended XMM registers
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0x2f)                                    // COMISS opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg = destination v1, r/m = source v0
        })
    }
    // COMISS m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x2f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)   // ModRM + SIB/displacement for the memory source
        })
    }
    if p.len == 0 {
        // no encoder matched any supported form
        panic("invalid operands for COMISS")
    }
    return p
}
  7816  
// CPUID performs "CPU Identification".
//
// Mnemonic        : CPUID
// Supported forms : (1 form)
//
//    * CPUID    [CPUID]
//
// CPUID takes no operands and always encodes as the fixed two-byte
// sequence 0F A2, so no operand matching (and no panic path) is needed.
func (self *Program) CPUID() *Instruction {
    p := self.alloc("CPUID", 0, Operands {  })
    // CPUID
    self.require(ISA_CPUID)
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0x0f)   // two-byte opcode escape
        m.emit(0xa2)   // CPUID opcode
    })
    return p
}
  7835  
// CQTO performs "Convert Quadword to Octaword".
//
// Mnemonic        : CQO
// Supported forms : (1 form)
//
//    * CQTO
//
// CQTO (AT&T name for CQO) takes no operands and always encodes as the
// fixed sequence 48 99, so no operand matching (and no panic path) is
// needed.
func (self *Program) CQTO() *Instruction {
    p := self.alloc("CQTO", 0, Operands {  })
    // CQTO
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0x48)   // REX.W prefix (64-bit operand size)
        m.emit(0x99)   // CQO opcode
    })
    return p
}
  7853  
// CRC32B performs "Accumulate CRC32 Value".
//
// Mnemonic        : CRC32
// Supported forms : (4 forms)
//
//    * CRC32B r8, r32    [SSE4.2]
//    * CRC32B m8, r32    [SSE4.2]
//    * CRC32B r8, r64    [SSE4.2]
//    * CRC32B m8, r64    [SSE4.2]
//
// All forms require SSE4.2; the function panics if the operands match
// no supported form.
func (self *Program) CRC32B(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CRC32B", 2, Operands { v0, v1 })
    // CRC32B r8, r32
    if isReg8(v0) && isReg32(v1) {
        self.require(ISA_SSE4_2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: F2 [REX] 0F 38 F0 /r
            m.emit(0xf2)                                    // mandatory 0xF2 prefix
            m.rexo(hcode(v[1]), v[0], isReg8REX(v[0]))      // optional REX; forced when v0 is a REX-only byte register (SPL/BPL/SIL/DIL)
            m.emit(0x0f)                                    // three-byte opcode escape (0F 38)
            m.emit(0x38)
            m.emit(0xf0)                                    // CRC32 r/m8 opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg = accumulator v1, r/m = source v0
        })
    }
    // CRC32B m8, r32
    if isM8(v0) && isReg32(v1) {
        self.require(ISA_SSE4_2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xf0)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)   // ModRM + SIB/displacement for the memory source
        })
    }
    // CRC32B r8, r64
    if isReg8(v0) && isReg64(v1) {
        self.require(ISA_SSE4_2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: F2 REX.W 0F 38 F0 /r (64-bit accumulator)
            m.emit(0xf2)
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))   // REX.W prefix carrying the high bits of both registers
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xf0)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CRC32B m8, r64
    if isM8(v0) && isReg64(v1) {
        self.require(ISA_SSE4_2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexm(1, hcode(v[1]), addr(v[0]))   // mandatory REX with W=1 for the 64-bit accumulator
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xf0)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        // no encoder matched any supported form
        panic("invalid operands for CRC32B")
    }
    return p
}
  7923  
// CRC32L performs "Accumulate CRC32 Value".
//
// Mnemonic        : CRC32
// Supported forms : (2 forms)
//
//    * CRC32L r32, r32    [SSE4.2]
//    * CRC32L m32, r32    [SSE4.2]
//
// Both forms require SSE4.2; the function panics if the operands match
// no supported form.
func (self *Program) CRC32L(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CRC32L", 2, Operands { v0, v1 })
    // CRC32L r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_SSE4_2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: F2 [REX] 0F 38 F1 /r
            m.emit(0xf2)                                    // mandatory 0xF2 prefix
            m.rexo(hcode(v[1]), v[0], false)                // optional REX for extended registers
            m.emit(0x0f)                                    // three-byte opcode escape (0F 38)
            m.emit(0x38)
            m.emit(0xf1)                                    // CRC32 r/m32 opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg = accumulator v1, r/m = source v0
        })
    }
    // CRC32L m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_SSE4_2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xf1)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)   // ModRM + SIB/displacement for the memory source
        })
    }
    if p.len == 0 {
        // no encoder matched any supported form
        panic("invalid operands for CRC32L")
    }
    return p
}
  7965  
// CRC32Q performs "Accumulate CRC32 Value".
//
// Mnemonic        : CRC32
// Supported forms : (2 forms)
//
//    * CRC32Q r64, r64    [SSE4.2]
//    * CRC32Q m64, r64    [SSE4.2]
//
// Both forms require SSE4.2; the function panics if the operands match
// no supported form.
func (self *Program) CRC32Q(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CRC32Q", 2, Operands { v0, v1 })
    // CRC32Q r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_SSE4_2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: F2 REX.W 0F 38 F1 /r
            m.emit(0xf2)                                    // mandatory 0xF2 prefix
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))   // REX.W prefix carrying the high bits of both registers
            m.emit(0x0f)                                    // three-byte opcode escape (0F 38)
            m.emit(0x38)
            m.emit(0xf1)                                    // CRC32 r/m64 opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg = accumulator v1, r/m = source v0
        })
    }
    // CRC32Q m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_SSE4_2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexm(1, hcode(v[1]), addr(v[0]))   // mandatory REX with W=1 for the memory form
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xf1)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)   // ModRM + SIB/displacement for the memory source
        })
    }
    if p.len == 0 {
        // no encoder matched any supported form
        panic("invalid operands for CRC32Q")
    }
    return p
}
  8007  
// CRC32W performs "Accumulate CRC32 Value".
//
// Mnemonic        : CRC32
// Supported forms : (2 forms)
//
//    * CRC32W r16, r32    [SSE4.2]
//    * CRC32W m16, r32    [SSE4.2]
//
// Both forms require SSE4.2; the function panics if the operands match
// no supported form.
func (self *Program) CRC32W(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CRC32W", 2, Operands { v0, v1 })
    // CRC32W r16, r32
    if isReg16(v0) && isReg32(v1) {
        self.require(ISA_SSE4_2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: 66 F2 [REX] 0F 38 F1 /r
            m.emit(0x66)                                    // 16-bit operand-size override
            m.emit(0xf2)                                    // mandatory 0xF2 prefix
            m.rexo(hcode(v[1]), v[0], false)                // optional REX for extended registers
            m.emit(0x0f)                                    // three-byte opcode escape (0F 38)
            m.emit(0x38)
            m.emit(0xf1)                                    // CRC32 r/m16 opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg = accumulator v1, r/m = source v0
        })
    }
    // CRC32W m16, r32
    if isM16(v0) && isReg32(v1) {
        self.require(ISA_SSE4_2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xf1)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)   // ModRM + SIB/displacement for the memory source
        })
    }
    if p.len == 0 {
        // no encoder matched any supported form
        panic("invalid operands for CRC32W")
    }
    return p
}
  8051  
// CVTDQ2PD performs "Convert Packed Dword Integers to Packed Double-Precision FP Values".
//
// Mnemonic        : CVTDQ2PD
// Supported forms : (2 forms)
//
//    * CVTDQ2PD xmm, xmm    [SSE2]
//    * CVTDQ2PD m64, xmm    [SSE2]
//
// Both forms require SSE2; the function panics if the operands match
// no supported form.
func (self *Program) CVTDQ2PD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTDQ2PD", 2, Operands { v0, v1 })
    // CVTDQ2PD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: F3 [REX] 0F E6 /r
            m.emit(0xf3)                                    // mandatory 0xF3 prefix
            m.rexo(hcode(v[1]), v[0], false)                // optional REX for extended XMM registers
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0xe6)                                    // CVTDQ2PD opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg = destination v1, r/m = source v0
        })
    }
    // CVTDQ2PD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)   // ModRM + SIB/displacement for the memory source
        })
    }
    if p.len == 0 {
        // no encoder matched any supported form
        panic("invalid operands for CVTDQ2PD")
    }
    return p
}
  8091  
// CVTDQ2PS performs "Convert Packed Dword Integers to Packed Single-Precision FP Values".
//
// Mnemonic        : CVTDQ2PS
// Supported forms : (2 forms)
//
//    * CVTDQ2PS xmm, xmm     [SSE2]
//    * CVTDQ2PS m128, xmm    [SSE2]
//
// Both forms require SSE2; the function panics if the operands match
// no supported form.
func (self *Program) CVTDQ2PS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTDQ2PS", 2, Operands { v0, v1 })
    // CVTDQ2PS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: [REX] 0F 5B /r (no prefix byte)
            m.rexo(hcode(v[1]), v[0], false)                // optional REX for extended XMM registers
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0x5b)                                    // CVTDQ2PS opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg = destination v1, r/m = source v0
        })
    }
    // CVTDQ2PS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)   // ModRM + SIB/displacement for the memory source
        })
    }
    if p.len == 0 {
        // no encoder matched any supported form
        panic("invalid operands for CVTDQ2PS")
    }
    return p
}
  8129  
// CVTPD2DQ performs "Convert Packed Double-Precision FP Values to Packed Dword Integers".
//
// Mnemonic        : CVTPD2DQ
// Supported forms : (2 forms)
//
//    * CVTPD2DQ xmm, xmm     [SSE2]
//    * CVTPD2DQ m128, xmm    [SSE2]
//
// Both forms require SSE2; the function panics if the operands match
// no supported form.
func (self *Program) CVTPD2DQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTPD2DQ", 2, Operands { v0, v1 })
    // CVTPD2DQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: F2 [REX] 0F E6 /r
            m.emit(0xf2)                                    // mandatory 0xF2 prefix
            m.rexo(hcode(v[1]), v[0], false)                // optional REX for extended XMM registers
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0xe6)                                    // CVTPD2DQ opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg = destination v1, r/m = source v0
        })
    }
    // CVTPD2DQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)   // ModRM + SIB/displacement for the memory source
        })
    }
    if p.len == 0 {
        // no encoder matched any supported form
        panic("invalid operands for CVTPD2DQ")
    }
    return p
}
  8169  
// CVTPD2PI performs "Convert Packed Double-Precision FP Values to Packed Dword Integers".
//
// Mnemonic        : CVTPD2PI
// Supported forms : (2 forms)
//
//    * CVTPD2PI xmm, mm     [SSE]
//    * CVTPD2PI m128, mm    [SSE]
//
// NOTE(review): the Intel SDM lists CVTPD2PI as an SSE2 instruction
// (66 0F 2D), but the generator emitted ISA_SSE here — confirm against
// the instruction database before relying on the ISA requirement.
func (self *Program) CVTPD2PI(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTPD2PI", 2, Operands { v0, v1 })
    // CVTPD2PI xmm, mm
    if isXMM(v0) && isMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: 66 [REX] 0F 2D /r
            m.emit(0x66)                                    // mandatory 0x66 prefix (double-precision form)
            m.rexo(hcode(v[1]), v[0], false)                // optional REX for extended registers
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0x2d)                                    // CVTPD2PI opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg = MMX destination v1, r/m = source v0
        })
    }
    // CVTPD2PI m128, mm
    if isM128(v0) && isMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x2d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)   // ModRM + SIB/displacement for the memory source
        })
    }
    if p.len == 0 {
        // no encoder matched any supported form
        panic("invalid operands for CVTPD2PI")
    }
    return p
}
  8209  
// CVTPD2PS performs "Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values".
//
// Mnemonic        : CVTPD2PS
// Supported forms : (2 forms)
//
//    * CVTPD2PS xmm, xmm     [SSE2]
//    * CVTPD2PS m128, xmm    [SSE2]
//
// Both forms require SSE2; the function panics if the operands match
// no supported form.
func (self *Program) CVTPD2PS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTPD2PS", 2, Operands { v0, v1 })
    // CVTPD2PS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: 66 [REX] 0F 5A /r
            m.emit(0x66)                                    // mandatory 0x66 prefix (double-precision form)
            m.rexo(hcode(v[1]), v[0], false)                // optional REX for extended XMM registers
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0x5a)                                    // CVTPD2PS opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg = destination v1, r/m = source v0
        })
    }
    // CVTPD2PS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)   // ModRM + SIB/displacement for the memory source
        })
    }
    if p.len == 0 {
        // no encoder matched any supported form
        panic("invalid operands for CVTPD2PS")
    }
    return p
}
  8249  
// CVTPI2PD performs "Convert Packed Dword Integers to Packed Double-Precision FP Values".
//
// Mnemonic        : CVTPI2PD
// Supported forms : (2 forms)
//
//    * CVTPI2PD mm, xmm     [SSE2]
//    * CVTPI2PD m64, xmm    [SSE2]
//
// Both forms require SSE2; the function panics if the operands match
// no supported form.
func (self *Program) CVTPI2PD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTPI2PD", 2, Operands { v0, v1 })
    // CVTPI2PD mm, xmm
    if isMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: 66 [REX] 0F 2A /r
            m.emit(0x66)                                    // mandatory 0x66 prefix (double-precision form)
            m.rexo(hcode(v[1]), v[0], false)                // optional REX for extended registers
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0x2a)                                    // CVTPI2PD opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg = XMM destination v1, r/m = MMX source v0
        })
    }
    // CVTPI2PD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x2a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)   // ModRM + SIB/displacement for the memory source
        })
    }
    if p.len == 0 {
        // no encoder matched any supported form
        panic("invalid operands for CVTPI2PD")
    }
    return p
}
  8289  
// CVTPI2PS performs "Convert Packed Dword Integers to Packed Single-Precision FP Values".
//
// Mnemonic        : CVTPI2PS
// Supported forms : (2 forms)
//
//    * CVTPI2PS mm, xmm     [SSE]
//    * CVTPI2PS m64, xmm    [SSE]
//
// Both forms require SSE; the function panics if the operands match
// no supported form.
func (self *Program) CVTPI2PS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTPI2PS", 2, Operands { v0, v1 })
    // CVTPI2PS mm, xmm
    if isMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: [REX] 0F 2A /r (no prefix byte: single-precision form)
            m.rexo(hcode(v[1]), v[0], false)                // optional REX for extended registers
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0x2a)                                    // CVTPI2PS opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg = XMM destination v1, r/m = MMX source v0
        })
    }
    // CVTPI2PS m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x2a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)   // ModRM + SIB/displacement for the memory source
        })
    }
    if p.len == 0 {
        // no encoder matched any supported form
        panic("invalid operands for CVTPI2PS")
    }
    return p
}
  8327  
// CVTPS2DQ performs "Convert Packed Single-Precision FP Values to Packed Dword Integers".
//
// Mnemonic        : CVTPS2DQ
// Supported forms : (2 forms)
//
//    * CVTPS2DQ xmm, xmm     [SSE2]
//    * CVTPS2DQ m128, xmm    [SSE2]
//
// Both forms require SSE2; the function panics if the operands match
// no supported form.
func (self *Program) CVTPS2DQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTPS2DQ", 2, Operands { v0, v1 })
    // CVTPS2DQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: 66 [REX] 0F 5B /r
            m.emit(0x66)                                    // mandatory 0x66 prefix
            m.rexo(hcode(v[1]), v[0], false)                // optional REX for extended XMM registers
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0x5b)                                    // CVTPS2DQ opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg = destination v1, r/m = source v0
        })
    }
    // CVTPS2DQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)   // ModRM + SIB/displacement for the memory source
        })
    }
    if p.len == 0 {
        // no encoder matched any supported form
        panic("invalid operands for CVTPS2DQ")
    }
    return p
}
  8367  
// CVTPS2PD performs "Convert Packed Single-Precision FP Values to Packed Double-Precision FP Values".
//
// Mnemonic        : CVTPS2PD
// Supported forms : (2 forms)
//
//    * CVTPS2PD xmm, xmm    [SSE2]
//    * CVTPS2PD m64, xmm    [SSE2]
//
// Both forms require SSE2; the function panics if the operands match
// no supported form.
func (self *Program) CVTPS2PD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTPS2PD", 2, Operands { v0, v1 })
    // CVTPS2PD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: [REX] 0F 5A /r (no prefix byte: single-precision source)
            m.rexo(hcode(v[1]), v[0], false)                // optional REX for extended XMM registers
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0x5a)                                    // CVTPS2PD opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg = destination v1, r/m = source v0
        })
    }
    // CVTPS2PD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)   // ModRM + SIB/displacement for the memory source
        })
    }
    if p.len == 0 {
        // no encoder matched any supported form
        panic("invalid operands for CVTPS2PD")
    }
    return p
}
  8405  
// CVTPS2PI performs "Convert Packed Single-Precision FP Values to Packed Dword Integers".
//
// Mnemonic        : CVTPS2PI
// Supported forms : (2 forms)
//
//    * CVTPS2PI xmm, mm    [SSE]
//    * CVTPS2PI m64, mm    [SSE]
//
// Both forms require SSE; the function panics if the operands match
// no supported form.
func (self *Program) CVTPS2PI(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTPS2PI", 2, Operands { v0, v1 })
    // CVTPS2PI xmm, mm
    if isXMM(v0) && isMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: [REX] 0F 2D /r (no prefix byte: single-precision form)
            m.rexo(hcode(v[1]), v[0], false)                // optional REX for extended registers
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0x2d)                                    // CVTPS2PI opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg = MMX destination v1, r/m = source v0
        })
    }
    // CVTPS2PI m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x2d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)   // ModRM + SIB/displacement for the memory source
        })
    }
    if p.len == 0 {
        // no encoder matched any supported form
        panic("invalid operands for CVTPS2PI")
    }
    return p
}
  8443  
// CVTSD2SI performs "Convert Scalar Double-Precision FP Value to Integer".
//
// Mnemonic        : CVTSD2SI
// Supported forms : (4 forms)
//
//    * CVTSD2SI xmm, r32    [SSE2]
//    * CVTSD2SI m64, r32    [SSE2]
//    * CVTSD2SI xmm, r64    [SSE2]
//    * CVTSD2SI m64, r64    [SSE2]
//
// All forms require SSE2; the function panics if the operands match
// no supported form.
func (self *Program) CVTSD2SI(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTSD2SI", 2, Operands { v0, v1 })
    // CVTSD2SI xmm, r32
    if isXMM(v0) && isReg32(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: F2 [REX] 0F 2D /r (32-bit destination)
            m.emit(0xf2)                                    // mandatory 0xF2 prefix (scalar double-precision form)
            m.rexo(hcode(v[1]), v[0], false)                // optional REX for extended registers
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0x2d)                                    // CVTSD2SI opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg = GP destination v1, r/m = XMM source v0
        })
    }
    // CVTSD2SI m64, r32
    if isM64(v0) && isReg32(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x2d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)   // ModRM + SIB/displacement for the memory source
        })
    }
    // CVTSD2SI xmm, r64
    if isXMM(v0) && isReg64(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: F2 REX.W 0F 2D /r (64-bit destination)
            m.emit(0xf2)
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))   // REX.W prefix carrying the high bits of both registers
            m.emit(0x0f)
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CVTSD2SI m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexm(1, hcode(v[1]), addr(v[0]))   // mandatory REX with W=1 for the 64-bit destination
            m.emit(0x0f)
            m.emit(0x2d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        // no encoder matched any supported form
        panic("invalid operands for CVTSD2SI")
    }
    return p
}
  8509  
// CVTSD2SS performs "Convert Scalar Double-Precision FP Value to Scalar Single-Precision FP Value".
//
// Mnemonic        : CVTSD2SS
// Supported forms : (2 forms)
//
//    * CVTSD2SS xmm, xmm    [SSE2]
//    * CVTSD2SS m64, xmm    [SSE2]
//
// Both forms require SSE2; the function panics if the operands match
// no supported form.
func (self *Program) CVTSD2SS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTSD2SS", 2, Operands { v0, v1 })
    // CVTSD2SS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: F2 [REX] 0F 5A /r
            m.emit(0xf2)                                    // mandatory 0xF2 prefix (scalar double-precision form)
            m.rexo(hcode(v[1]), v[0], false)                // optional REX for extended XMM registers
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0x5a)                                    // CVTSD2SS opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg = destination v1, r/m = source v0
        })
    }
    // CVTSD2SS m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)   // ModRM + SIB/displacement for the memory source
        })
    }
    if p.len == 0 {
        // no encoder matched any supported form
        panic("invalid operands for CVTSD2SS")
    }
    return p
}
  8549  
// CVTSI2SD performs "Convert Dword Integer to Scalar Double-Precision FP Value".
//
// Mnemonic        : CVTSI2SD
// Supported forms : (4 forms)
//
//    * CVTSI2SD r32, xmm    [SSE2]
//    * CVTSI2SD r64, xmm    [SSE2]
//    * CVTSI2SD m32, xmm    [SSE2]
//    * CVTSI2SD m64, xmm    [SSE2]
//
// All forms require SSE2; the function panics if the operands match
// no supported form.
func (self *Program) CVTSI2SD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTSI2SD", 2, Operands { v0, v1 })
    // CVTSI2SD r32, xmm
    if isReg32(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: F2 [REX] 0F 2A /r (32-bit integer source)
            m.emit(0xf2)                                    // mandatory 0xF2 prefix (scalar double-precision form)
            m.rexo(hcode(v[1]), v[0], false)                // optional REX for extended registers
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0x2a)                                    // CVTSI2SD opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: register-direct, reg = XMM destination v1, r/m = GP source v0
        })
    }
    // CVTSI2SD r64, xmm
    if isReg64(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: F2 REX.W 0F 2A /r (64-bit integer source)
            m.emit(0xf2)
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))   // REX.W prefix carrying the high bits of both registers
            m.emit(0x0f)
            m.emit(0x2a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CVTSI2SD m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x2a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)   // ModRM + SIB/displacement for the memory source
        })
    }
    // CVTSI2SD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexm(1, hcode(v[1]), addr(v[0]))   // mandatory REX with W=1 for the 64-bit memory source
            m.emit(0x0f)
            m.emit(0x2a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        // no encoder matched any supported form
        panic("invalid operands for CVTSI2SD")
    }
    return p
}
  8615  
// CVTSI2SS performs "Convert Dword Integer to Scalar Single-Precision FP Value".
//
// Mnemonic        : CVTSI2SS
// Supported forms : (4 forms)
//
//    * CVTSI2SS r32, xmm    [SSE]
//    * CVTSI2SS r64, xmm    [SSE]
//    * CVTSI2SS m32, xmm    [SSE]
//    * CVTSI2SS m64, xmm    [SSE]
//
// One encoder is registered per matching form; the call panics if the
// operands match none of the forms above.
func (self *Program) CVTSI2SS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTSI2SS", 2, Operands { v0, v1 })
    // CVTSI2SS r32, xmm
    if isReg32(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F3 [REX] 0F 2A /r — ModRM mod=11, reg = XMM dst, rm = r32 src
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x2a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CVTSI2SS r64, xmm
    if isReg64(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F3 REX.W 0F 2A /r — REX.W (0x48) selects the 64-bit integer source
            m.emit(0xf3)
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
            m.emit(0x0f)
            m.emit(0x2a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CVTSI2SS m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F3 [REX] 0F 2A /r — memory form; mrsd emits ModRM/SIB/disp
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x2a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CVTSI2SS m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F3 REX.W 0F 2A /r — memory form with REX.W for the 64-bit source
            m.emit(0xf3)
            m.rexm(1, hcode(v[1]), addr(v[0]))
            m.emit(0x0f)
            m.emit(0x2a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for CVTSI2SS")
    }
    return p
}
  8681  
// CVTSS2SD performs "Convert Scalar Single-Precision FP Value to Scalar Double-Precision FP Value".
//
// Mnemonic        : CVTSS2SD
// Supported forms : (2 forms)
//
//    * CVTSS2SD xmm, xmm    [SSE2]
//    * CVTSS2SD m32, xmm    [SSE2]
//
// One encoder is registered per matching form; the call panics if the
// operands match none of the forms above.
func (self *Program) CVTSS2SD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTSS2SD", 2, Operands { v0, v1 })
    // CVTSS2SD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F3 [REX] 0F 5A /r — ModRM mod=11, reg = dst, rm = src
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x5a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CVTSS2SD m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F3 [REX] 0F 5A /r — memory form; mrsd emits ModRM/SIB/disp
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for CVTSS2SD")
    }
    return p
}
  8721  
// CVTSS2SI performs "Convert Scalar Single-Precision FP Value to Dword Integer".
//
// Mnemonic        : CVTSS2SI
// Supported forms : (4 forms)
//
//    * CVTSS2SI xmm, r32    [SSE]
//    * CVTSS2SI m32, r32    [SSE]
//    * CVTSS2SI xmm, r64    [SSE]
//    * CVTSS2SI m32, r64    [SSE]
//
// One encoder is registered per matching form; the call panics if the
// operands match none of the forms above.
func (self *Program) CVTSS2SI(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTSS2SI", 2, Operands { v0, v1 })
    // CVTSS2SI xmm, r32
    if isXMM(v0) && isReg32(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F3 [REX] 0F 2D /r — ModRM mod=11, reg = r32 dst, rm = XMM src
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CVTSS2SI m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F3 [REX] 0F 2D /r — memory form; mrsd emits ModRM/SIB/disp
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x2d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CVTSS2SI xmm, r64
    if isXMM(v0) && isReg64(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F3 REX.W 0F 2D /r — REX.W (0x48) selects the 64-bit integer destination
            m.emit(0xf3)
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
            m.emit(0x0f)
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CVTSS2SI m32, r64
    if isM32(v0) && isReg64(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F3 REX.W 0F 2D /r — memory form with REX.W for the 64-bit destination
            m.emit(0xf3)
            m.rexm(1, hcode(v[1]), addr(v[0]))
            m.emit(0x0f)
            m.emit(0x2d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for CVTSS2SI")
    }
    return p
}
  8787  
// CVTTPD2DQ performs "Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers".
//
// Mnemonic        : CVTTPD2DQ
// Supported forms : (2 forms)
//
//    * CVTTPD2DQ xmm, xmm     [SSE2]
//    * CVTTPD2DQ m128, xmm    [SSE2]
//
// One encoder is registered per matching form; the call panics if the
// operands match none of the forms above.
func (self *Program) CVTTPD2DQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTTPD2DQ", 2, Operands { v0, v1 })
    // CVTTPD2DQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 [REX] 0F E6 /r — ModRM mod=11, reg = dst, rm = src
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CVTTPD2DQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 [REX] 0F E6 /r — memory form; mrsd emits ModRM/SIB/disp
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for CVTTPD2DQ")
    }
    return p
}
  8827  
// CVTTPD2PI performs "Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers".
//
// Mnemonic        : CVTTPD2PI
// Supported forms : (2 forms)
//
//    * CVTTPD2PI xmm, mm     [SSE2]
//    * CVTTPD2PI m128, mm    [SSE2]
//
// One encoder is registered per matching form; the call panics if the
// operands match none of the forms above.
func (self *Program) CVTTPD2PI(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTTPD2PI", 2, Operands { v0, v1 })
    // CVTTPD2PI xmm, mm
    if isXMM(v0) && isMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 [REX] 0F 2C /r — ModRM mod=11, reg = MMX dst, rm = XMM src
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CVTTPD2PI m128, mm
    if isM128(v0) && isMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 [REX] 0F 2C /r — memory form; mrsd emits ModRM/SIB/disp
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x2c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for CVTTPD2PI")
    }
    return p
}
  8867  
// CVTTPS2DQ performs "Convert with Truncation Packed Single-Precision FP Values to Packed Dword Integers".
//
// Mnemonic        : CVTTPS2DQ
// Supported forms : (2 forms)
//
//    * CVTTPS2DQ xmm, xmm     [SSE2]
//    * CVTTPS2DQ m128, xmm    [SSE2]
//
// One encoder is registered per matching form; the call panics if the
// operands match none of the forms above.
func (self *Program) CVTTPS2DQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTTPS2DQ", 2, Operands { v0, v1 })
    // CVTTPS2DQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F3 [REX] 0F 5B /r — ModRM mod=11, reg = dst, rm = src
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CVTTPS2DQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F3 [REX] 0F 5B /r — memory form; mrsd emits ModRM/SIB/disp
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for CVTTPS2DQ")
    }
    return p
}
  8907  
// CVTTPS2PI performs "Convert with Truncation Packed Single-Precision FP Values to Packed Dword Integers".
//
// Mnemonic        : CVTTPS2PI
// Supported forms : (2 forms)
//
//    * CVTTPS2PI xmm, mm    [SSE]
//    * CVTTPS2PI m64, mm    [SSE]
//
// One encoder is registered per matching form; the call panics if the
// operands match none of the forms above.
func (self *Program) CVTTPS2PI(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTTPS2PI", 2, Operands { v0, v1 })
    // CVTTPS2PI xmm, mm
    if isXMM(v0) && isMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // [REX] 0F 2C /r — no mandatory prefix; ModRM mod=11, reg = MMX dst, rm = XMM src
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CVTTPS2PI m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // [REX] 0F 2C /r — memory form; mrsd emits ModRM/SIB/disp
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x2c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for CVTTPS2PI")
    }
    return p
}
  8945  
// CVTTSD2SI performs "Convert with Truncation Scalar Double-Precision FP Value to Signed Integer".
//
// Mnemonic        : CVTTSD2SI
// Supported forms : (4 forms)
//
//    * CVTTSD2SI xmm, r32    [SSE2]
//    * CVTTSD2SI m64, r32    [SSE2]
//    * CVTTSD2SI xmm, r64    [SSE2]
//    * CVTTSD2SI m64, r64    [SSE2]
//
// One encoder is registered per matching form; the call panics if the
// operands match none of the forms above.
func (self *Program) CVTTSD2SI(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTTSD2SI", 2, Operands { v0, v1 })
    // CVTTSD2SI xmm, r32
    if isXMM(v0) && isReg32(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F2 [REX] 0F 2C /r — ModRM mod=11, reg = r32 dst, rm = XMM src
            m.emit(0xf2)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CVTTSD2SI m64, r32
    if isM64(v0) && isReg32(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F2 [REX] 0F 2C /r — memory form; mrsd emits ModRM/SIB/disp
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x2c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CVTTSD2SI xmm, r64
    if isXMM(v0) && isReg64(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F2 REX.W 0F 2C /r — REX.W (0x48) selects the 64-bit integer destination
            m.emit(0xf2)
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
            m.emit(0x0f)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CVTTSD2SI m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F2 REX.W 0F 2C /r — memory form with REX.W for the 64-bit destination
            m.emit(0xf2)
            m.rexm(1, hcode(v[1]), addr(v[0]))
            m.emit(0x0f)
            m.emit(0x2c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for CVTTSD2SI")
    }
    return p
}
  9011  
// CVTTSS2SI performs "Convert with Truncation Scalar Single-Precision FP Value to Dword Integer".
//
// Mnemonic        : CVTTSS2SI
// Supported forms : (4 forms)
//
//    * CVTTSS2SI xmm, r32    [SSE]
//    * CVTTSS2SI m32, r32    [SSE]
//    * CVTTSS2SI xmm, r64    [SSE]
//    * CVTTSS2SI m32, r64    [SSE]
//
// One encoder is registered per matching form; the call panics if the
// operands match none of the forms above.
func (self *Program) CVTTSS2SI(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("CVTTSS2SI", 2, Operands { v0, v1 })
    // CVTTSS2SI xmm, r32
    if isXMM(v0) && isReg32(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F3 [REX] 0F 2C /r — ModRM mod=11, reg = r32 dst, rm = XMM src
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CVTTSS2SI m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F3 [REX] 0F 2C /r — memory form; mrsd emits ModRM/SIB/disp
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x2c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // CVTTSS2SI xmm, r64
    if isXMM(v0) && isReg64(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F3 REX.W 0F 2C /r — REX.W (0x48) selects the 64-bit integer destination
            m.emit(0xf3)
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
            m.emit(0x0f)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // CVTTSS2SI m32, r64
    if isM32(v0) && isReg64(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F3 REX.W 0F 2C /r — memory form with REX.W for the 64-bit destination
            m.emit(0xf3)
            m.rexm(1, hcode(v[1]), addr(v[0]))
            m.emit(0x0f)
            m.emit(0x2c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for CVTTSS2SI")
    }
    return p
}
  9077  
// CWTD performs "Convert Word to Doubleword".
//
// Mnemonic        : CWD
// Supported forms : (1 form)
//
//    * CWTD
//
// Takes no operands; always encodes as 66 99 (operand-size prefix + CWD).
func (self *Program) CWTD() *Instruction {
    p := self.alloc("CWTD", 0, Operands {  })
    // CWTD
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        // 66 99 — the 0x66 prefix selects the 16-bit (CWD) form of opcode 0x99
        m.emit(0x66)
        m.emit(0x99)
    })
    return p
}
  9095  
// CWTL performs "Convert Word to Doubleword".
//
// Mnemonic        : CWDE
// Supported forms : (1 form)
//
//    * CWTL
//
// Takes no operands; always encodes as the single byte 98 (CWDE).
func (self *Program) CWTL() *Instruction {
    p := self.alloc("CWTL", 0, Operands {  })
    // CWTL
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        // 98 — CWDE, sign-extends AX into EAX
        m.emit(0x98)
    })
    return p
}
  9112  
// DECB performs "Decrement by 1".
//
// Mnemonic        : DEC
// Supported forms : (2 forms)
//
//    * DECB r8
//    * DECB m8
//
// One encoder is registered per matching form; the call panics if the
// operand matches none of the forms above.
func (self *Program) DECB(v0 interface{}) *Instruction {
    p := self.alloc("DECB", 1, Operands { v0 })
    // DECB r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // [REX] FE /1 — register form (0xc8 = mod=11, reg=1);
            // a REX prefix is forced for REX-only byte registers (SPL/BPL/SIL/DIL)
            m.rexo(0, v[0], isReg8REX(v[0]))
            m.emit(0xfe)
            m.emit(0xc8 | lcode(v[0]))
        })
    }
    // DECB m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // [REX] FE /1 — memory form; mrsd emits ModRM/SIB/disp with reg field = 1
            m.rexo(0, addr(v[0]), false)
            m.emit(0xfe)
            m.mrsd(1, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for DECB")
    }
    return p
}
  9146  
// DECL performs "Decrement by 1".
//
// Mnemonic        : DEC
// Supported forms : (2 forms)
//
//    * DECL r32
//    * DECL m32
//
// One encoder is registered per matching form; the call panics if the
// operand matches none of the forms above.
func (self *Program) DECL(v0 interface{}) *Instruction {
    p := self.alloc("DECL", 1, Operands { v0 })
    // DECL r32
    if isReg32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // [REX] FF /1 — register form (0xc8 = mod=11, reg=1)
            m.rexo(0, v[0], false)
            m.emit(0xff)
            m.emit(0xc8 | lcode(v[0]))
        })
    }
    // DECL m32
    if isM32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // [REX] FF /1 — memory form; mrsd emits ModRM/SIB/disp with reg field = 1
            m.rexo(0, addr(v[0]), false)
            m.emit(0xff)
            m.mrsd(1, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for DECL")
    }
    return p
}
  9180  
// DECQ performs "Decrement by 1".
//
// Mnemonic        : DEC
// Supported forms : (2 forms)
//
//    * DECQ r64
//    * DECQ m64
//
// One encoder is registered per matching form; the call panics if the
// operand matches none of the forms above.
func (self *Program) DECQ(v0 interface{}) *Instruction {
    p := self.alloc("DECQ", 1, Operands { v0 })
    // DECQ r64
    if isReg64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // REX.W FF /1 — 0x48 sets REX.W for the 64-bit operand size
            m.emit(0x48 | hcode(v[0]))
            m.emit(0xff)
            m.emit(0xc8 | lcode(v[0]))
        })
    }
    // DECQ m64
    if isM64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // REX.W FF /1 — memory form; mrsd emits ModRM/SIB/disp with reg field = 1
            m.rexm(1, 0, addr(v[0]))
            m.emit(0xff)
            m.mrsd(1, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for DECQ")
    }
    return p
}
  9214  
// DECW performs "Decrement by 1".
//
// Mnemonic        : DEC
// Supported forms : (2 forms)
//
//    * DECW r16
//    * DECW m16
//
// One encoder is registered per matching form; the call panics if the
// operand matches none of the forms above.
func (self *Program) DECW(v0 interface{}) *Instruction {
    p := self.alloc("DECW", 1, Operands { v0 })
    // DECW r16
    if isReg16(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 [REX] FF /1 — 0x66 operand-size prefix selects 16-bit operation
            m.emit(0x66)
            m.rexo(0, v[0], false)
            m.emit(0xff)
            m.emit(0xc8 | lcode(v[0]))
        })
    }
    // DECW m16
    if isM16(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 [REX] FF /1 — memory form; mrsd emits ModRM/SIB/disp with reg field = 1
            m.emit(0x66)
            m.rexo(0, addr(v[0]), false)
            m.emit(0xff)
            m.mrsd(1, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for DECW")
    }
    return p
}
  9250  
// DIVB performs "Unsigned Divide".
//
// Mnemonic        : DIV
// Supported forms : (2 forms)
//
//    * DIVB r8
//    * DIVB m8
//
// One encoder is registered per matching form; the call panics if the
// operand matches none of the forms above.
func (self *Program) DIVB(v0 interface{}) *Instruction {
    p := self.alloc("DIVB", 1, Operands { v0 })
    // DIVB r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // [REX] F6 /6 — register form (0xf0 = mod=11, reg=6);
            // a REX prefix is forced for REX-only byte registers (SPL/BPL/SIL/DIL)
            m.rexo(0, v[0], isReg8REX(v[0]))
            m.emit(0xf6)
            m.emit(0xf0 | lcode(v[0]))
        })
    }
    // DIVB m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // [REX] F6 /6 — memory form; mrsd emits ModRM/SIB/disp with reg field = 6
            m.rexo(0, addr(v[0]), false)
            m.emit(0xf6)
            m.mrsd(6, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for DIVB")
    }
    return p
}
  9284  
// DIVL performs "Unsigned Divide".
//
// Mnemonic        : DIV
// Supported forms : (2 forms)
//
//    * DIVL r32
//    * DIVL m32
//
// One encoder is registered per matching form; the call panics if the
// operand matches none of the forms above.
func (self *Program) DIVL(v0 interface{}) *Instruction {
    p := self.alloc("DIVL", 1, Operands { v0 })
    // DIVL r32
    if isReg32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // [REX] F7 /6 — register form (0xf0 = mod=11, reg=6)
            m.rexo(0, v[0], false)
            m.emit(0xf7)
            m.emit(0xf0 | lcode(v[0]))
        })
    }
    // DIVL m32
    if isM32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // [REX] F7 /6 — memory form; mrsd emits ModRM/SIB/disp with reg field = 6
            m.rexo(0, addr(v[0]), false)
            m.emit(0xf7)
            m.mrsd(6, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for DIVL")
    }
    return p
}
  9318  
// DIVPD performs "Divide Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : DIVPD
// Supported forms : (2 forms)
//
//    * DIVPD xmm, xmm     [SSE2]
//    * DIVPD m128, xmm    [SSE2]
//
// One encoder is registered per matching form; the call panics if the
// operands match none of the forms above.
func (self *Program) DIVPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("DIVPD", 2, Operands { v0, v1 })
    // DIVPD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 [REX] 0F 5E /r — ModRM mod=11, reg = dst, rm = src
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // DIVPD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 [REX] 0F 5E /r — memory form; mrsd emits ModRM/SIB/disp
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for DIVPD")
    }
    return p
}
  9358  
// DIVPS performs "Divide Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : DIVPS
// Supported forms : (2 forms)
//
//    * DIVPS xmm, xmm     [SSE]
//    * DIVPS m128, xmm    [SSE]
//
// One encoder is registered per matching form; the call panics if the
// operands match none of the forms above.
func (self *Program) DIVPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("DIVPS", 2, Operands { v0, v1 })
    // DIVPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // [REX] 0F 5E /r — no mandatory prefix; ModRM mod=11, reg = dst, rm = src
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // DIVPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // [REX] 0F 5E /r — memory form; mrsd emits ModRM/SIB/disp
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for DIVPS")
    }
    return p
}
  9396  
// DIVQ performs "Unsigned Divide".
//
// Mnemonic        : DIV
// Supported forms : (2 forms)
//
//    * DIVQ r64
//    * DIVQ m64
//
// One encoder is registered per matching form; the call panics if the
// operand matches none of the forms above.
func (self *Program) DIVQ(v0 interface{}) *Instruction {
    p := self.alloc("DIVQ", 1, Operands { v0 })
    // DIVQ r64
    if isReg64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // REX.W F7 /6 — 0x48 sets REX.W for the 64-bit operand size
            m.emit(0x48 | hcode(v[0]))
            m.emit(0xf7)
            m.emit(0xf0 | lcode(v[0]))
        })
    }
    // DIVQ m64
    if isM64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // REX.W F7 /6 — memory form; mrsd emits ModRM/SIB/disp with reg field = 6
            m.rexm(1, 0, addr(v[0]))
            m.emit(0xf7)
            m.mrsd(6, addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for DIVQ")
    }
    return p
}
  9430  
// DIVSD performs "Divide Scalar Double-Precision Floating-Point Values".
//
// Mnemonic        : DIVSD
// Supported forms : (2 forms)
//
//    * DIVSD xmm, xmm    [SSE2]
//    * DIVSD m64, xmm    [SSE2]
//
// One encoder is registered per matching form; the call panics if the
// operands match none of the forms above.
func (self *Program) DIVSD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("DIVSD", 2, Operands { v0, v1 })
    // DIVSD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F2 [REX] 0F 5E /r — ModRM mod=11, reg = dst, rm = src
            m.emit(0xf2)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // DIVSD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F2 [REX] 0F 5E /r — memory form; mrsd emits ModRM/SIB/disp
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for DIVSD")
    }
    return p
}
  9470  
  9471  // DIVSS performs "Divide Scalar Single-Precision Floating-Point Values".
  9472  //
  9473  // Mnemonic        : DIVSS
  9474  // Supported forms : (2 forms)
  9475  //
  9476  //    * DIVSS xmm, xmm    [SSE]
  9477  //    * DIVSS m32, xmm    [SSE]
  9478  //
func (self *Program) DIVSS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("DIVSS", 2, Operands { v0, v1 })
    // DIVSS xmm, xmm
    // Encodes as F3 0F 5E /r with a register-direct ModRM byte.
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)                                  // mandatory SSE prefix
            m.rexo(hcode(v[1]), v[0], false)              // optional REX for xmm8-xmm15
            m.emit(0x0f)                                  // two-byte opcode escape
            m.emit(0x5e)                                  // DIVSS opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // DIVSS m32, xmm
    // Same opcode, memory source encoded via ModRM/SIB/displacement (mrsd).
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for DIVSS")
    }
    return p
}
  9510  
  9511  // DIVW performs "Unsigned Divide".
  9512  //
  9513  // Mnemonic        : DIV
  9514  // Supported forms : (2 forms)
  9515  //
  9516  //    * DIVW r16
  9517  //    * DIVW m16
  9518  //
func (self *Program) DIVW(v0 interface{}) *Instruction {
    p := self.alloc("DIVW", 1, Operands { v0 })
    // DIVW r16
    // Encodes as 66 F7 /6 (0x66 selects the 16-bit operand size).
    if isReg16(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                 // operand-size override prefix
            m.rexo(0, v[0], false)       // optional REX for r8w-r15w
            m.emit(0xf7)                 // group-3 opcode
            m.emit(0xf0 | lcode(v[0]))   // ModRM: mod=11, reg=6 (DIV), rm=operand
        })
    }
    // DIVW m16
    // Same opcode; /6 goes into the reg field of the memory ModRM.
    if isM16(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[0]), false)
            m.emit(0xf7)
            m.mrsd(6, addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for DIVW")
    }
    return p
}
  9546  
  9547  // DPPD performs "Dot Product of Packed Double Precision Floating-Point Values".
  9548  //
  9549  // Mnemonic        : DPPD
  9550  // Supported forms : (2 forms)
  9551  //
  9552  //    * DPPD imm8, xmm, xmm     [SSE4.1]
  9553  //    * DPPD imm8, m128, xmm    [SSE4.1]
  9554  //
func (self *Program) DPPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("DPPD", 3, Operands { v0, v1, v2 })
    // DPPD imm8, xmm, xmm
    // Encodes as 66 0F 3A 41 /r ib (register form).
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // mandatory prefix
            m.rexo(hcode(v[2]), v[1], false)              // optional REX for xmm8-xmm15
            m.emit(0x0f)                                  // three-byte opcode escape...
            m.emit(0x3a)                                  // ...0F 3A map
            m.emit(0x41)                                  // DPPD opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1])) // ModRM: mod=11, reg=dst, rm=src
            m.imm1(toImmAny(v[0]))                        // trailing imm8 selector/mask
        })
    }
    // DPPD imm8, m128, xmm
    // Same opcode with a memory source, immediate last.
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x41)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for DPPD")
    }
    return p
}
  9590  
  9591  // DPPS performs "Dot Product of Packed Single Precision Floating-Point Values".
  9592  //
  9593  // Mnemonic        : DPPS
  9594  // Supported forms : (2 forms)
  9595  //
  9596  //    * DPPS imm8, xmm, xmm     [SSE4.1]
  9597  //    * DPPS imm8, m128, xmm    [SSE4.1]
  9598  //
func (self *Program) DPPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("DPPS", 3, Operands { v0, v1, v2 })
    // DPPS imm8, xmm, xmm
    // Encodes as 66 0F 3A 40 /r ib (register form).
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // mandatory prefix
            m.rexo(hcode(v[2]), v[1], false)              // optional REX for xmm8-xmm15
            m.emit(0x0f)                                  // three-byte opcode escape...
            m.emit(0x3a)                                  // ...0F 3A map
            m.emit(0x40)                                  // DPPS opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1])) // ModRM: mod=11, reg=dst, rm=src
            m.imm1(toImmAny(v[0]))                        // trailing imm8 selector/mask
        })
    }
    // DPPS imm8, m128, xmm
    // Same opcode with a memory source, immediate last.
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x40)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for DPPS")
    }
    return p
}
  9634  
  9635  // EMMS performs "Exit MMX State".
  9636  //
  9637  // Mnemonic        : EMMS
  9638  // Supported forms : (1 form)
  9639  //
  9640  //    * EMMS    [MMX]
  9641  //
  9642  func (self *Program) EMMS() *Instruction {
  9643      p := self.alloc("EMMS", 0, Operands {  })
  9644      // EMMS
  9645      self.require(ISA_MMX)
  9646      p.domain = DomainMMXSSE
  9647      p.add(0, func(m *_Encoding, v []interface{}) {
  9648          m.emit(0x0f)
  9649          m.emit(0x77)
  9650      })
  9651      return p
  9652  }
  9653  
  9654  // EXTRACTPS performs "Extract Packed Single Precision Floating-Point Value".
  9655  //
  9656  // Mnemonic        : EXTRACTPS
  9657  // Supported forms : (2 forms)
  9658  //
  9659  //    * EXTRACTPS imm8, xmm, r32    [SSE4.1]
  9660  //    * EXTRACTPS imm8, xmm, m32    [SSE4.1]
  9661  //
func (self *Program) EXTRACTPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("EXTRACTPS", 3, Operands { v0, v1, v2 })
    // EXTRACTPS imm8, xmm, r32
    // Encodes as 66 0F 3A 17 /r ib; note the xmm SOURCE goes in the
    // ModRM reg field and the r32 DESTINATION in the rm field.
    if isImm8(v0) && isXMM(v1) && isReg32(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // mandatory prefix
            m.rexo(hcode(v[1]), v[2], false)              // optional REX for high registers
            m.emit(0x0f)                                  // three-byte opcode escape...
            m.emit(0x3a)                                  // ...0F 3A map
            m.emit(0x17)                                  // EXTRACTPS opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2])) // ModRM: mod=11, reg=xmm src, rm=r32 dst
            m.imm1(toImmAny(v[0]))                        // trailing imm8 lane selector
        })
    }
    // EXTRACTPS imm8, xmm, m32
    // Same opcode storing to memory, immediate last.
    if isImm8(v0) && isXMM(v1) && isM32(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[2]), false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x17)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for EXTRACTPS")
    }
    return p
}
  9697  
  9698  // EXTRQ performs "Extract Field".
  9699  //
  9700  // Mnemonic        : EXTRQ
  9701  // Supported forms : (2 forms)
  9702  //
  9703  //    * EXTRQ xmm, xmm           [SSE4A]
  9704  //    * EXTRQ imm8, imm8, xmm    [SSE4A]
  9705  //
func (self *Program) EXTRQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // EXTRQ is variadic because it has both a 2-operand and a
    // 3-operand form; allocate accordingly.
    switch len(vv) {
        case 0  : p = self.alloc("EXTRQ", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("EXTRQ", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction EXTRQ takes 2 or 3 operands")
    }
    // EXTRQ xmm, xmm
    // Encodes as 66 0F 79 /r (AMD SSE4A).
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4A)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // mandatory prefix
            m.rexo(hcode(v[1]), v[0], false)              // optional REX for xmm8-xmm15
            m.emit(0x0f)                                  // two-byte opcode escape
            m.emit(0x79)                                  // EXTRQ (register form) opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // EXTRQ imm8, imm8, xmm
    // Encodes as 66 0F 78 /0 ib ib; the two immediates are emitted in
    // reverse operand order (length first at v[1], then index at v[0]).
    if len(vv) == 1 && isImm8(v0) && isImm8(v1) && isXMM(vv[0]) {
        self.require(ISA_SSE4A)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[2], false)
            m.emit(0x0f)
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[2]))  // ModRM: mod=11, reg=0, rm=xmm
            m.imm1(toImmAny(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for EXTRQ")
    }
    return p
}
  9744  
  9745  // FEMMS performs "Fast Exit Multimedia State".
  9746  //
  9747  // Mnemonic        : FEMMS
  9748  // Supported forms : (1 form)
  9749  //
  9750  //    * FEMMS    [FEMMS]
  9751  //
  9752  func (self *Program) FEMMS() *Instruction {
  9753      p := self.alloc("FEMMS", 0, Operands {  })
  9754      // FEMMS
  9755      self.require(ISA_FEMMS)
  9756      p.domain = DomainAMDSpecific
  9757      p.add(0, func(m *_Encoding, v []interface{}) {
  9758          m.emit(0x0f)
  9759          m.emit(0x0e)
  9760      })
  9761      return p
  9762  }
  9763  
  9764  // HADDPD performs "Packed Double-FP Horizontal Add".
  9765  //
  9766  // Mnemonic        : HADDPD
  9767  // Supported forms : (2 forms)
  9768  //
  9769  //    * HADDPD xmm, xmm     [SSE3]
  9770  //    * HADDPD m128, xmm    [SSE3]
  9771  //
func (self *Program) HADDPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("HADDPD", 2, Operands { v0, v1 })
    // HADDPD xmm, xmm
    // Encodes as 66 0F 7C /r (register form).
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // mandatory prefix (distinguishes from HADDPS)
            m.rexo(hcode(v[1]), v[0], false)              // optional REX for xmm8-xmm15
            m.emit(0x0f)                                  // two-byte opcode escape
            m.emit(0x7c)                                  // HADDPD opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // HADDPD m128, xmm
    // Same opcode with a memory source (ModRM/SIB/disp via mrsd).
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x7c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for HADDPD")
    }
    return p
}
  9803  
  9804  // HADDPS performs "Packed Single-FP Horizontal Add".
  9805  //
  9806  // Mnemonic        : HADDPS
  9807  // Supported forms : (2 forms)
  9808  //
  9809  //    * HADDPS xmm, xmm     [SSE3]
  9810  //    * HADDPS m128, xmm    [SSE3]
  9811  //
func (self *Program) HADDPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("HADDPS", 2, Operands { v0, v1 })
    // HADDPS xmm, xmm
    // Encodes as F2 0F 7C /r (register form).
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)                                  // mandatory prefix (distinguishes from HADDPD)
            m.rexo(hcode(v[1]), v[0], false)              // optional REX for xmm8-xmm15
            m.emit(0x0f)                                  // two-byte opcode escape
            m.emit(0x7c)                                  // HADDPS opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // HADDPS m128, xmm
    // Same opcode with a memory source (ModRM/SIB/disp via mrsd).
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x7c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for HADDPS")
    }
    return p
}
  9843  
  9844  // HSUBPD performs "Packed Double-FP Horizontal Subtract".
  9845  //
  9846  // Mnemonic        : HSUBPD
  9847  // Supported forms : (2 forms)
  9848  //
  9849  //    * HSUBPD xmm, xmm     [SSE3]
  9850  //    * HSUBPD m128, xmm    [SSE3]
  9851  //
func (self *Program) HSUBPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("HSUBPD", 2, Operands { v0, v1 })
    // HSUBPD xmm, xmm
    // Encodes as 66 0F 7D /r (register form).
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // mandatory prefix (distinguishes from HSUBPS)
            m.rexo(hcode(v[1]), v[0], false)              // optional REX for xmm8-xmm15
            m.emit(0x0f)                                  // two-byte opcode escape
            m.emit(0x7d)                                  // HSUBPD opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // HSUBPD m128, xmm
    // Same opcode with a memory source (ModRM/SIB/disp via mrsd).
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x7d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for HSUBPD")
    }
    return p
}
  9883  
  9884  // HSUBPS performs "Packed Single-FP Horizontal Subtract".
  9885  //
  9886  // Mnemonic        : HSUBPS
  9887  // Supported forms : (2 forms)
  9888  //
  9889  //    * HSUBPS xmm, xmm     [SSE3]
  9890  //    * HSUBPS m128, xmm    [SSE3]
  9891  //
func (self *Program) HSUBPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("HSUBPS", 2, Operands { v0, v1 })
    // HSUBPS xmm, xmm
    // Encodes as F2 0F 7D /r (register form).
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)                                  // mandatory prefix (distinguishes from HSUBPD)
            m.rexo(hcode(v[1]), v[0], false)              // optional REX for xmm8-xmm15
            m.emit(0x0f)                                  // two-byte opcode escape
            m.emit(0x7d)                                  // HSUBPS opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // HSUBPS m128, xmm
    // Same opcode with a memory source (ModRM/SIB/disp via mrsd).
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x7d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for HSUBPS")
    }
    return p
}
  9923  
  9924  // IDIVB performs "Signed Divide".
  9925  //
  9926  // Mnemonic        : IDIV
  9927  // Supported forms : (2 forms)
  9928  //
  9929  //    * IDIVB r8
  9930  //    * IDIVB m8
  9931  //
func (self *Program) IDIVB(v0 interface{}) *Instruction {
    p := self.alloc("IDIVB", 1, Operands { v0 })
    // IDIVB r8
    // Encodes as F6 /7; a REX prefix is forced for the REX-only byte
    // registers (isReg8REX) so SPL/BPL/SIL/DIL encode correctly.
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))
            m.emit(0xf6)               // group-3 opcode (8-bit)
            m.emit(0xf8 | lcode(v[0])) // ModRM: mod=11, reg=7 (IDIV), rm=operand
        })
    }
    // IDIVB m8
    // Same opcode; /7 goes into the reg field of the memory ModRM.
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)
            m.emit(0xf6)
            m.mrsd(7, addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for IDIVB")
    }
    return p
}
  9957  
  9958  // IDIVL performs "Signed Divide".
  9959  //
  9960  // Mnemonic        : IDIV
  9961  // Supported forms : (2 forms)
  9962  //
  9963  //    * IDIVL r32
  9964  //    * IDIVL m32
  9965  //
func (self *Program) IDIVL(v0 interface{}) *Instruction {
    p := self.alloc("IDIVL", 1, Operands { v0 })
    // IDIVL r32
    // Encodes as F7 /7 (32-bit operand size is the default).
    if isReg32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], false)     // optional REX for r8d-r15d
            m.emit(0xf7)               // group-3 opcode
            m.emit(0xf8 | lcode(v[0])) // ModRM: mod=11, reg=7 (IDIV), rm=operand
        })
    }
    // IDIVL m32
    // Same opcode; /7 goes into the reg field of the memory ModRM.
    if isM32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)
            m.emit(0xf7)
            m.mrsd(7, addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for IDIVL")
    }
    return p
}
  9991  
  9992  // IDIVQ performs "Signed Divide".
  9993  //
  9994  // Mnemonic        : IDIV
  9995  // Supported forms : (2 forms)
  9996  //
  9997  //    * IDIVQ r64
  9998  //    * IDIVQ m64
  9999  //
func (self *Program) IDIVQ(v0 interface{}) *Instruction {
    p := self.alloc("IDIVQ", 1, Operands { v0 })
    // IDIVQ r64
    // Encodes as REX.W F7 /7; 0x48 is REX.W, with REX.B from hcode.
    if isReg64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0])) // REX.W (+ REX.B for r8-r15)
            m.emit(0xf7)               // group-3 opcode
            m.emit(0xf8 | lcode(v[0])) // ModRM: mod=11, reg=7 (IDIV), rm=operand
        })
    }
    // IDIVQ m64
    // rexm(1, ...) emits the mandatory REX.W for the memory form.
    if isM64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[0]))
            m.emit(0xf7)
            m.mrsd(7, addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for IDIVQ")
    }
    return p
}
 10025  
 10026  // IDIVW performs "Signed Divide".
 10027  //
 10028  // Mnemonic        : IDIV
 10029  // Supported forms : (2 forms)
 10030  //
 10031  //    * IDIVW r16
 10032  //    * IDIVW m16
 10033  //
func (self *Program) IDIVW(v0 interface{}) *Instruction {
    p := self.alloc("IDIVW", 1, Operands { v0 })
    // IDIVW r16
    // Encodes as 66 F7 /7 (0x66 selects the 16-bit operand size).
    if isReg16(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)               // operand-size override prefix
            m.rexo(0, v[0], false)     // optional REX for r8w-r15w
            m.emit(0xf7)               // group-3 opcode
            m.emit(0xf8 | lcode(v[0])) // ModRM: mod=11, reg=7 (IDIV), rm=operand
        })
    }
    // IDIVW m16
    // Same opcode; /7 goes into the reg field of the memory ModRM.
    if isM16(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[0]), false)
            m.emit(0xf7)
            m.mrsd(7, addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for IDIVW")
    }
    return p
}
 10061  
 10062  // IMULB performs "Signed Multiply".
 10063  //
 10064  // Mnemonic        : IMUL
 10065  // Supported forms : (2 forms)
 10066  //
 10067  //    * IMULB r8
 10068  //    * IMULB m8
 10069  //
func (self *Program) IMULB(v0 interface{}) *Instruction {
    p := self.alloc("IMULB", 1, Operands { v0 })
    // IMULB r8
    // Encodes as F6 /5; a REX prefix is forced for the REX-only byte
    // registers (isReg8REX) so SPL/BPL/SIL/DIL encode correctly.
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))
            m.emit(0xf6)               // group-3 opcode (8-bit)
            m.emit(0xe8 | lcode(v[0])) // ModRM: mod=11, reg=5 (IMUL), rm=operand
        })
    }
    // IMULB m8
    // Same opcode; /5 goes into the reg field of the memory ModRM.
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)
            m.emit(0xf6)
            m.mrsd(5, addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for IMULB")
    }
    return p
}
 10095  
 10096  // IMULL performs "Signed Multiply".
 10097  //
 10098  // Mnemonic        : IMUL
 10099  // Supported forms : (8 forms)
 10100  //
 10101  //    * IMULL r32
 10102  //    * IMULL m32
 10103  //    * IMULL r32, r32
 10104  //    * IMULL m32, r32
 10105  //    * IMULL imm8, r32, r32
 10106  //    * IMULL imm32, r32, r32
 10107  //    * IMULL imm8, m32, r32
 10108  //    * IMULL imm32, m32, r32
 10109  //
func (self *Program) IMULL(v0 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // IMULL is variadic because IMUL has 1-, 2- and 3-operand forms;
    // allocate the instruction with the matching operand count.
    switch len(vv) {
        case 0  : p = self.alloc("IMULL", 1, Operands { v0 })
        case 1  : p = self.alloc("IMULL", 2, Operands { v0, vv[0] })
        case 2  : p = self.alloc("IMULL", 3, Operands { v0, vv[0], vv[1] })
        default : panic("instruction IMULL takes 1 or 2 or 3 operands")
    }
    // IMULL r32 — one-operand form, F7 /5.
    if len(vv) == 0 && isReg32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], false)
            m.emit(0xf7)
            m.emit(0xe8 | lcode(v[0])) // ModRM: mod=11, reg=5 (IMUL), rm=operand
        })
    }
    // IMULL m32 — one-operand form with memory source, F7 /5.
    if len(vv) == 0 && isM32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)
            m.emit(0xf7)
            m.mrsd(5, addr(v[0]), 1)
        })
    }
    // IMULL r32, r32 — two-operand form, 0F AF /r.
    if len(vv) == 1 && isReg32(v0) && isReg32(vv[0]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xaf)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // IMULL m32, r32 — two-operand form with memory source, 0F AF /r.
    if len(vv) == 1 && isM32(v0) && isReg32(vv[0]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xaf)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // IMULL imm8, r32, r32 — three-operand form, 6B /r ib.
    if len(vv) == 2 && isImm8(v0) && isReg32(vv[0]) && isReg32(vv[1]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x6b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // IMULL imm32, r32, r32 — three-operand form, 69 /r id.
    if len(vv) == 2 && isImm32(v0) && isReg32(vv[0]) && isReg32(vv[1]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x69)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm4(toImmAny(v[0]))
        })
    }
    // IMULL imm8, m32, r32 — three-operand form, memory source, 6B /r ib.
    if len(vv) == 2 && isImm8(v0) && isM32(vv[0]) && isReg32(vv[1]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x6b)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // IMULL imm32, m32, r32 — three-operand form, memory source, 69 /r id.
    if len(vv) == 2 && isImm32(v0) && isM32(vv[0]) && isReg32(vv[1]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x69)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm4(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for IMULL")
    }
    return p
}
 10201  
 10202  // IMULQ performs "Signed Multiply".
 10203  //
 10204  // Mnemonic        : IMUL
 10205  // Supported forms : (8 forms)
 10206  //
 10207  //    * IMULQ r64
 10208  //    * IMULQ m64
 10209  //    * IMULQ r64, r64
 10210  //    * IMULQ m64, r64
 10211  //    * IMULQ imm8, r64, r64
 10212  //    * IMULQ imm32, r64, r64
 10213  //    * IMULQ imm8, m64, r64
 10214  //    * IMULQ imm32, m64, r64
 10215  //
func (self *Program) IMULQ(v0 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // IMULQ is variadic because IMUL has 1-, 2- and 3-operand forms;
    // allocate the instruction with the matching operand count.
    switch len(vv) {
        case 0  : p = self.alloc("IMULQ", 1, Operands { v0 })
        case 1  : p = self.alloc("IMULQ", 2, Operands { v0, vv[0] })
        case 2  : p = self.alloc("IMULQ", 3, Operands { v0, vv[0], vv[1] })
        default : panic("instruction IMULQ takes 1 or 2 or 3 operands")
    }
    // IMULQ r64 — one-operand form, REX.W F7 /5.
    if len(vv) == 0 && isReg64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0])) // REX.W (+ REX.B for r8-r15)
            m.emit(0xf7)
            m.emit(0xe8 | lcode(v[0])) // ModRM: mod=11, reg=5 (IMUL), rm=operand
        })
    }
    // IMULQ m64 — one-operand form with memory source; rexm(1, ...)
    // emits the mandatory REX.W.
    if len(vv) == 0 && isM64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[0]))
            m.emit(0xf7)
            m.mrsd(5, addr(v[0]), 1)
        })
    }
    // IMULQ r64, r64 — two-operand form, REX.W 0F AF /r.
    if len(vv) == 1 && isReg64(v0) && isReg64(vv[0]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0])) // REX.W + R (dst) + B (src)
            m.emit(0x0f)
            m.emit(0xaf)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // IMULQ m64, r64 — two-operand form with memory source, REX.W 0F AF /r.
    if len(vv) == 1 && isM64(v0) && isReg64(vv[0]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))
            m.emit(0x0f)
            m.emit(0xaf)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // IMULQ imm8, r64, r64 — three-operand form, REX.W 6B /r ib.
    if len(vv) == 2 && isImm8(v0) && isReg64(vv[0]) && isReg64(vv[1]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[2]) << 2 | hcode(v[1]))
            m.emit(0x6b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // IMULQ imm32, r64, r64 — three-operand form, REX.W 69 /r id
    // (the imm32 is sign-extended to 64 bits by the CPU).
    if len(vv) == 2 && isImm32(v0) && isReg64(vv[0]) && isReg64(vv[1]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[2]) << 2 | hcode(v[1]))
            m.emit(0x69)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm4(toImmAny(v[0]))
        })
    }
    // IMULQ imm8, m64, r64 — three-operand form, memory source, REX.W 6B /r ib.
    if len(vv) == 2 && isImm8(v0) && isM64(vv[0]) && isReg64(vv[1]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[2]), addr(v[1]))
            m.emit(0x6b)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // IMULQ imm32, m64, r64 — three-operand form, memory source, REX.W 69 /r id.
    if len(vv) == 2 && isImm32(v0) && isM64(vv[0]) && isReg64(vv[1]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[2]), addr(v[1]))
            m.emit(0x69)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm4(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for IMULQ")
    }
    return p
}
 10307  
// IMULW performs "Signed Multiply".
//
// Mnemonic        : IMUL
// Supported forms : (8 forms)
//
//    * IMULW r16
//    * IMULW m16
//    * IMULW r16, r16
//    * IMULW m16, r16
//    * IMULW imm8, r16, r16
//    * IMULW imm16, r16, r16
//    * IMULW imm8, m16, r16
//    * IMULW imm16, m16, r16
//
func (self *Program) IMULW(v0 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate the instruction with the actual operand count; the concrete
    // form is selected below by matching operand types.
    switch len(vv) {
        case 0  : p = self.alloc("IMULW", 1, Operands { v0 })
        case 1  : p = self.alloc("IMULW", 2, Operands { v0, vv[0] })
        case 2  : p = self.alloc("IMULW", 3, Operands { v0, vv[0], vv[1] })
        default : panic("instruction IMULW takes 1 or 2 or 3 operands")
    }
    // IMULW r16
    if len(vv) == 0 && isReg16(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                     // 0x66: operand-size prefix (16-bit)
            m.rexo(0, v[0], false)           // optional REX for extended registers
            m.emit(0xf7)                     // opcode F7 /5 (one-operand IMUL)
            m.emit(0xe8 | lcode(v[0]))       // ModRM: mod=11, reg=/5, rm=register
        })
    }
    // IMULW m16
    if len(vv) == 0 && isM16(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[0]), false)
            m.emit(0xf7)
            m.mrsd(5, addr(v[0]), 1)         // ModRM/SIB/disp with reg field /5
        })
    }
    // IMULW r16, r16
    if len(vv) == 1 && isReg16(v0) && isReg16(vv[0]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)                     // two-byte opcode 0F AF /r
            m.emit(0xaf)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // IMULW m16, r16
    if len(vv) == 1 && isM16(v0) && isReg16(vv[0]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xaf)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // IMULW imm8, r16, r16
    if len(vv) == 2 && isImm8(v0) && isReg16(vv[0]) && isReg16(vv[1]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x6b)                     // opcode 6B /r ib (imm8 form)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))           // 8-bit immediate
        })
    }
    // IMULW imm16, r16, r16
    if len(vv) == 2 && isImm16(v0) && isReg16(vv[0]) && isReg16(vv[1]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x69)                     // opcode 69 /r iw (imm16 form)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm2(toImmAny(v[0]))           // 16-bit immediate
        })
    }
    // IMULW imm8, m16, r16
    if len(vv) == 2 && isImm8(v0) && isM16(vv[0]) && isReg16(vv[1]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x6b)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // IMULW imm16, m16, r16
    if len(vv) == 2 && isImm16(v0) && isM16(vv[0]) && isReg16(vv[1]) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x69)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm2(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for IMULW")
    }
    return p
}
 10421  
// INCB performs "Increment by 1".
//
// Mnemonic        : INC
// Supported forms : (2 forms)
//
//    * INCB r8
//    * INCB m8
//
func (self *Program) INCB(v0 interface{}) *Instruction {
    p := self.alloc("INCB", 1, Operands { v0 })
    // INCB r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Force a REX prefix for byte registers that require one
            // (e.g. SPL/BPL/SIL/DIL) — see isReg8REX.
            m.rexo(0, v[0], isReg8REX(v[0]))
            m.emit(0xfe)                     // opcode FE /0 (INC r/m8)
            m.emit(0xc0 | lcode(v[0]))       // ModRM: mod=11, reg=/0, rm=register
        })
    }
    // INCB m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)
            m.emit(0xfe)
            m.mrsd(0, addr(v[0]), 1)         // ModRM/SIB/disp with reg field /0
        })
    }
    // No form matched the supplied operand type.
    if p.len == 0 {
        panic("invalid operands for INCB")
    }
    return p
}
 10455  
// INCL performs "Increment by 1".
//
// Mnemonic        : INC
// Supported forms : (2 forms)
//
//    * INCL r32
//    * INCL m32
//
func (self *Program) INCL(v0 interface{}) *Instruction {
    p := self.alloc("INCL", 1, Operands { v0 })
    // INCL r32
    if isReg32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], false)           // optional REX for r8d-r15d
            m.emit(0xff)                     // opcode FF /0 (INC r/m32)
            m.emit(0xc0 | lcode(v[0]))       // ModRM: mod=11, reg=/0, rm=register
        })
    }
    // INCL m32
    if isM32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)
            m.emit(0xff)
            m.mrsd(0, addr(v[0]), 1)         // ModRM/SIB/disp with reg field /0
        })
    }
    // No form matched the supplied operand type.
    if p.len == 0 {
        panic("invalid operands for INCL")
    }
    return p
}
 10489  
// INCQ performs "Increment by 1".
//
// Mnemonic        : INC
// Supported forms : (2 forms)
//
//    * INCQ r64
//    * INCQ m64
//
func (self *Program) INCQ(v0 interface{}) *Instruction {
    p := self.alloc("INCQ", 1, Operands { v0 })
    // INCQ r64
    if isReg64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0]))       // REX.W, plus REX.B from hcode for r8-r15
            m.emit(0xff)                     // opcode FF /0 (INC r/m64)
            m.emit(0xc0 | lcode(v[0]))       // ModRM: mod=11, reg=/0, rm=register
        })
    }
    // INCQ m64
    if isM64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[0]))         // REX.W=1 for the 64-bit memory form
            m.emit(0xff)
            m.mrsd(0, addr(v[0]), 1)         // ModRM/SIB/disp with reg field /0
        })
    }
    // No form matched the supplied operand type.
    if p.len == 0 {
        panic("invalid operands for INCQ")
    }
    return p
}
 10523  
// INCW performs "Increment by 1".
//
// Mnemonic        : INC
// Supported forms : (2 forms)
//
//    * INCW r16
//    * INCW m16
//
func (self *Program) INCW(v0 interface{}) *Instruction {
    p := self.alloc("INCW", 1, Operands { v0 })
    // INCW r16
    if isReg16(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                     // 0x66: operand-size prefix (16-bit)
            m.rexo(0, v[0], false)
            m.emit(0xff)                     // opcode FF /0 (INC r/m16)
            m.emit(0xc0 | lcode(v[0]))       // ModRM: mod=11, reg=/0, rm=register
        })
    }
    // INCW m16
    if isM16(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[0]), false)
            m.emit(0xff)
            m.mrsd(0, addr(v[0]), 1)         // ModRM/SIB/disp with reg field /0
        })
    }
    // No form matched the supplied operand type.
    if p.len == 0 {
        panic("invalid operands for INCW")
    }
    return p
}
 10559  
// INSERTPS performs "Insert Packed Single Precision Floating-Point Value".
//
// Mnemonic        : INSERTPS
// Supported forms : (2 forms)
//
//    * INSERTPS imm8, xmm, xmm    [SSE4.1]
//    * INSERTPS imm8, m32, xmm    [SSE4.1]
//
func (self *Program) INSERTPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("INSERTPS", 3, Operands { v0, v1, v2 })
    // INSERTPS imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)             // record the ISA requirement on the program
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: 66 0F 3A 21 /r ib
            m.emit(0x66)
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x21)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))           // imm8 source/destination selector
        })
    }
    // INSERTPS imm8, m32, xmm
    if isImm8(v0) && isM32(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x21)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for INSERTPS")
    }
    return p
}
 10603  
// INSERTQ performs "Insert Field".
//
// Mnemonic        : INSERTQ
// Supported forms : (2 forms)
//
//    * INSERTQ xmm, xmm                [SSE4A]
//    * INSERTQ imm8, imm8, xmm, xmm    [SSE4A]
//
func (self *Program) INSERTQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // This instruction takes either 2 operands (register form) or
    // 4 operands (two immediates plus two registers).
    switch len(vv) {
        case 0  : p = self.alloc("INSERTQ", 2, Operands { v0, v1 })
        case 2  : p = self.alloc("INSERTQ", 4, Operands { v0, v1, vv[0], vv[1] })
        default : panic("instruction INSERTQ takes 2 or 4 operands")
    }
    // INSERTQ xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4A)              // AMD-only extension
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: F2 0F 79 /r
            m.emit(0xf2)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // INSERTQ imm8, imm8, xmm, xmm
    if len(vv) == 2 && isImm8(v0) && isImm8(v1) && isXMM(vv[0]) && isXMM(vv[1]) {
        self.require(ISA_SSE4A)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: F2 0F 78 /r ib ib — note the immediates are emitted
            // in reverse operand order: v[1] first, then v[0].
            m.emit(0xf2)
            m.rexo(hcode(v[3]), v[2], false)
            m.emit(0x0f)
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for INSERTQ")
    }
    return p
}
 10650  
// INT performs "Call to Interrupt Procedure".
//
// Mnemonic        : INT
// Supported forms : (2 forms)
//
//    * INT 3
//    * INT imm8
//
func (self *Program) INT(v0 interface{}) *Instruction {
    p := self.alloc("INT", 1, Operands { v0 })
    // INT 3
    if isConst3(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xcc)                     // dedicated one-byte breakpoint encoding
        })
    }
    // INT imm8
    // NOTE(review): a literal 3 also satisfies isImm8, so both encodings are
    // added for it; the shorter 0xCC candidate is added first.
    if isImm8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xcd)                     // opcode CD ib
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operand type.
    if p.len == 0 {
        panic("invalid operands for INT")
    }
    return p
}
 10681  
// JA performs "Jump if above (CF == 0 and ZF == 0)".
//
// Mnemonic        : JA
// Supported forms : (2 forms)
//
//    * JA rel8
//    * JA rel32
//
func (self *Program) JA(v0 interface{}) *Instruction {
    p := self.alloc("JA", 1, Operands { v0 })
    p.branch = _B_conditional                // mark as a conditional branch
    // JA rel8
    if isRel8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x77)                     // short form: 77 cb
            m.imm1(relv(v[0]))
        })
    }
    // JA rel32
    if isRel32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)                     // near form: 0F 87 cd
            m.emit(0x87)
            m.imm4(relv(v[0]))
        })
    }
    // JA label: adds both a rel8 (_F_rel1) and a rel32 (_F_rel4) candidate;
    // the final encoding is chosen later, once the label offset is resolved.
    if isLabel(v0) {
        p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
            m.emit(0x77)
            m.imm1(relv(v[0]))
        })
        p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)
            m.emit(0x87)
            m.imm4(relv(v[0]))
        })
    }
    // No form matched the supplied operand type.
    if p.len == 0 {
        panic("invalid operands for JA")
    }
    return p
}
 10727  
// JAE performs "Jump if above or equal (CF == 0)".
//
// Mnemonic        : JAE
// Supported forms : (2 forms)
//
//    * JAE rel8
//    * JAE rel32
//
func (self *Program) JAE(v0 interface{}) *Instruction {
    p := self.alloc("JAE", 1, Operands { v0 })
    p.branch = _B_conditional                // mark as a conditional branch
    // JAE rel8
    if isRel8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x73)                     // short form: 73 cb
            m.imm1(relv(v[0]))
        })
    }
    // JAE rel32
    if isRel32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)                     // near form: 0F 83 cd
            m.emit(0x83)
            m.imm4(relv(v[0]))
        })
    }
    // JAE label: adds both a rel8 (_F_rel1) and a rel32 (_F_rel4) candidate;
    // the final encoding is chosen later, once the label offset is resolved.
    if isLabel(v0) {
        p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
            m.emit(0x73)
            m.imm1(relv(v[0]))
        })
        p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)
            m.emit(0x83)
            m.imm4(relv(v[0]))
        })
    }
    // No form matched the supplied operand type.
    if p.len == 0 {
        panic("invalid operands for JAE")
    }
    return p
}
 10773  
// JB performs "Jump if below (CF == 1)".
//
// Mnemonic        : JB
// Supported forms : (2 forms)
//
//    * JB rel8
//    * JB rel32
//
func (self *Program) JB(v0 interface{}) *Instruction {
    p := self.alloc("JB", 1, Operands { v0 })
    p.branch = _B_conditional                // mark as a conditional branch
    // JB rel8
    if isRel8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x72)                     // short form: 72 cb
            m.imm1(relv(v[0]))
        })
    }
    // JB rel32
    if isRel32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)                     // near form: 0F 82 cd
            m.emit(0x82)
            m.imm4(relv(v[0]))
        })
    }
    // JB label: adds both a rel8 (_F_rel1) and a rel32 (_F_rel4) candidate;
    // the final encoding is chosen later, once the label offset is resolved.
    if isLabel(v0) {
        p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
            m.emit(0x72)
            m.imm1(relv(v[0]))
        })
        p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)
            m.emit(0x82)
            m.imm4(relv(v[0]))
        })
    }
    // No form matched the supplied operand type.
    if p.len == 0 {
        panic("invalid operands for JB")
    }
    return p
}
 10819  
// JBE performs "Jump if below or equal (CF == 1 or ZF == 1)".
//
// Mnemonic        : JBE
// Supported forms : (2 forms)
//
//    * JBE rel8
//    * JBE rel32
//
func (self *Program) JBE(v0 interface{}) *Instruction {
    p := self.alloc("JBE", 1, Operands { v0 })
    p.branch = _B_conditional                // mark as a conditional branch
    // JBE rel8
    if isRel8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x76)                     // short form: 76 cb
            m.imm1(relv(v[0]))
        })
    }
    // JBE rel32
    if isRel32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)                     // near form: 0F 86 cd
            m.emit(0x86)
            m.imm4(relv(v[0]))
        })
    }
    // JBE label: adds both a rel8 (_F_rel1) and a rel32 (_F_rel4) candidate;
    // the final encoding is chosen later, once the label offset is resolved.
    if isLabel(v0) {
        p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
            m.emit(0x76)
            m.imm1(relv(v[0]))
        })
        p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)
            m.emit(0x86)
            m.imm4(relv(v[0]))
        })
    }
    // No form matched the supplied operand type.
    if p.len == 0 {
        panic("invalid operands for JBE")
    }
    return p
}
 10865  
// JC performs "Jump if carry (CF == 1)".
//
// JC is an alias mnemonic of JB: both encode to 72 cb / 0F 82 cd.
//
// Mnemonic        : JC
// Supported forms : (2 forms)
//
//    * JC rel8
//    * JC rel32
//
func (self *Program) JC(v0 interface{}) *Instruction {
    p := self.alloc("JC", 1, Operands { v0 })
    p.branch = _B_conditional                // mark as a conditional branch
    // JC rel8
    if isRel8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x72)                     // short form: 72 cb
            m.imm1(relv(v[0]))
        })
    }
    // JC rel32
    if isRel32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)                     // near form: 0F 82 cd
            m.emit(0x82)
            m.imm4(relv(v[0]))
        })
    }
    // JC label: adds both a rel8 (_F_rel1) and a rel32 (_F_rel4) candidate;
    // the final encoding is chosen later, once the label offset is resolved.
    if isLabel(v0) {
        p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
            m.emit(0x72)
            m.imm1(relv(v[0]))
        })
        p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)
            m.emit(0x82)
            m.imm4(relv(v[0]))
        })
    }
    // No form matched the supplied operand type.
    if p.len == 0 {
        panic("invalid operands for JC")
    }
    return p
}
 10911  
// JE performs "Jump if equal (ZF == 1)".
//
// Mnemonic        : JE
// Supported forms : (2 forms)
//
//    * JE rel8
//    * JE rel32
//
func (self *Program) JE(v0 interface{}) *Instruction {
    p := self.alloc("JE", 1, Operands { v0 })
    p.branch = _B_conditional                // mark as a conditional branch
    // JE rel8
    if isRel8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x74)                     // short form: 74 cb
            m.imm1(relv(v[0]))
        })
    }
    // JE rel32
    if isRel32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)                     // near form: 0F 84 cd
            m.emit(0x84)
            m.imm4(relv(v[0]))
        })
    }
    // JE label: adds both a rel8 (_F_rel1) and a rel32 (_F_rel4) candidate;
    // the final encoding is chosen later, once the label offset is resolved.
    if isLabel(v0) {
        p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
            m.emit(0x74)
            m.imm1(relv(v[0]))
        })
        p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)
            m.emit(0x84)
            m.imm4(relv(v[0]))
        })
    }
    // No form matched the supplied operand type.
    if p.len == 0 {
        panic("invalid operands for JE")
    }
    return p
}
 10957  
// JECXZ performs "Jump if ECX register is 0".
//
// JECXZ only has a short (rel8) encoding — there is no rel32 form,
// so labels likewise get a single _F_rel1 candidate.
//
// Mnemonic        : JECXZ
// Supported forms : (1 form)
//
//    * JECXZ rel8
//
func (self *Program) JECXZ(v0 interface{}) *Instruction {
    p := self.alloc("JECXZ", 1, Operands { v0 })
    p.branch = _B_conditional                // mark as a conditional branch
    // JECXZ rel8
    if isRel8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xe3)                     // opcode E3 cb
            m.imm1(relv(v[0]))
        })
    }
    // JECXZ label
    if isLabel(v0) {
        p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
            m.emit(0xe3)
            m.imm1(relv(v[0]))
        })
    }
    // No form matched the supplied operand type.
    if p.len == 0 {
        panic("invalid operands for JECXZ")
    }
    return p
}
 10988  
// JG performs "Jump if greater (ZF == 0 and SF == OF)".
//
// Mnemonic        : JG
// Supported forms : (2 forms)
//
//    * JG rel8
//    * JG rel32
//
func (self *Program) JG(v0 interface{}) *Instruction {
    p := self.alloc("JG", 1, Operands { v0 })
    p.branch = _B_conditional                // mark as a conditional branch
    // JG rel8
    if isRel8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x7f)                     // short form: 7F cb
            m.imm1(relv(v[0]))
        })
    }
    // JG rel32
    if isRel32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)                     // near form: 0F 8F cd
            m.emit(0x8f)
            m.imm4(relv(v[0]))
        })
    }
    // JG label: adds both a rel8 (_F_rel1) and a rel32 (_F_rel4) candidate;
    // the final encoding is chosen later, once the label offset is resolved.
    if isLabel(v0) {
        p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
            m.emit(0x7f)
            m.imm1(relv(v[0]))
        })
        p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)
            m.emit(0x8f)
            m.imm4(relv(v[0]))
        })
    }
    // No form matched the supplied operand type.
    if p.len == 0 {
        panic("invalid operands for JG")
    }
    return p
}
 11034  
// JGE performs "Jump if greater or equal (SF == OF)".
//
// Mnemonic        : JGE
// Supported forms : (2 forms)
//
//    * JGE rel8
//    * JGE rel32
//
func (self *Program) JGE(v0 interface{}) *Instruction {
    p := self.alloc("JGE", 1, Operands { v0 })
    p.branch = _B_conditional                // mark as a conditional branch
    // JGE rel8
    if isRel8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x7d)                     // short form: 7D cb
            m.imm1(relv(v[0]))
        })
    }
    // JGE rel32
    if isRel32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)                     // near form: 0F 8D cd
            m.emit(0x8d)
            m.imm4(relv(v[0]))
        })
    }
    // JGE label: adds both a rel8 (_F_rel1) and a rel32 (_F_rel4) candidate;
    // the final encoding is chosen later, once the label offset is resolved.
    if isLabel(v0) {
        p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
            m.emit(0x7d)
            m.imm1(relv(v[0]))
        })
        p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)
            m.emit(0x8d)
            m.imm4(relv(v[0]))
        })
    }
    // No form matched the supplied operand type.
    if p.len == 0 {
        panic("invalid operands for JGE")
    }
    return p
}
 11080  
// JL performs "Jump if less (SF != OF)".
//
// Mnemonic        : JL
// Supported forms : (2 forms)
//
//    * JL rel8
//    * JL rel32
//
func (self *Program) JL(v0 interface{}) *Instruction {
    p := self.alloc("JL", 1, Operands { v0 })
    p.branch = _B_conditional                // mark as a conditional branch
    // JL rel8
    if isRel8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x7c)                     // short form: 7C cb
            m.imm1(relv(v[0]))
        })
    }
    // JL rel32
    if isRel32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)                     // near form: 0F 8C cd
            m.emit(0x8c)
            m.imm4(relv(v[0]))
        })
    }
    // JL label: adds both a rel8 (_F_rel1) and a rel32 (_F_rel4) candidate;
    // the final encoding is chosen later, once the label offset is resolved.
    if isLabel(v0) {
        p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
            m.emit(0x7c)
            m.imm1(relv(v[0]))
        })
        p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)
            m.emit(0x8c)
            m.imm4(relv(v[0]))
        })
    }
    // No form matched the supplied operand type.
    if p.len == 0 {
        panic("invalid operands for JL")
    }
    return p
}
 11126  
// JLE performs "Jump if less or equal (ZF == 1 or SF != OF)".
//
// Mnemonic        : JLE
// Supported forms : (2 forms)
//
//    * JLE rel8
//    * JLE rel32
//
func (self *Program) JLE(v0 interface{}) *Instruction {
    p := self.alloc("JLE", 1, Operands { v0 })
    p.branch = _B_conditional                // mark as a conditional branch
    // JLE rel8
    if isRel8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x7e)                     // short form: 7E cb
            m.imm1(relv(v[0]))
        })
    }
    // JLE rel32
    if isRel32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)                     // near form: 0F 8E cd
            m.emit(0x8e)
            m.imm4(relv(v[0]))
        })
    }
    // JLE label: adds both a rel8 (_F_rel1) and a rel32 (_F_rel4) candidate;
    // the final encoding is chosen later, once the label offset is resolved.
    if isLabel(v0) {
        p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
            m.emit(0x7e)
            m.imm1(relv(v[0]))
        })
        p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)
            m.emit(0x8e)
            m.imm4(relv(v[0]))
        })
    }
    // No form matched the supplied operand type.
    if p.len == 0 {
        panic("invalid operands for JLE")
    }
    return p
}
 11172  
// JMP performs "Jump Unconditionally".
//
// Mnemonic        : JMP
// Supported forms : (2 forms)
//
//    * JMP rel8
//    * JMP rel32
//
func (self *Program) JMP(v0 interface{}) *Instruction {
    p := self.alloc("JMP", 1, Operands { v0 })
    p.branch = _B_unconditional              // mark as an unconditional branch
    // JMP rel8
    if isRel8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xeb)                     // short form: EB cb
            m.imm1(relv(v[0]))
        })
    }
    // JMP rel32
    if isRel32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xe9)                     // near form: E9 cd
            m.imm4(relv(v[0]))
        })
    }
    // JMP label: adds both a rel8 (_F_rel1) and a rel32 (_F_rel4) candidate;
    // the final encoding is chosen later, once the label offset is resolved.
    if isLabel(v0) {
        p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
            m.emit(0xeb)
            m.imm1(relv(v[0]))
        })
        p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
            m.emit(0xe9)
            m.imm4(relv(v[0]))
        })
    }
    // No form matched the supplied operand type.
    if p.len == 0 {
        panic("invalid operands for JMP")
    }
    return p
}
 11216  
// JMPQ performs "Jump Unconditionally".
//
// These are the indirect (register / memory) jump forms; unlike the
// relative forms in JMP, no branch relaxation is involved.
//
// Mnemonic        : JMP
// Supported forms : (2 forms)
//
//    * JMPQ r64
//    * JMPQ m64
//
func (self *Program) JMPQ(v0 interface{}) *Instruction {
    p := self.alloc("JMPQ", 1, Operands { v0 })
    // JMPQ r64
    if isReg64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], false)           // optional REX.B for r8-r15 (no REX.W needed)
            m.emit(0xff)                     // opcode FF /4 (near indirect jump)
            m.emit(0xe0 | lcode(v[0]))       // ModRM: mod=11, reg=/4, rm=register
        })
    }
    // JMPQ m64
    if isM64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)
            m.emit(0xff)
            m.mrsd(4, addr(v[0]), 1)         // ModRM/SIB/disp with reg field /4
        })
    }
    // No form matched the supplied operand type.
    if p.len == 0 {
        panic("invalid operands for JMPQ")
    }
    return p
}
 11250  
// JNA performs "Jump if not above (CF == 1 or ZF == 1)".
//
// JNA is an alias mnemonic of JBE: both encode to 76 cb / 0F 86 cd.
//
// Mnemonic        : JNA
// Supported forms : (2 forms)
//
//    * JNA rel8
//    * JNA rel32
//
func (self *Program) JNA(v0 interface{}) *Instruction {
    p := self.alloc("JNA", 1, Operands { v0 })
    p.branch = _B_conditional                // mark as a conditional branch
    // JNA rel8
    if isRel8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x76)                     // short form: 76 cb
            m.imm1(relv(v[0]))
        })
    }
    // JNA rel32
    if isRel32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)                     // near form: 0F 86 cd
            m.emit(0x86)
            m.imm4(relv(v[0]))
        })
    }
    // JNA label: adds both a rel8 (_F_rel1) and a rel32 (_F_rel4) candidate;
    // the final encoding is chosen later, once the label offset is resolved.
    if isLabel(v0) {
        p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
            m.emit(0x76)
            m.imm1(relv(v[0]))
        })
        p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
            m.emit(0x0f)
            m.emit(0x86)
            m.imm4(relv(v[0]))
        })
    }
    // No form matched the supplied operand type.
    if p.len == 0 {
        panic("invalid operands for JNA")
    }
    return p
}
 11296  
 11297  // JNAE performs "Jump if not above or equal (CF == 1)".
 11298  //
 11299  // Mnemonic        : JNAE
 11300  // Supported forms : (2 forms)
 11301  //
 11302  //    * JNAE rel8
 11303  //    * JNAE rel32
 11304  //
 11305  func (self *Program) JNAE(v0 interface{}) *Instruction {
 11306      p := self.alloc("JNAE", 1, Operands { v0 })
 11307      p.branch = _B_conditional
 11308      // JNAE rel8
 11309      if isRel8(v0) {
 11310          p.domain = DomainGeneric
 11311          p.add(0, func(m *_Encoding, v []interface{}) {
 11312              m.emit(0x72)
 11313              m.imm1(relv(v[0]))
 11314          })
 11315      }
 11316      // JNAE rel32
 11317      if isRel32(v0) {
 11318          p.domain = DomainGeneric
 11319          p.add(0, func(m *_Encoding, v []interface{}) {
 11320              m.emit(0x0f)
 11321              m.emit(0x82)
 11322              m.imm4(relv(v[0]))
 11323          })
 11324      }
 11325      // JNAE label
 11326      if isLabel(v0) {
 11327          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 11328              m.emit(0x72)
 11329              m.imm1(relv(v[0]))
 11330          })
 11331          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 11332              m.emit(0x0f)
 11333              m.emit(0x82)
 11334              m.imm4(relv(v[0]))
 11335          })
 11336      }
 11337      if p.len == 0 {
 11338          panic("invalid operands for JNAE")
 11339      }
 11340      return p
 11341  }
 11342  
 11343  // JNB performs "Jump if not below (CF == 0)".
 11344  //
 11345  // Mnemonic        : JNB
 11346  // Supported forms : (2 forms)
 11347  //
 11348  //    * JNB rel8
 11349  //    * JNB rel32
 11350  //
 11351  func (self *Program) JNB(v0 interface{}) *Instruction {
 11352      p := self.alloc("JNB", 1, Operands { v0 })
 11353      p.branch = _B_conditional
 11354      // JNB rel8
 11355      if isRel8(v0) {
 11356          p.domain = DomainGeneric
 11357          p.add(0, func(m *_Encoding, v []interface{}) {
 11358              m.emit(0x73)
 11359              m.imm1(relv(v[0]))
 11360          })
 11361      }
 11362      // JNB rel32
 11363      if isRel32(v0) {
 11364          p.domain = DomainGeneric
 11365          p.add(0, func(m *_Encoding, v []interface{}) {
 11366              m.emit(0x0f)
 11367              m.emit(0x83)
 11368              m.imm4(relv(v[0]))
 11369          })
 11370      }
 11371      // JNB label
 11372      if isLabel(v0) {
 11373          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 11374              m.emit(0x73)
 11375              m.imm1(relv(v[0]))
 11376          })
 11377          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 11378              m.emit(0x0f)
 11379              m.emit(0x83)
 11380              m.imm4(relv(v[0]))
 11381          })
 11382      }
 11383      if p.len == 0 {
 11384          panic("invalid operands for JNB")
 11385      }
 11386      return p
 11387  }
 11388  
 11389  // JNBE performs "Jump if not below or equal (CF == 0 and ZF == 0)".
 11390  //
 11391  // Mnemonic        : JNBE
 11392  // Supported forms : (2 forms)
 11393  //
 11394  //    * JNBE rel8
 11395  //    * JNBE rel32
 11396  //
 11397  func (self *Program) JNBE(v0 interface{}) *Instruction {
 11398      p := self.alloc("JNBE", 1, Operands { v0 })
 11399      p.branch = _B_conditional
 11400      // JNBE rel8
 11401      if isRel8(v0) {
 11402          p.domain = DomainGeneric
 11403          p.add(0, func(m *_Encoding, v []interface{}) {
 11404              m.emit(0x77)
 11405              m.imm1(relv(v[0]))
 11406          })
 11407      }
 11408      // JNBE rel32
 11409      if isRel32(v0) {
 11410          p.domain = DomainGeneric
 11411          p.add(0, func(m *_Encoding, v []interface{}) {
 11412              m.emit(0x0f)
 11413              m.emit(0x87)
 11414              m.imm4(relv(v[0]))
 11415          })
 11416      }
 11417      // JNBE label
 11418      if isLabel(v0) {
 11419          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 11420              m.emit(0x77)
 11421              m.imm1(relv(v[0]))
 11422          })
 11423          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 11424              m.emit(0x0f)
 11425              m.emit(0x87)
 11426              m.imm4(relv(v[0]))
 11427          })
 11428      }
 11429      if p.len == 0 {
 11430          panic("invalid operands for JNBE")
 11431      }
 11432      return p
 11433  }
 11434  
 11435  // JNC performs "Jump if not carry (CF == 0)".
 11436  //
 11437  // Mnemonic        : JNC
 11438  // Supported forms : (2 forms)
 11439  //
 11440  //    * JNC rel8
 11441  //    * JNC rel32
 11442  //
 11443  func (self *Program) JNC(v0 interface{}) *Instruction {
 11444      p := self.alloc("JNC", 1, Operands { v0 })
 11445      p.branch = _B_conditional
 11446      // JNC rel8
 11447      if isRel8(v0) {
 11448          p.domain = DomainGeneric
 11449          p.add(0, func(m *_Encoding, v []interface{}) {
 11450              m.emit(0x73)
 11451              m.imm1(relv(v[0]))
 11452          })
 11453      }
 11454      // JNC rel32
 11455      if isRel32(v0) {
 11456          p.domain = DomainGeneric
 11457          p.add(0, func(m *_Encoding, v []interface{}) {
 11458              m.emit(0x0f)
 11459              m.emit(0x83)
 11460              m.imm4(relv(v[0]))
 11461          })
 11462      }
 11463      // JNC label
 11464      if isLabel(v0) {
 11465          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 11466              m.emit(0x73)
 11467              m.imm1(relv(v[0]))
 11468          })
 11469          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 11470              m.emit(0x0f)
 11471              m.emit(0x83)
 11472              m.imm4(relv(v[0]))
 11473          })
 11474      }
 11475      if p.len == 0 {
 11476          panic("invalid operands for JNC")
 11477      }
 11478      return p
 11479  }
 11480  
 11481  // JNE performs "Jump if not equal (ZF == 0)".
 11482  //
 11483  // Mnemonic        : JNE
 11484  // Supported forms : (2 forms)
 11485  //
 11486  //    * JNE rel8
 11487  //    * JNE rel32
 11488  //
 11489  func (self *Program) JNE(v0 interface{}) *Instruction {
 11490      p := self.alloc("JNE", 1, Operands { v0 })
 11491      p.branch = _B_conditional
 11492      // JNE rel8
 11493      if isRel8(v0) {
 11494          p.domain = DomainGeneric
 11495          p.add(0, func(m *_Encoding, v []interface{}) {
 11496              m.emit(0x75)
 11497              m.imm1(relv(v[0]))
 11498          })
 11499      }
 11500      // JNE rel32
 11501      if isRel32(v0) {
 11502          p.domain = DomainGeneric
 11503          p.add(0, func(m *_Encoding, v []interface{}) {
 11504              m.emit(0x0f)
 11505              m.emit(0x85)
 11506              m.imm4(relv(v[0]))
 11507          })
 11508      }
 11509      // JNE label
 11510      if isLabel(v0) {
 11511          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 11512              m.emit(0x75)
 11513              m.imm1(relv(v[0]))
 11514          })
 11515          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 11516              m.emit(0x0f)
 11517              m.emit(0x85)
 11518              m.imm4(relv(v[0]))
 11519          })
 11520      }
 11521      if p.len == 0 {
 11522          panic("invalid operands for JNE")
 11523      }
 11524      return p
 11525  }
 11526  
 11527  // JNG performs "Jump if not greater (ZF == 1 or SF != OF)".
 11528  //
 11529  // Mnemonic        : JNG
 11530  // Supported forms : (2 forms)
 11531  //
 11532  //    * JNG rel8
 11533  //    * JNG rel32
 11534  //
 11535  func (self *Program) JNG(v0 interface{}) *Instruction {
 11536      p := self.alloc("JNG", 1, Operands { v0 })
 11537      p.branch = _B_conditional
 11538      // JNG rel8
 11539      if isRel8(v0) {
 11540          p.domain = DomainGeneric
 11541          p.add(0, func(m *_Encoding, v []interface{}) {
 11542              m.emit(0x7e)
 11543              m.imm1(relv(v[0]))
 11544          })
 11545      }
 11546      // JNG rel32
 11547      if isRel32(v0) {
 11548          p.domain = DomainGeneric
 11549          p.add(0, func(m *_Encoding, v []interface{}) {
 11550              m.emit(0x0f)
 11551              m.emit(0x8e)
 11552              m.imm4(relv(v[0]))
 11553          })
 11554      }
 11555      // JNG label
 11556      if isLabel(v0) {
 11557          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 11558              m.emit(0x7e)
 11559              m.imm1(relv(v[0]))
 11560          })
 11561          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 11562              m.emit(0x0f)
 11563              m.emit(0x8e)
 11564              m.imm4(relv(v[0]))
 11565          })
 11566      }
 11567      if p.len == 0 {
 11568          panic("invalid operands for JNG")
 11569      }
 11570      return p
 11571  }
 11572  
 11573  // JNGE performs "Jump if not greater or equal (SF != OF)".
 11574  //
 11575  // Mnemonic        : JNGE
 11576  // Supported forms : (2 forms)
 11577  //
 11578  //    * JNGE rel8
 11579  //    * JNGE rel32
 11580  //
 11581  func (self *Program) JNGE(v0 interface{}) *Instruction {
 11582      p := self.alloc("JNGE", 1, Operands { v0 })
 11583      p.branch = _B_conditional
 11584      // JNGE rel8
 11585      if isRel8(v0) {
 11586          p.domain = DomainGeneric
 11587          p.add(0, func(m *_Encoding, v []interface{}) {
 11588              m.emit(0x7c)
 11589              m.imm1(relv(v[0]))
 11590          })
 11591      }
 11592      // JNGE rel32
 11593      if isRel32(v0) {
 11594          p.domain = DomainGeneric
 11595          p.add(0, func(m *_Encoding, v []interface{}) {
 11596              m.emit(0x0f)
 11597              m.emit(0x8c)
 11598              m.imm4(relv(v[0]))
 11599          })
 11600      }
 11601      // JNGE label
 11602      if isLabel(v0) {
 11603          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 11604              m.emit(0x7c)
 11605              m.imm1(relv(v[0]))
 11606          })
 11607          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 11608              m.emit(0x0f)
 11609              m.emit(0x8c)
 11610              m.imm4(relv(v[0]))
 11611          })
 11612      }
 11613      if p.len == 0 {
 11614          panic("invalid operands for JNGE")
 11615      }
 11616      return p
 11617  }
 11618  
 11619  // JNL performs "Jump if not less (SF == OF)".
 11620  //
 11621  // Mnemonic        : JNL
 11622  // Supported forms : (2 forms)
 11623  //
 11624  //    * JNL rel8
 11625  //    * JNL rel32
 11626  //
 11627  func (self *Program) JNL(v0 interface{}) *Instruction {
 11628      p := self.alloc("JNL", 1, Operands { v0 })
 11629      p.branch = _B_conditional
 11630      // JNL rel8
 11631      if isRel8(v0) {
 11632          p.domain = DomainGeneric
 11633          p.add(0, func(m *_Encoding, v []interface{}) {
 11634              m.emit(0x7d)
 11635              m.imm1(relv(v[0]))
 11636          })
 11637      }
 11638      // JNL rel32
 11639      if isRel32(v0) {
 11640          p.domain = DomainGeneric
 11641          p.add(0, func(m *_Encoding, v []interface{}) {
 11642              m.emit(0x0f)
 11643              m.emit(0x8d)
 11644              m.imm4(relv(v[0]))
 11645          })
 11646      }
 11647      // JNL label
 11648      if isLabel(v0) {
 11649          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 11650              m.emit(0x7d)
 11651              m.imm1(relv(v[0]))
 11652          })
 11653          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 11654              m.emit(0x0f)
 11655              m.emit(0x8d)
 11656              m.imm4(relv(v[0]))
 11657          })
 11658      }
 11659      if p.len == 0 {
 11660          panic("invalid operands for JNL")
 11661      }
 11662      return p
 11663  }
 11664  
 11665  // JNLE performs "Jump if not less or equal (ZF == 0 and SF == OF)".
 11666  //
 11667  // Mnemonic        : JNLE
 11668  // Supported forms : (2 forms)
 11669  //
 11670  //    * JNLE rel8
 11671  //    * JNLE rel32
 11672  //
 11673  func (self *Program) JNLE(v0 interface{}) *Instruction {
 11674      p := self.alloc("JNLE", 1, Operands { v0 })
 11675      p.branch = _B_conditional
 11676      // JNLE rel8
 11677      if isRel8(v0) {
 11678          p.domain = DomainGeneric
 11679          p.add(0, func(m *_Encoding, v []interface{}) {
 11680              m.emit(0x7f)
 11681              m.imm1(relv(v[0]))
 11682          })
 11683      }
 11684      // JNLE rel32
 11685      if isRel32(v0) {
 11686          p.domain = DomainGeneric
 11687          p.add(0, func(m *_Encoding, v []interface{}) {
 11688              m.emit(0x0f)
 11689              m.emit(0x8f)
 11690              m.imm4(relv(v[0]))
 11691          })
 11692      }
 11693      // JNLE label
 11694      if isLabel(v0) {
 11695          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 11696              m.emit(0x7f)
 11697              m.imm1(relv(v[0]))
 11698          })
 11699          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 11700              m.emit(0x0f)
 11701              m.emit(0x8f)
 11702              m.imm4(relv(v[0]))
 11703          })
 11704      }
 11705      if p.len == 0 {
 11706          panic("invalid operands for JNLE")
 11707      }
 11708      return p
 11709  }
 11710  
 11711  // JNO performs "Jump if not overflow (OF == 0)".
 11712  //
 11713  // Mnemonic        : JNO
 11714  // Supported forms : (2 forms)
 11715  //
 11716  //    * JNO rel8
 11717  //    * JNO rel32
 11718  //
 11719  func (self *Program) JNO(v0 interface{}) *Instruction {
 11720      p := self.alloc("JNO", 1, Operands { v0 })
 11721      p.branch = _B_conditional
 11722      // JNO rel8
 11723      if isRel8(v0) {
 11724          p.domain = DomainGeneric
 11725          p.add(0, func(m *_Encoding, v []interface{}) {
 11726              m.emit(0x71)
 11727              m.imm1(relv(v[0]))
 11728          })
 11729      }
 11730      // JNO rel32
 11731      if isRel32(v0) {
 11732          p.domain = DomainGeneric
 11733          p.add(0, func(m *_Encoding, v []interface{}) {
 11734              m.emit(0x0f)
 11735              m.emit(0x81)
 11736              m.imm4(relv(v[0]))
 11737          })
 11738      }
 11739      // JNO label
 11740      if isLabel(v0) {
 11741          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 11742              m.emit(0x71)
 11743              m.imm1(relv(v[0]))
 11744          })
 11745          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 11746              m.emit(0x0f)
 11747              m.emit(0x81)
 11748              m.imm4(relv(v[0]))
 11749          })
 11750      }
 11751      if p.len == 0 {
 11752          panic("invalid operands for JNO")
 11753      }
 11754      return p
 11755  }
 11756  
 11757  // JNP performs "Jump if not parity (PF == 0)".
 11758  //
 11759  // Mnemonic        : JNP
 11760  // Supported forms : (2 forms)
 11761  //
 11762  //    * JNP rel8
 11763  //    * JNP rel32
 11764  //
 11765  func (self *Program) JNP(v0 interface{}) *Instruction {
 11766      p := self.alloc("JNP", 1, Operands { v0 })
 11767      p.branch = _B_conditional
 11768      // JNP rel8
 11769      if isRel8(v0) {
 11770          p.domain = DomainGeneric
 11771          p.add(0, func(m *_Encoding, v []interface{}) {
 11772              m.emit(0x7b)
 11773              m.imm1(relv(v[0]))
 11774          })
 11775      }
 11776      // JNP rel32
 11777      if isRel32(v0) {
 11778          p.domain = DomainGeneric
 11779          p.add(0, func(m *_Encoding, v []interface{}) {
 11780              m.emit(0x0f)
 11781              m.emit(0x8b)
 11782              m.imm4(relv(v[0]))
 11783          })
 11784      }
 11785      // JNP label
 11786      if isLabel(v0) {
 11787          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 11788              m.emit(0x7b)
 11789              m.imm1(relv(v[0]))
 11790          })
 11791          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 11792              m.emit(0x0f)
 11793              m.emit(0x8b)
 11794              m.imm4(relv(v[0]))
 11795          })
 11796      }
 11797      if p.len == 0 {
 11798          panic("invalid operands for JNP")
 11799      }
 11800      return p
 11801  }
 11802  
 11803  // JNS performs "Jump if not sign (SF == 0)".
 11804  //
 11805  // Mnemonic        : JNS
 11806  // Supported forms : (2 forms)
 11807  //
 11808  //    * JNS rel8
 11809  //    * JNS rel32
 11810  //
 11811  func (self *Program) JNS(v0 interface{}) *Instruction {
 11812      p := self.alloc("JNS", 1, Operands { v0 })
 11813      p.branch = _B_conditional
 11814      // JNS rel8
 11815      if isRel8(v0) {
 11816          p.domain = DomainGeneric
 11817          p.add(0, func(m *_Encoding, v []interface{}) {
 11818              m.emit(0x79)
 11819              m.imm1(relv(v[0]))
 11820          })
 11821      }
 11822      // JNS rel32
 11823      if isRel32(v0) {
 11824          p.domain = DomainGeneric
 11825          p.add(0, func(m *_Encoding, v []interface{}) {
 11826              m.emit(0x0f)
 11827              m.emit(0x89)
 11828              m.imm4(relv(v[0]))
 11829          })
 11830      }
 11831      // JNS label
 11832      if isLabel(v0) {
 11833          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 11834              m.emit(0x79)
 11835              m.imm1(relv(v[0]))
 11836          })
 11837          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 11838              m.emit(0x0f)
 11839              m.emit(0x89)
 11840              m.imm4(relv(v[0]))
 11841          })
 11842      }
 11843      if p.len == 0 {
 11844          panic("invalid operands for JNS")
 11845      }
 11846      return p
 11847  }
 11848  
 11849  // JNZ performs "Jump if not zero (ZF == 0)".
 11850  //
 11851  // Mnemonic        : JNZ
 11852  // Supported forms : (2 forms)
 11853  //
 11854  //    * JNZ rel8
 11855  //    * JNZ rel32
 11856  //
 11857  func (self *Program) JNZ(v0 interface{}) *Instruction {
 11858      p := self.alloc("JNZ", 1, Operands { v0 })
 11859      p.branch = _B_conditional
 11860      // JNZ rel8
 11861      if isRel8(v0) {
 11862          p.domain = DomainGeneric
 11863          p.add(0, func(m *_Encoding, v []interface{}) {
 11864              m.emit(0x75)
 11865              m.imm1(relv(v[0]))
 11866          })
 11867      }
 11868      // JNZ rel32
 11869      if isRel32(v0) {
 11870          p.domain = DomainGeneric
 11871          p.add(0, func(m *_Encoding, v []interface{}) {
 11872              m.emit(0x0f)
 11873              m.emit(0x85)
 11874              m.imm4(relv(v[0]))
 11875          })
 11876      }
 11877      // JNZ label
 11878      if isLabel(v0) {
 11879          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 11880              m.emit(0x75)
 11881              m.imm1(relv(v[0]))
 11882          })
 11883          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 11884              m.emit(0x0f)
 11885              m.emit(0x85)
 11886              m.imm4(relv(v[0]))
 11887          })
 11888      }
 11889      if p.len == 0 {
 11890          panic("invalid operands for JNZ")
 11891      }
 11892      return p
 11893  }
 11894  
 11895  // JO performs "Jump if overflow (OF == 1)".
 11896  //
 11897  // Mnemonic        : JO
 11898  // Supported forms : (2 forms)
 11899  //
 11900  //    * JO rel8
 11901  //    * JO rel32
 11902  //
 11903  func (self *Program) JO(v0 interface{}) *Instruction {
 11904      p := self.alloc("JO", 1, Operands { v0 })
 11905      p.branch = _B_conditional
 11906      // JO rel8
 11907      if isRel8(v0) {
 11908          p.domain = DomainGeneric
 11909          p.add(0, func(m *_Encoding, v []interface{}) {
 11910              m.emit(0x70)
 11911              m.imm1(relv(v[0]))
 11912          })
 11913      }
 11914      // JO rel32
 11915      if isRel32(v0) {
 11916          p.domain = DomainGeneric
 11917          p.add(0, func(m *_Encoding, v []interface{}) {
 11918              m.emit(0x0f)
 11919              m.emit(0x80)
 11920              m.imm4(relv(v[0]))
 11921          })
 11922      }
 11923      // JO label
 11924      if isLabel(v0) {
 11925          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 11926              m.emit(0x70)
 11927              m.imm1(relv(v[0]))
 11928          })
 11929          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 11930              m.emit(0x0f)
 11931              m.emit(0x80)
 11932              m.imm4(relv(v[0]))
 11933          })
 11934      }
 11935      if p.len == 0 {
 11936          panic("invalid operands for JO")
 11937      }
 11938      return p
 11939  }
 11940  
 11941  // JP performs "Jump if parity (PF == 1)".
 11942  //
 11943  // Mnemonic        : JP
 11944  // Supported forms : (2 forms)
 11945  //
 11946  //    * JP rel8
 11947  //    * JP rel32
 11948  //
 11949  func (self *Program) JP(v0 interface{}) *Instruction {
 11950      p := self.alloc("JP", 1, Operands { v0 })
 11951      p.branch = _B_conditional
 11952      // JP rel8
 11953      if isRel8(v0) {
 11954          p.domain = DomainGeneric
 11955          p.add(0, func(m *_Encoding, v []interface{}) {
 11956              m.emit(0x7a)
 11957              m.imm1(relv(v[0]))
 11958          })
 11959      }
 11960      // JP rel32
 11961      if isRel32(v0) {
 11962          p.domain = DomainGeneric
 11963          p.add(0, func(m *_Encoding, v []interface{}) {
 11964              m.emit(0x0f)
 11965              m.emit(0x8a)
 11966              m.imm4(relv(v[0]))
 11967          })
 11968      }
 11969      // JP label
 11970      if isLabel(v0) {
 11971          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 11972              m.emit(0x7a)
 11973              m.imm1(relv(v[0]))
 11974          })
 11975          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 11976              m.emit(0x0f)
 11977              m.emit(0x8a)
 11978              m.imm4(relv(v[0]))
 11979          })
 11980      }
 11981      if p.len == 0 {
 11982          panic("invalid operands for JP")
 11983      }
 11984      return p
 11985  }
 11986  
 11987  // JPE performs "Jump if parity even (PF == 1)".
 11988  //
 11989  // Mnemonic        : JPE
 11990  // Supported forms : (2 forms)
 11991  //
 11992  //    * JPE rel8
 11993  //    * JPE rel32
 11994  //
 11995  func (self *Program) JPE(v0 interface{}) *Instruction {
 11996      p := self.alloc("JPE", 1, Operands { v0 })
 11997      p.branch = _B_conditional
 11998      // JPE rel8
 11999      if isRel8(v0) {
 12000          p.domain = DomainGeneric
 12001          p.add(0, func(m *_Encoding, v []interface{}) {
 12002              m.emit(0x7a)
 12003              m.imm1(relv(v[0]))
 12004          })
 12005      }
 12006      // JPE rel32
 12007      if isRel32(v0) {
 12008          p.domain = DomainGeneric
 12009          p.add(0, func(m *_Encoding, v []interface{}) {
 12010              m.emit(0x0f)
 12011              m.emit(0x8a)
 12012              m.imm4(relv(v[0]))
 12013          })
 12014      }
 12015      // JPE label
 12016      if isLabel(v0) {
 12017          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 12018              m.emit(0x7a)
 12019              m.imm1(relv(v[0]))
 12020          })
 12021          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 12022              m.emit(0x0f)
 12023              m.emit(0x8a)
 12024              m.imm4(relv(v[0]))
 12025          })
 12026      }
 12027      if p.len == 0 {
 12028          panic("invalid operands for JPE")
 12029      }
 12030      return p
 12031  }
 12032  
 12033  // JPO performs "Jump if parity odd (PF == 0)".
 12034  //
 12035  // Mnemonic        : JPO
 12036  // Supported forms : (2 forms)
 12037  //
 12038  //    * JPO rel8
 12039  //    * JPO rel32
 12040  //
 12041  func (self *Program) JPO(v0 interface{}) *Instruction {
 12042      p := self.alloc("JPO", 1, Operands { v0 })
 12043      p.branch = _B_conditional
 12044      // JPO rel8
 12045      if isRel8(v0) {
 12046          p.domain = DomainGeneric
 12047          p.add(0, func(m *_Encoding, v []interface{}) {
 12048              m.emit(0x7b)
 12049              m.imm1(relv(v[0]))
 12050          })
 12051      }
 12052      // JPO rel32
 12053      if isRel32(v0) {
 12054          p.domain = DomainGeneric
 12055          p.add(0, func(m *_Encoding, v []interface{}) {
 12056              m.emit(0x0f)
 12057              m.emit(0x8b)
 12058              m.imm4(relv(v[0]))
 12059          })
 12060      }
 12061      // JPO label
 12062      if isLabel(v0) {
 12063          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 12064              m.emit(0x7b)
 12065              m.imm1(relv(v[0]))
 12066          })
 12067          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 12068              m.emit(0x0f)
 12069              m.emit(0x8b)
 12070              m.imm4(relv(v[0]))
 12071          })
 12072      }
 12073      if p.len == 0 {
 12074          panic("invalid operands for JPO")
 12075      }
 12076      return p
 12077  }
 12078  
 12079  // JRCXZ performs "Jump if RCX register is 0".
 12080  //
 12081  // Mnemonic        : JRCXZ
 12082  // Supported forms : (1 form)
 12083  //
 12084  //    * JRCXZ rel8
 12085  //
 12086  func (self *Program) JRCXZ(v0 interface{}) *Instruction {
 12087      p := self.alloc("JRCXZ", 1, Operands { v0 })
 12088      p.branch = _B_conditional
 12089      // JRCXZ rel8
 12090      if isRel8(v0) {
 12091          p.domain = DomainGeneric
 12092          p.add(0, func(m *_Encoding, v []interface{}) {
 12093              m.emit(0xe3)
 12094              m.imm1(relv(v[0]))
 12095          })
 12096      }
 12097      // JRCXZ label
 12098      if isLabel(v0) {
 12099          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 12100              m.emit(0xe3)
 12101              m.imm1(relv(v[0]))
 12102          })
 12103      }
 12104      if p.len == 0 {
 12105          panic("invalid operands for JRCXZ")
 12106      }
 12107      return p
 12108  }
 12109  
 12110  // JS performs "Jump if sign (SF == 1)".
 12111  //
 12112  // Mnemonic        : JS
 12113  // Supported forms : (2 forms)
 12114  //
 12115  //    * JS rel8
 12116  //    * JS rel32
 12117  //
 12118  func (self *Program) JS(v0 interface{}) *Instruction {
 12119      p := self.alloc("JS", 1, Operands { v0 })
 12120      p.branch = _B_conditional
 12121      // JS rel8
 12122      if isRel8(v0) {
 12123          p.domain = DomainGeneric
 12124          p.add(0, func(m *_Encoding, v []interface{}) {
 12125              m.emit(0x78)
 12126              m.imm1(relv(v[0]))
 12127          })
 12128      }
 12129      // JS rel32
 12130      if isRel32(v0) {
 12131          p.domain = DomainGeneric
 12132          p.add(0, func(m *_Encoding, v []interface{}) {
 12133              m.emit(0x0f)
 12134              m.emit(0x88)
 12135              m.imm4(relv(v[0]))
 12136          })
 12137      }
 12138      // JS label
 12139      if isLabel(v0) {
 12140          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 12141              m.emit(0x78)
 12142              m.imm1(relv(v[0]))
 12143          })
 12144          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 12145              m.emit(0x0f)
 12146              m.emit(0x88)
 12147              m.imm4(relv(v[0]))
 12148          })
 12149      }
 12150      if p.len == 0 {
 12151          panic("invalid operands for JS")
 12152      }
 12153      return p
 12154  }
 12155  
 12156  // JZ performs "Jump if zero (ZF == 1)".
 12157  //
 12158  // Mnemonic        : JZ
 12159  // Supported forms : (2 forms)
 12160  //
 12161  //    * JZ rel8
 12162  //    * JZ rel32
 12163  //
 12164  func (self *Program) JZ(v0 interface{}) *Instruction {
 12165      p := self.alloc("JZ", 1, Operands { v0 })
 12166      p.branch = _B_conditional
 12167      // JZ rel8
 12168      if isRel8(v0) {
 12169          p.domain = DomainGeneric
 12170          p.add(0, func(m *_Encoding, v []interface{}) {
 12171              m.emit(0x74)
 12172              m.imm1(relv(v[0]))
 12173          })
 12174      }
 12175      // JZ rel32
 12176      if isRel32(v0) {
 12177          p.domain = DomainGeneric
 12178          p.add(0, func(m *_Encoding, v []interface{}) {
 12179              m.emit(0x0f)
 12180              m.emit(0x84)
 12181              m.imm4(relv(v[0]))
 12182          })
 12183      }
 12184      // JZ label
 12185      if isLabel(v0) {
 12186          p.add(_F_rel1, func(m *_Encoding, v []interface{}) {
 12187              m.emit(0x74)
 12188              m.imm1(relv(v[0]))
 12189          })
 12190          p.add(_F_rel4, func(m *_Encoding, v []interface{}) {
 12191              m.emit(0x0f)
 12192              m.emit(0x84)
 12193              m.imm4(relv(v[0]))
 12194          })
 12195      }
 12196      if p.len == 0 {
 12197          panic("invalid operands for JZ")
 12198      }
 12199      return p
 12200  }
 12201  
 12202  // KADDB performs "ADD Two 8-bit Masks".
 12203  //
 12204  // Mnemonic        : KADDB
 12205  // Supported forms : (1 form)
 12206  //
 12207  //    * KADDB k, k, k    [AVX512DQ]
 12208  //
 12209  func (self *Program) KADDB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
 12210      p := self.alloc("KADDB", 3, Operands { v0, v1, v2 })
 12211      // KADDB k, k, k
 12212      if isK(v0) && isK(v1) && isK(v2) {
 12213          self.require(ISA_AVX512DQ)
 12214          p.domain = DomainMask
 12215          p.add(0, func(m *_Encoding, v []interface{}) {
 12216              m.vex2(5, 0, nil, hlcode(v[1]))
 12217              m.emit(0x4a)
 12218              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 12219          })
 12220      }
 12221      if p.len == 0 {
 12222          panic("invalid operands for KADDB")
 12223      }
 12224      return p
 12225  }
 12226  
 12227  // KADDD performs "ADD Two 32-bit Masks".
 12228  //
 12229  // Mnemonic        : KADDD
 12230  // Supported forms : (1 form)
 12231  //
 12232  //    * KADDD k, k, k    [AVX512BW]
 12233  //
 12234  func (self *Program) KADDD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
 12235      p := self.alloc("KADDD", 3, Operands { v0, v1, v2 })
 12236      // KADDD k, k, k
 12237      if isK(v0) && isK(v1) && isK(v2) {
 12238          self.require(ISA_AVX512BW)
 12239          p.domain = DomainMask
 12240          p.add(0, func(m *_Encoding, v []interface{}) {
 12241              m.emit(0xc4)
 12242              m.emit(0xe1)
 12243              m.emit(0xfd ^ (hlcode(v[1]) << 3))
 12244              m.emit(0x4a)
 12245              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 12246          })
 12247      }
 12248      if p.len == 0 {
 12249          panic("invalid operands for KADDD")
 12250      }
 12251      return p
 12252  }
 12253  
// KADDQ performs "ADD Two 64-bit Masks".
//
// Mnemonic        : KADDQ
// Supported forms : (1 form)
//
//    * KADDQ k, k, k    [AVX512BW]
//
func (self *Program) KADDQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KADDQ", 3, Operands { v0, v1, v2 })
    // KADDQ k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                  // 3-byte VEX escape
            m.emit(0xe1)                                  // VEX byte 1: inverted R/X/B, opcode map 0F
            m.emit(0xfc ^ (hlcode(v[1]) << 3))            // VEX byte 2: W/L/pp bits (no 66 prefix, unlike KADDD); inverted vvvv selects mask v[1]
            m.emit(0x4a)                                  // KADD opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[2] (dst), rm=v[0]
        })
    }
    // no encoder was added above, so the operands matched no supported form
    if p.len == 0 {
        panic("invalid operands for KADDQ")
    }
    return p
}
 12280  
// KADDW performs "ADD Two 16-bit Masks".
//
// Mnemonic        : KADDW
// Supported forms : (1 form)
//
//    * KADDW k, k, k    [AVX512DQ]
//
func (self *Program) KADDW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KADDW", 3, Operands { v0, v1, v2 })
    // KADDW k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, 0, nil, hlcode(v[1]))               // 2-byte VEX prefix; vvvv selects mask v[1]
            m.emit(0x4a)                                  // KADD opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[2] (dst), rm=v[0]
        })
    }
    // no encoder was added above, so the operands matched no supported form
    if p.len == 0 {
        panic("invalid operands for KADDW")
    }
    return p
}
 12305  
// KANDB performs "Bitwise Logical AND 8-bit Masks".
//
// Mnemonic        : KANDB
// Supported forms : (1 form)
//
//    * KANDB k, k, k    [AVX512DQ]
//
func (self *Program) KANDB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KANDB", 3, Operands { v0, v1, v2 })
    // KANDB k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, 0, nil, hlcode(v[1]))               // 2-byte VEX prefix; vvvv selects mask v[1]
            m.emit(0x41)                                  // KAND opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[2] (dst), rm=v[0]
        })
    }
    // no encoder was added above, so the operands matched no supported form
    if p.len == 0 {
        panic("invalid operands for KANDB")
    }
    return p
}
 12330  
// KANDD performs "Bitwise Logical AND 32-bit Masks".
//
// Mnemonic        : KANDD
// Supported forms : (1 form)
//
//    * KANDD k, k, k    [AVX512BW]
//
func (self *Program) KANDD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KANDD", 3, Operands { v0, v1, v2 })
    // KANDD k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                  // 3-byte VEX escape
            m.emit(0xe1)                                  // VEX byte 1: inverted R/X/B, opcode map 0F
            m.emit(0xfd ^ (hlcode(v[1]) << 3))            // VEX byte 2: W/L/pp bits; inverted vvvv selects mask v[1]
            m.emit(0x41)                                  // KAND opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[2] (dst), rm=v[0]
        })
    }
    // no encoder was added above, so the operands matched no supported form
    if p.len == 0 {
        panic("invalid operands for KANDD")
    }
    return p
}
 12357  
// KANDNB performs "Bitwise Logical AND NOT 8-bit Masks".
//
// Mnemonic        : KANDNB
// Supported forms : (1 form)
//
//    * KANDNB k, k, k    [AVX512DQ]
//
func (self *Program) KANDNB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KANDNB", 3, Operands { v0, v1, v2 })
    // KANDNB k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, 0, nil, hlcode(v[1]))               // 2-byte VEX prefix; vvvv selects mask v[1]
            m.emit(0x42)                                  // KANDN opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[2] (dst), rm=v[0]
        })
    }
    // no encoder was added above, so the operands matched no supported form
    if p.len == 0 {
        panic("invalid operands for KANDNB")
    }
    return p
}
 12382  
// KANDND performs "Bitwise Logical AND NOT 32-bit Masks".
//
// Mnemonic        : KANDND
// Supported forms : (1 form)
//
//    * KANDND k, k, k    [AVX512BW]
//
func (self *Program) KANDND(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KANDND", 3, Operands { v0, v1, v2 })
    // KANDND k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                  // 3-byte VEX escape
            m.emit(0xe1)                                  // VEX byte 1: inverted R/X/B, opcode map 0F
            m.emit(0xfd ^ (hlcode(v[1]) << 3))            // VEX byte 2: W/L/pp bits; inverted vvvv selects mask v[1]
            m.emit(0x42)                                  // KANDN opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[2] (dst), rm=v[0]
        })
    }
    // no encoder was added above, so the operands matched no supported form
    if p.len == 0 {
        panic("invalid operands for KANDND")
    }
    return p
}
 12409  
// KANDNQ performs "Bitwise Logical AND NOT 64-bit Masks".
//
// Mnemonic        : KANDNQ
// Supported forms : (1 form)
//
//    * KANDNQ k, k, k    [AVX512BW]
//
func (self *Program) KANDNQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KANDNQ", 3, Operands { v0, v1, v2 })
    // KANDNQ k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                  // 3-byte VEX escape
            m.emit(0xe1)                                  // VEX byte 1: inverted R/X/B, opcode map 0F
            m.emit(0xfc ^ (hlcode(v[1]) << 3))            // VEX byte 2: W/L/pp bits (no 66 prefix, unlike KANDND); inverted vvvv selects mask v[1]
            m.emit(0x42)                                  // KANDN opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[2] (dst), rm=v[0]
        })
    }
    // no encoder was added above, so the operands matched no supported form
    if p.len == 0 {
        panic("invalid operands for KANDNQ")
    }
    return p
}
 12436  
// KANDNW performs "Bitwise Logical AND NOT 16-bit Masks".
//
// Mnemonic        : KANDNW
// Supported forms : (1 form)
//
//    * KANDNW k, k, k    [AVX512F]
//
func (self *Program) KANDNW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KANDNW", 3, Operands { v0, v1, v2 })
    // KANDNW k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, 0, nil, hlcode(v[1]))               // 2-byte VEX prefix; vvvv selects mask v[1]
            m.emit(0x42)                                  // KANDN opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[2] (dst), rm=v[0]
        })
    }
    // no encoder was added above, so the operands matched no supported form
    if p.len == 0 {
        panic("invalid operands for KANDNW")
    }
    return p
}
 12461  
// KANDQ performs "Bitwise Logical AND 64-bit Masks".
//
// Mnemonic        : KANDQ
// Supported forms : (1 form)
//
//    * KANDQ k, k, k    [AVX512BW]
//
func (self *Program) KANDQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KANDQ", 3, Operands { v0, v1, v2 })
    // KANDQ k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                  // 3-byte VEX escape
            m.emit(0xe1)                                  // VEX byte 1: inverted R/X/B, opcode map 0F
            m.emit(0xfc ^ (hlcode(v[1]) << 3))            // VEX byte 2: W/L/pp bits (no 66 prefix, unlike KANDD); inverted vvvv selects mask v[1]
            m.emit(0x41)                                  // KAND opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[2] (dst), rm=v[0]
        })
    }
    // no encoder was added above, so the operands matched no supported form
    if p.len == 0 {
        panic("invalid operands for KANDQ")
    }
    return p
}
 12488  
// KANDW performs "Bitwise Logical AND 16-bit Masks".
//
// Mnemonic        : KANDW
// Supported forms : (1 form)
//
//    * KANDW k, k, k    [AVX512F]
//
func (self *Program) KANDW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KANDW", 3, Operands { v0, v1, v2 })
    // KANDW k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, 0, nil, hlcode(v[1]))               // 2-byte VEX prefix; vvvv selects mask v[1]
            m.emit(0x41)                                  // KAND opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[2] (dst), rm=v[0]
        })
    }
    // no encoder was added above, so the operands matched no supported form
    if p.len == 0 {
        panic("invalid operands for KANDW")
    }
    return p
}
 12513  
// KMOVB performs "Move 8-bit Mask".
//
// Mnemonic        : KMOVB
// Supported forms : (5 forms)
//
//    * KMOVB k, k      [AVX512DQ]
//    * KMOVB r32, k    [AVX512DQ]
//    * KMOVB m8, k     [AVX512DQ]
//    * KMOVB k, r32    [AVX512DQ]
//    * KMOVB k, m8     [AVX512DQ]
//
func (self *Program) KMOVB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("KMOVB", 2, Operands { v0, v1 })
    // KMOVB k, k
    if isK(v0) && isK(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, 0, nil, 0)                          // 2-byte VEX prefix; vvvv unused (0)
            m.emit(0x90)                                  // opcode: load mask from k/m
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[1] (dst), rm=v[0] (src)
        })
    }
    // KMOVB r32, k
    if isReg32(v0) && isK(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, 0, v[0], 0)                         // prefix built with v[0] as the rm operand (covers extended registers)
            m.emit(0x92)                                  // opcode: move GPR -> mask
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[1] (dst mask), rm=v[0] (src GPR)
        })
    }
    // KMOVB m8, k
    if isM8(v0) && isK(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, 0, addr(v[0]), 0)                   // prefix built from the memory operand's base/index registers
            m.emit(0x90)                                  // opcode: load mask from k/m
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/disp for the memory source; reg=v[1]
        })
    }
    // KMOVB k, r32
    if isK(v0) && isReg32(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[1]), nil, 0)                // high bit of the destination GPR goes into the prefix
            m.emit(0x93)                                  // opcode: move mask -> GPR
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[1] (dst GPR), rm=v[0] (src mask)
        })
    }
    // KMOVB k, m8
    if isK(v0) && isM8(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, 0, addr(v[1]), 0)                   // prefix built from the memory operand's base/index registers
            m.emit(0x91)                                  // opcode: store mask to memory
            m.mrsd(lcode(v[0]), addr(v[1]), 1)            // ModRM/SIB/disp for the memory destination; reg=v[0]
        })
    }
    // no encoder was added above, so the operands matched no supported form
    if p.len == 0 {
        panic("invalid operands for KMOVB")
    }
    return p
}
 12582  
// KMOVD performs "Move 32-bit Mask".
//
// Mnemonic        : KMOVD
// Supported forms : (5 forms)
//
//    * KMOVD k, k      [AVX512BW]
//    * KMOVD r32, k    [AVX512BW]
//    * KMOVD m32, k    [AVX512BW]
//    * KMOVD k, r32    [AVX512BW]
//    * KMOVD k, m32    [AVX512BW]
//
func (self *Program) KMOVD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("KMOVD", 2, Operands { v0, v1 })
    // KMOVD k, k
    if isK(v0) && isK(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                  // 3-byte VEX escape
            m.emit(0xe1)                                  // VEX byte 1: inverted R/X/B, opcode map 0F
            m.emit(0xf9)                                  // VEX byte 2: W/L/pp bits; vvvv unused
            m.emit(0x90)                                  // opcode: load mask from k/m
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[1] (dst), rm=v[0] (src)
        })
    }
    // KMOVD r32, k
    if isReg32(v0) && isK(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, 0, v[0], 0)                         // prefix built with v[0] as the rm operand (covers extended registers)
            m.emit(0x92)                                  // opcode: move GPR -> mask
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[1] (dst mask), rm=v[0] (src GPR)
        })
    }
    // KMOVD m32, k
    if isM32(v0) && isK(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b1, 0x81, 0, addr(v[0]), 0)     // 3-byte VEX prefix built from the memory operand
            m.emit(0x90)                                  // opcode: load mask from k/m
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/disp for the memory source; reg=v[1]
        })
    }
    // KMOVD k, r32
    if isK(v0) && isReg32(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[1]), nil, 0)                // high bit of the destination GPR goes into the prefix
            m.emit(0x93)                                  // opcode: move mask -> GPR
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[1] (dst GPR), rm=v[0] (src mask)
        })
    }
    // KMOVD k, m32
    if isK(v0) && isM32(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b1, 0x81, 0, addr(v[1]), 0)     // 3-byte VEX prefix built from the memory operand
            m.emit(0x91)                                  // opcode: store mask to memory
            m.mrsd(lcode(v[0]), addr(v[1]), 1)            // ModRM/SIB/disp for the memory destination; reg=v[0]
        })
    }
    // no encoder was added above, so the operands matched no supported form
    if p.len == 0 {
        panic("invalid operands for KMOVD")
    }
    return p
}
 12653  
// KMOVQ performs "Move 64-bit Mask".
//
// Mnemonic        : KMOVQ
// Supported forms : (5 forms)
//
//    * KMOVQ k, k      [AVX512BW]
//    * KMOVQ r64, k    [AVX512BW]
//    * KMOVQ m64, k    [AVX512BW]
//    * KMOVQ k, r64    [AVX512BW]
//    * KMOVQ k, m64    [AVX512BW]
//
func (self *Program) KMOVQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("KMOVQ", 2, Operands { v0, v1 })
    // KMOVQ k, k
    if isK(v0) && isK(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                  // 3-byte VEX escape
            m.emit(0xe1)                                  // VEX byte 1: inverted R/X/B, opcode map 0F
            m.emit(0xf8)                                  // VEX byte 2: W/L/pp bits (no 66 prefix, unlike KMOVD); vvvv unused
            m.emit(0x90)                                  // opcode: load mask from k/m
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[1] (dst), rm=v[0] (src)
        })
    }
    // KMOVQ r64, k
    if isReg64(v0) && isK(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                  // 3-byte VEX escape
            m.emit(0xe1 ^ (hcode(v[0]) << 5))             // VEX byte 1: bit 5 (inverted B) extended by the high bit of GPR v[0]
            m.emit(0xfb)                                  // VEX byte 2: W/L/pp bits; vvvv unused
            m.emit(0x92)                                  // opcode: move GPR -> mask
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[1] (dst mask), rm=v[0] (src GPR)
        })
    }
    // KMOVQ m64, k
    if isM64(v0) && isK(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b1, 0x80, 0, addr(v[0]), 0)     // 3-byte VEX prefix built from the memory operand
            m.emit(0x90)                                  // opcode: load mask from k/m
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/disp for the memory source; reg=v[1]
        })
    }
    // KMOVQ k, r64
    if isK(v0) && isReg64(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                  // 3-byte VEX escape
            m.emit(0xe1 ^ (hcode(v[1]) << 7))             // VEX byte 1: bit 7 (inverted R) extended by the high bit of GPR v[1]
            m.emit(0xfb)                                  // VEX byte 2: W/L/pp bits; vvvv unused
            m.emit(0x93)                                  // opcode: move mask -> GPR
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[1] (dst GPR), rm=v[0] (src mask)
        })
    }
    // KMOVQ k, m64
    if isK(v0) && isM64(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b1, 0x80, 0, addr(v[1]), 0)     // 3-byte VEX prefix built from the memory operand
            m.emit(0x91)                                  // opcode: store mask to memory
            m.mrsd(lcode(v[0]), addr(v[1]), 1)            // ModRM/SIB/disp for the memory destination; reg=v[0]
        })
    }
    // no encoder was added above, so the operands matched no supported form
    if p.len == 0 {
        panic("invalid operands for KMOVQ")
    }
    return p
}
 12728  
// KMOVW performs "Move 16-bit Mask".
//
// Mnemonic        : KMOVW
// Supported forms : (5 forms)
//
//    * KMOVW k, k      [AVX512F]
//    * KMOVW r32, k    [AVX512F]
//    * KMOVW m16, k    [AVX512F]
//    * KMOVW k, r32    [AVX512F]
//    * KMOVW k, m16    [AVX512F]
//
func (self *Program) KMOVW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("KMOVW", 2, Operands { v0, v1 })
    // KMOVW k, k
    if isK(v0) && isK(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, 0, nil, 0)                          // 2-byte VEX prefix; vvvv unused (0)
            m.emit(0x90)                                  // opcode: load mask from k/m
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[1] (dst), rm=v[0] (src)
        })
    }
    // KMOVW r32, k
    if isReg32(v0) && isK(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, 0, v[0], 0)                         // prefix built with v[0] as the rm operand (covers extended registers)
            m.emit(0x92)                                  // opcode: move GPR -> mask
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[1] (dst mask), rm=v[0] (src GPR)
        })
    }
    // KMOVW m16, k
    if isM16(v0) && isK(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, 0, addr(v[0]), 0)                   // prefix built from the memory operand's base/index registers
            m.emit(0x90)                                  // opcode: load mask from k/m
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/disp for the memory source; reg=v[1]
        })
    }
    // KMOVW k, r32
    if isK(v0) && isReg32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[1]), nil, 0)                // high bit of the destination GPR goes into the prefix
            m.emit(0x93)                                  // opcode: move mask -> GPR
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[1] (dst GPR), rm=v[0] (src mask)
        })
    }
    // KMOVW k, m16
    if isK(v0) && isM16(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, 0, addr(v[1]), 0)                   // prefix built from the memory operand's base/index registers
            m.emit(0x91)                                  // opcode: store mask to memory
            m.mrsd(lcode(v[0]), addr(v[1]), 1)            // ModRM/SIB/disp for the memory destination; reg=v[0]
        })
    }
    // no encoder was added above, so the operands matched no supported form
    if p.len == 0 {
        panic("invalid operands for KMOVW")
    }
    return p
}
 12797  
// KNOTB performs "NOT 8-bit Mask Register".
//
// Mnemonic        : KNOTB
// Supported forms : (1 form)
//
//    * KNOTB k, k    [AVX512DQ]
//
func (self *Program) KNOTB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("KNOTB", 2, Operands { v0, v1 })
    // KNOTB k, k
    if isK(v0) && isK(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, 0, nil, 0)                          // 2-byte VEX prefix; vvvv unused (0)
            m.emit(0x44)                                  // KNOT opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[1] (dst), rm=v[0] (src)
        })
    }
    // no encoder was added above, so the operands matched no supported form
    if p.len == 0 {
        panic("invalid operands for KNOTB")
    }
    return p
}
 12822  
// KNOTD performs "NOT 32-bit Mask Register".
//
// Mnemonic        : KNOTD
// Supported forms : (1 form)
//
//    * KNOTD k, k    [AVX512BW]
//
func (self *Program) KNOTD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("KNOTD", 2, Operands { v0, v1 })
    // KNOTD k, k
    if isK(v0) && isK(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                  // 3-byte VEX escape
            m.emit(0xe1)                                  // VEX byte 1: inverted R/X/B, opcode map 0F
            m.emit(0xf9)                                  // VEX byte 2: W/L/pp bits; vvvv unused
            m.emit(0x44)                                  // KNOT opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[1] (dst), rm=v[0] (src)
        })
    }
    // no encoder was added above, so the operands matched no supported form
    if p.len == 0 {
        panic("invalid operands for KNOTD")
    }
    return p
}
 12849  
// KNOTQ performs "NOT 64-bit Mask Register".
//
// Mnemonic        : KNOTQ
// Supported forms : (1 form)
//
//    * KNOTQ k, k    [AVX512BW]
//
func (self *Program) KNOTQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("KNOTQ", 2, Operands { v0, v1 })
    // KNOTQ k, k
    if isK(v0) && isK(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                  // 3-byte VEX escape
            m.emit(0xe1)                                  // VEX byte 1: inverted R/X/B, opcode map 0F
            m.emit(0xf8)                                  // VEX byte 2: W/L/pp bits (no 66 prefix, unlike KNOTD); vvvv unused
            m.emit(0x44)                                  // KNOT opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[1] (dst), rm=v[0] (src)
        })
    }
    // no encoder was added above, so the operands matched no supported form
    if p.len == 0 {
        panic("invalid operands for KNOTQ")
    }
    return p
}
 12876  
// KNOTW performs "NOT 16-bit Mask Register".
//
// Mnemonic        : KNOTW
// Supported forms : (1 form)
//
//    * KNOTW k, k    [AVX512F]
//
func (self *Program) KNOTW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("KNOTW", 2, Operands { v0, v1 })
    // KNOTW k, k
    if isK(v0) && isK(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, 0, nil, 0)                          // 2-byte VEX prefix; vvvv unused (0)
            m.emit(0x44)                                  // KNOT opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[1] (dst), rm=v[0] (src)
        })
    }
    // no encoder was added above, so the operands matched no supported form
    if p.len == 0 {
        panic("invalid operands for KNOTW")
    }
    return p
}
 12901  
// KORB performs "Bitwise Logical OR 8-bit Masks".
//
// Mnemonic        : KORB
// Supported forms : (1 form)
//
//    * KORB k, k, k    [AVX512DQ]
//
func (self *Program) KORB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KORB", 3, Operands { v0, v1, v2 })
    // KORB k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, 0, nil, hlcode(v[1]))               // 2-byte VEX prefix; vvvv selects mask v[1]
            m.emit(0x45)                                  // KOR opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[2] (dst), rm=v[0]
        })
    }
    // no encoder was added above, so the operands matched no supported form
    if p.len == 0 {
        panic("invalid operands for KORB")
    }
    return p
}
 12926  
// KORD performs "Bitwise Logical OR 32-bit Masks".
//
// Mnemonic        : KORD
// Supported forms : (1 form)
//
//    * KORD k, k, k    [AVX512BW]
//
func (self *Program) KORD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KORD", 3, Operands { v0, v1, v2 })
    // KORD k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                  // 3-byte VEX escape
            m.emit(0xe1)                                  // VEX byte 1: inverted R/X/B, opcode map 0F
            m.emit(0xfd ^ (hlcode(v[1]) << 3))            // VEX byte 2: W/L/pp bits; inverted vvvv selects mask v[1]
            m.emit(0x45)                                  // KOR opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[2] (dst), rm=v[0]
        })
    }
    // no encoder was added above, so the operands matched no supported form
    if p.len == 0 {
        panic("invalid operands for KORD")
    }
    return p
}
 12953  
// KORQ performs "Bitwise Logical OR 64-bit Masks".
//
// Mnemonic        : KORQ
// Supported forms : (1 form)
//
//    * KORQ k, k, k    [AVX512BW]
//
func (self *Program) KORQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KORQ", 3, Operands { v0, v1, v2 })
    // KORQ k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                  // 3-byte VEX escape
            m.emit(0xe1)                                  // VEX byte 1: inverted R/X/B, opcode map 0F
            m.emit(0xfc ^ (hlcode(v[1]) << 3))            // VEX byte 2: W/L/pp bits (no 66 prefix, unlike KORD); inverted vvvv selects mask v[1]
            m.emit(0x45)                                  // KOR opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[2] (dst), rm=v[0]
        })
    }
    // no encoder was added above, so the operands matched no supported form
    if p.len == 0 {
        panic("invalid operands for KORQ")
    }
    return p
}
 12980  
// KORTESTB performs "OR 8-bit Masks and Set Flags".
//
// Mnemonic        : KORTESTB
// Supported forms : (1 form)
//
//    * KORTESTB k, k    [AVX512DQ]
//
func (self *Program) KORTESTB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("KORTESTB", 2, Operands { v0, v1 })
    // KORTESTB k, k
    if isK(v0) && isK(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, 0, nil, 0)                          // 2-byte VEX prefix; vvvv unused (0)
            m.emit(0x98)                                  // KORTEST opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[1], rm=v[0]
        })
    }
    // no encoder was added above, so the operands matched no supported form
    if p.len == 0 {
        panic("invalid operands for KORTESTB")
    }
    return p
}
 13005  
// KORTESTD performs "OR 32-bit Masks and Set Flags".
//
// Mnemonic        : KORTESTD
// Supported forms : (1 form)
//
//    * KORTESTD k, k    [AVX512BW]
//
func (self *Program) KORTESTD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("KORTESTD", 2, Operands { v0, v1 })
    // KORTESTD k, k
    if isK(v0) && isK(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                  // 3-byte VEX escape
            m.emit(0xe1)                                  // VEX byte 1: inverted R/X/B, opcode map 0F
            m.emit(0xf9)                                  // VEX byte 2: W/L/pp bits; vvvv unused
            m.emit(0x98)                                  // KORTEST opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[1], rm=v[0]
        })
    }
    // no encoder was added above, so the operands matched no supported form
    if p.len == 0 {
        panic("invalid operands for KORTESTD")
    }
    return p
}
 13032  
// KORTESTQ performs "OR 64-bit Masks and Set Flags".
//
// Mnemonic        : KORTESTQ
// Supported forms : (1 form)
//
//    * KORTESTQ k, k    [AVX512BW]
//
func (self *Program) KORTESTQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("KORTESTQ", 2, Operands { v0, v1 })
    // KORTESTQ k, k
    if isK(v0) && isK(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                  // 3-byte VEX escape
            m.emit(0xe1)                                  // VEX byte 1: inverted R/X/B, opcode map 0F
            m.emit(0xf8)                                  // VEX byte 2: W/L/pp bits (no 66 prefix, unlike KORTESTD); vvvv unused
            m.emit(0x98)                                  // KORTEST opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[1], rm=v[0]
        })
    }
    // no encoder was added above, so the operands matched no supported form
    if p.len == 0 {
        panic("invalid operands for KORTESTQ")
    }
    return p
}
 13059  
// KORTESTW performs "OR 16-bit Masks and Set Flags".
//
// Mnemonic        : KORTESTW
// Supported forms : (1 form)
//
//    * KORTESTW k, k    [AVX512F]
//
func (self *Program) KORTESTW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("KORTESTW", 2, Operands { v0, v1 })
    // KORTESTW k, k
    if isK(v0) && isK(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, 0, nil, 0)                          // 2-byte VEX prefix; vvvv unused (0)
            m.emit(0x98)                                  // KORTEST opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[1], rm=v[0]
        })
    }
    // no encoder was added above, so the operands matched no supported form
    if p.len == 0 {
        panic("invalid operands for KORTESTW")
    }
    return p
}
 13084  
// KORW performs "Bitwise Logical OR 16-bit Masks".
//
// Mnemonic        : KORW
// Supported forms : (1 form)
//
//    * KORW k, k, k    [AVX512F]
//
func (self *Program) KORW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KORW", 3, Operands { v0, v1, v2 })
    // KORW k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, 0, nil, hlcode(v[1]))               // 2-byte VEX prefix; vvvv selects mask v[1]
            m.emit(0x45)                                  // KOR opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v[2] (dst), rm=v[0]
        })
    }
    // no encoder was added above, so the operands matched no supported form
    if p.len == 0 {
        panic("invalid operands for KORW")
    }
    return p
}
 13109  
// KSHIFTLB performs "Shift Left 8-bit Masks".
//
// Mnemonic        : KSHIFTLB
// Supported forms : (1 form)
//
//    * KSHIFTLB imm8, k, k    [AVX512DQ]
//
func (self *Program) KSHIFTLB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KSHIFTLB", 3, Operands { v0, v1, v2 })
    // KSHIFTLB imm8, k, k
    if isImm8(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                  // 3-byte VEX escape
            m.emit(0xe3)                                  // VEX byte 1: inverted R/X/B, opcode map 0F3A
            m.emit(0x79)                                  // VEX byte 2: W/L/pp bits; vvvv unused
            m.emit(0x32)                                  // KSHIFTL opcode (byte-width variant)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1])) // ModRM: mod=11, reg=v[2] (dst), rm=v[1] (src)
            m.imm1(toImmAny(v[0]))                        // trailing imm8 shift count
        })
    }
    // no encoder was added above, so the operands matched no supported form
    if p.len == 0 {
        panic("invalid operands for KSHIFTLB")
    }
    return p
}
 13137  
// KSHIFTLD performs "Shift Left 32-bit Masks".
//
// Mnemonic        : KSHIFTLD
// Supported forms : (1 form)
//
//    * KSHIFTLD imm8, k, k    [AVX512BW]
//
func (self *Program) KSHIFTLD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KSHIFTLD", 3, Operands { v0, v1, v2 })
    // KSHIFTLD imm8, k, k
    if isImm8(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                  // 3-byte VEX escape
            m.emit(0xe3)                                  // VEX byte 1: inverted R/X/B, opcode map 0F3A
            m.emit(0x79)                                  // VEX byte 2: W/L/pp bits; vvvv unused
            m.emit(0x33)                                  // KSHIFTL opcode (dword-width variant)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1])) // ModRM: mod=11, reg=v[2] (dst), rm=v[1] (src)
            m.imm1(toImmAny(v[0]))                        // trailing imm8 shift count
        })
    }
    // no encoder was added above, so the operands matched no supported form
    if p.len == 0 {
        panic("invalid operands for KSHIFTLD")
    }
    return p
}
 13165  
// KSHIFTLQ performs "Shift Left 64-bit Masks".
//
// Mnemonic        : KSHIFTLQ
// Supported forms : (1 form)
//
//    * KSHIFTLQ imm8, k, k    [AVX512BW]
//
func (self *Program) KSHIFTLQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KSHIFTLQ", 3, Operands { v0, v1, v2 })
    // KSHIFTLQ imm8, k, k
    if isImm8(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        // VEX.L0.66.0F3A.W1 33 /r ib — hand-assembled 3-byte VEX form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4) // 3-byte VEX prefix
            m.emit(0xe3) // inverted R/X/B set, opcode map 0F3A
            m.emit(0xf9) // W=1 (64-bit mask width), vvvv unused, L=0, pp=01 (66)
            m.emit(0x33) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1])) // ModRM: reg = dst mask, r/m = src mask
            m.imm1(toImmAny(v[0])) // imm8 shift count
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KSHIFTLQ")
    }
    return p
}
 13193  
// KSHIFTLW performs "Shift Left 16-bit Masks".
//
// Mnemonic        : KSHIFTLW
// Supported forms : (1 form)
//
//    * KSHIFTLW imm8, k, k    [AVX512F]
//
func (self *Program) KSHIFTLW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KSHIFTLW", 3, Operands { v0, v1, v2 })
    // KSHIFTLW imm8, k, k
    if isImm8(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainMask
        // VEX.L0.66.0F3A.W1 32 /r ib — hand-assembled 3-byte VEX form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4) // 3-byte VEX prefix
            m.emit(0xe3) // inverted R/X/B set, opcode map 0F3A
            m.emit(0xf9) // W=1 (16-bit mask form), vvvv unused, L=0, pp=01 (66)
            m.emit(0x32) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1])) // ModRM: reg = dst mask, r/m = src mask
            m.imm1(toImmAny(v[0])) // imm8 shift count
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KSHIFTLW")
    }
    return p
}
 13221  
// KSHIFTRB performs "Shift Right 8-bit Masks".
//
// Mnemonic        : KSHIFTRB
// Supported forms : (1 form)
//
//    * KSHIFTRB imm8, k, k    [AVX512DQ]
//
func (self *Program) KSHIFTRB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KSHIFTRB", 3, Operands { v0, v1, v2 })
    // KSHIFTRB imm8, k, k
    if isImm8(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainMask
        // VEX.L0.66.0F3A.W0 30 /r ib — hand-assembled 3-byte VEX form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4) // 3-byte VEX prefix
            m.emit(0xe3) // inverted R/X/B set, opcode map 0F3A
            m.emit(0x79) // W=0, vvvv unused, L=0, pp=01 (66)
            m.emit(0x30) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1])) // ModRM: reg = dst mask, r/m = src mask
            m.imm1(toImmAny(v[0])) // imm8 shift count
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KSHIFTRB")
    }
    return p
}
 13249  
// KSHIFTRD performs "Shift Right 32-bit Masks".
//
// Mnemonic        : KSHIFTRD
// Supported forms : (1 form)
//
//    * KSHIFTRD imm8, k, k    [AVX512BW]
//
func (self *Program) KSHIFTRD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KSHIFTRD", 3, Operands { v0, v1, v2 })
    // KSHIFTRD imm8, k, k
    if isImm8(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        // VEX.L0.66.0F3A.W0 31 /r ib — hand-assembled 3-byte VEX form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4) // 3-byte VEX prefix
            m.emit(0xe3) // inverted R/X/B set, opcode map 0F3A
            m.emit(0x79) // W=0, vvvv unused, L=0, pp=01 (66)
            m.emit(0x31) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1])) // ModRM: reg = dst mask, r/m = src mask
            m.imm1(toImmAny(v[0])) // imm8 shift count
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KSHIFTRD")
    }
    return p
}
 13277  
// KSHIFTRQ performs "Shift Right 64-bit Masks".
//
// Mnemonic        : KSHIFTRQ
// Supported forms : (1 form)
//
//    * KSHIFTRQ imm8, k, k    [AVX512BW]
//
func (self *Program) KSHIFTRQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KSHIFTRQ", 3, Operands { v0, v1, v2 })
    // KSHIFTRQ imm8, k, k
    if isImm8(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        // VEX.L0.66.0F3A.W1 31 /r ib — hand-assembled 3-byte VEX form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4) // 3-byte VEX prefix
            m.emit(0xe3) // inverted R/X/B set, opcode map 0F3A
            m.emit(0xf9) // W=1 (64-bit mask width), vvvv unused, L=0, pp=01 (66)
            m.emit(0x31) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1])) // ModRM: reg = dst mask, r/m = src mask
            m.imm1(toImmAny(v[0])) // imm8 shift count
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KSHIFTRQ")
    }
    return p
}
 13305  
// KSHIFTRW performs "Shift Right 16-bit Masks".
//
// Mnemonic        : KSHIFTRW
// Supported forms : (1 form)
//
//    * KSHIFTRW imm8, k, k    [AVX512F]
//
func (self *Program) KSHIFTRW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KSHIFTRW", 3, Operands { v0, v1, v2 })
    // KSHIFTRW imm8, k, k
    if isImm8(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainMask
        // VEX.L0.66.0F3A.W1 30 /r ib — hand-assembled 3-byte VEX form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4) // 3-byte VEX prefix
            m.emit(0xe3) // inverted R/X/B set, opcode map 0F3A
            m.emit(0xf9) // W=1 (16-bit mask form), vvvv unused, L=0, pp=01 (66)
            m.emit(0x30) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1])) // ModRM: reg = dst mask, r/m = src mask
            m.imm1(toImmAny(v[0])) // imm8 shift count
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KSHIFTRW")
    }
    return p
}
 13333  
// KTESTB performs "Bit Test 8-bit Masks and Set Flags".
//
// Mnemonic        : KTESTB
// Supported forms : (1 form)
//
//    * KTESTB k, k    [AVX512DQ]
//
func (self *Program) KTESTB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("KTESTB", 2, Operands { v0, v1 })
    // KTESTB k, k
    if isK(v0) && isK(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainMask
        // VEX.L0.66.0F.W0 99 /r — 2-byte VEX; flag 1 selects pp=01 (66), L=0.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, 0, nil, 0)
            m.emit(0x99) // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: reg = second mask, r/m = first mask
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KTESTB")
    }
    return p
}
 13358  
// KTESTD performs "Bit Test 32-bit Masks and Set Flags".
//
// Mnemonic        : KTESTD
// Supported forms : (1 form)
//
//    * KTESTD k, k    [AVX512BW]
//
func (self *Program) KTESTD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("KTESTD", 2, Operands { v0, v1 })
    // KTESTD k, k
    if isK(v0) && isK(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        // VEX.L0.66.0F.W1 99 /r — W=1 requires the 3-byte VEX form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4) // 3-byte VEX prefix
            m.emit(0xe1) // inverted R/X/B set, opcode map 0F
            m.emit(0xf9) // W=1, vvvv unused, L=0, pp=01 (66)
            m.emit(0x99) // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: reg = second mask, r/m = first mask
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KTESTD")
    }
    return p
}
 13385  
// KTESTQ performs "Bit Test 64-bit Masks and Set Flags".
//
// Mnemonic        : KTESTQ
// Supported forms : (1 form)
//
//    * KTESTQ k, k    [AVX512BW]
//
func (self *Program) KTESTQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("KTESTQ", 2, Operands { v0, v1 })
    // KTESTQ k, k
    if isK(v0) && isK(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        // VEX.L0.0F.W1 99 /r — W=1 requires the 3-byte VEX form; no SIMD prefix.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4) // 3-byte VEX prefix
            m.emit(0xe1) // inverted R/X/B set, opcode map 0F
            m.emit(0xf8) // W=1, vvvv unused, L=0, pp=00 (none)
            m.emit(0x99) // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: reg = second mask, r/m = first mask
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KTESTQ")
    }
    return p
}
 13412  
// KTESTW performs "Bit Test 16-bit Masks and Set Flags".
//
// Mnemonic        : KTESTW
// Supported forms : (1 form)
//
//    * KTESTW k, k    [AVX512DQ]
//
func (self *Program) KTESTW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("KTESTW", 2, Operands { v0, v1 })
    // KTESTW k, k
    if isK(v0) && isK(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainMask
        // VEX.L0.0F.W0 99 /r — 2-byte VEX; flag 0 = no SIMD prefix, L=0.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, 0, nil, 0)
            m.emit(0x99) // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: reg = second mask, r/m = first mask
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KTESTW")
    }
    return p
}
 13437  
// KUNPCKBW performs "Unpack and Interleave 8-bit Masks".
//
// Mnemonic        : KUNPCKBW
// Supported forms : (1 form)
//
//    * KUNPCKBW k, k, k    [AVX512F]
//
func (self *Program) KUNPCKBW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KUNPCKBW", 3, Operands { v0, v1, v2 })
    // KUNPCKBW k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainMask
        // VEX.L1.66.0F.W0 4B /r — 2-byte VEX; flag 5 = L1 | pp=01 (66); vvvv = v[1].
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, 0, nil, hlcode(v[1]))
            m.emit(0x4b) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: reg = dst mask, r/m = second src mask
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KUNPCKBW")
    }
    return p
}
 13462  
// KUNPCKDQ performs "Unpack and Interleave 32-bit Masks".
//
// Mnemonic        : KUNPCKDQ
// Supported forms : (1 form)
//
//    * KUNPCKDQ k, k, k    [AVX512BW]
//
func (self *Program) KUNPCKDQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KUNPCKDQ", 3, Operands { v0, v1, v2 })
    // KUNPCKDQ k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        // VEX.L1.0F.W1 4B /r — W=1 requires the 3-byte VEX form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4) // 3-byte VEX prefix
            m.emit(0xe1) // inverted R/X/B set, opcode map 0F
            m.emit(0xfc ^ (hlcode(v[1]) << 3)) // W=1, L=1, pp=00; XOR folds inverted vvvv = v[1]
            m.emit(0x4b) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: reg = dst mask, r/m = second src mask
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KUNPCKDQ")
    }
    return p
}
 13489  
// KUNPCKWD performs "Unpack and Interleave 16-bit Masks".
//
// Mnemonic        : KUNPCKWD
// Supported forms : (1 form)
//
//    * KUNPCKWD k, k, k    [AVX512BW]
//
func (self *Program) KUNPCKWD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KUNPCKWD", 3, Operands { v0, v1, v2 })
    // KUNPCKWD k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        // VEX.L1.0F.W0 4B /r — 2-byte VEX; flag 4 = L1 | pp=00 (none); vvvv = v[1].
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, 0, nil, hlcode(v[1]))
            m.emit(0x4b) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: reg = dst mask, r/m = second src mask
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KUNPCKWD")
    }
    return p
}
 13514  
// KXNORB performs "Bitwise Logical XNOR 8-bit Masks".
//
// Mnemonic        : KXNORB
// Supported forms : (1 form)
//
//    * KXNORB k, k, k    [AVX512DQ]
//
func (self *Program) KXNORB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KXNORB", 3, Operands { v0, v1, v2 })
    // KXNORB k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainMask
        // VEX.L1.66.0F.W0 46 /r — 2-byte VEX; flag 5 = L1 | pp=01 (66); vvvv = v[1].
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, 0, nil, hlcode(v[1]))
            m.emit(0x46) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: reg = dst mask, r/m = second src mask
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KXNORB")
    }
    return p
}
 13539  
// KXNORD performs "Bitwise Logical XNOR 32-bit Masks".
//
// Mnemonic        : KXNORD
// Supported forms : (1 form)
//
//    * KXNORD k, k, k    [AVX512BW]
//
func (self *Program) KXNORD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KXNORD", 3, Operands { v0, v1, v2 })
    // KXNORD k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        // VEX.L1.66.0F.W1 46 /r — W=1 requires the 3-byte VEX form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4) // 3-byte VEX prefix
            m.emit(0xe1) // inverted R/X/B set, opcode map 0F
            m.emit(0xfd ^ (hlcode(v[1]) << 3)) // W=1, L=1, pp=01 (66); XOR folds inverted vvvv = v[1]
            m.emit(0x46) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: reg = dst mask, r/m = second src mask
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KXNORD")
    }
    return p
}
 13566  
// KXNORQ performs "Bitwise Logical XNOR 64-bit Masks".
//
// Mnemonic        : KXNORQ
// Supported forms : (1 form)
//
//    * KXNORQ k, k, k    [AVX512BW]
//
func (self *Program) KXNORQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KXNORQ", 3, Operands { v0, v1, v2 })
    // KXNORQ k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        // VEX.L1.0F.W1 46 /r — W=1 requires the 3-byte VEX form; no SIMD prefix.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4) // 3-byte VEX prefix
            m.emit(0xe1) // inverted R/X/B set, opcode map 0F
            m.emit(0xfc ^ (hlcode(v[1]) << 3)) // W=1, L=1, pp=00; XOR folds inverted vvvv = v[1]
            m.emit(0x46) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: reg = dst mask, r/m = second src mask
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KXNORQ")
    }
    return p
}
 13593  
// KXNORW performs "Bitwise Logical XNOR 16-bit Masks".
//
// Mnemonic        : KXNORW
// Supported forms : (1 form)
//
//    * KXNORW k, k, k    [AVX512F]
//
func (self *Program) KXNORW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KXNORW", 3, Operands { v0, v1, v2 })
    // KXNORW k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainMask
        // VEX.L1.0F.W0 46 /r — 2-byte VEX; flag 4 = L1 | pp=00 (none); vvvv = v[1].
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, 0, nil, hlcode(v[1]))
            m.emit(0x46) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: reg = dst mask, r/m = second src mask
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KXNORW")
    }
    return p
}
 13618  
// KXORB performs "Bitwise Logical XOR 8-bit Masks".
//
// Mnemonic        : KXORB
// Supported forms : (1 form)
//
//    * KXORB k, k, k    [AVX512DQ]
//
func (self *Program) KXORB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KXORB", 3, Operands { v0, v1, v2 })
    // KXORB k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainMask
        // VEX.L1.66.0F.W0 47 /r — 2-byte VEX; flag 5 = L1 | pp=01 (66); vvvv = v[1].
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, 0, nil, hlcode(v[1]))
            m.emit(0x47) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: reg = dst mask, r/m = second src mask
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KXORB")
    }
    return p
}
 13643  
// KXORD performs "Bitwise Logical XOR 32-bit Masks".
//
// Mnemonic        : KXORD
// Supported forms : (1 form)
//
//    * KXORD k, k, k    [AVX512BW]
//
func (self *Program) KXORD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KXORD", 3, Operands { v0, v1, v2 })
    // KXORD k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        // VEX.L1.66.0F.W1 47 /r — W=1 requires the 3-byte VEX form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4) // 3-byte VEX prefix
            m.emit(0xe1) // inverted R/X/B set, opcode map 0F
            m.emit(0xfd ^ (hlcode(v[1]) << 3)) // W=1, L=1, pp=01 (66); XOR folds inverted vvvv = v[1]
            m.emit(0x47) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: reg = dst mask, r/m = second src mask
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KXORD")
    }
    return p
}
 13670  
// KXORQ performs "Bitwise Logical XOR 64-bit Masks".
//
// Mnemonic        : KXORQ
// Supported forms : (1 form)
//
//    * KXORQ k, k, k    [AVX512BW]
//
func (self *Program) KXORQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KXORQ", 3, Operands { v0, v1, v2 })
    // KXORQ k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainMask
        // VEX.L1.0F.W1 47 /r — W=1 requires the 3-byte VEX form; no SIMD prefix.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4) // 3-byte VEX prefix
            m.emit(0xe1) // inverted R/X/B set, opcode map 0F
            m.emit(0xfc ^ (hlcode(v[1]) << 3)) // W=1, L=1, pp=00; XOR folds inverted vvvv = v[1]
            m.emit(0x47) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: reg = dst mask, r/m = second src mask
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KXORQ")
    }
    return p
}
 13697  
// KXORW performs "Bitwise Logical XOR 16-bit Masks".
//
// Mnemonic        : KXORW
// Supported forms : (1 form)
//
//    * KXORW k, k, k    [AVX512F]
//
func (self *Program) KXORW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("KXORW", 3, Operands { v0, v1, v2 })
    // KXORW k, k, k
    if isK(v0) && isK(v1) && isK(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainMask
        // VEX.L1.0F.W0 47 /r — 2-byte VEX; flag 4 = L1 | pp=00 (none); vvvv = v[1].
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, 0, nil, hlcode(v[1]))
            m.emit(0x47) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: reg = dst mask, r/m = second src mask
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for KXORW")
    }
    return p
}
 13722  
// LDDQU performs "Load Unaligned Integer 128 Bits".
//
// Mnemonic        : LDDQU
// Supported forms : (1 form)
//
//    * LDDQU m128, xmm    [SSE3]
//
func (self *Program) LDDQU(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("LDDQU", 2, Operands { v0, v1 })
    // LDDQU m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        // F2 0F F0 /r — legacy SSE encoding, memory source only.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2) // mandatory F2 prefix
            m.rexo(hcode(v[1]), addr(v[0]), false) // optional REX for extended regs / memory operand
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0xf0) // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1) // ModRM/SIB/disp: reg = dst xmm, r/m = memory
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for LDDQU")
    }
    return p
}
 13749  
// LDMXCSR performs "Load MXCSR Register".
//
// Mnemonic        : LDMXCSR
// Supported forms : (1 form)
//
//    * LDMXCSR m32    [SSE]
//
func (self *Program) LDMXCSR(v0 interface{}) *Instruction {
    p := self.alloc("LDMXCSR", 1, Operands { v0 })
    // LDMXCSR m32
    if isM32(v0) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // 0F AE /2 — opcode-extension form; the ModRM reg field is fixed at 2.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false) // optional REX for the memory operand
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0xae) // opcode (group 15)
            m.mrsd(2, addr(v[0]), 1) // ModRM/SIB/disp: reg = /2 extension, r/m = memory
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for LDMXCSR")
    }
    return p
}
 13775  
// LEAL performs "Load Effective Address".
//
// Mnemonic        : LEA
// Supported forms : (1 form)
//
//    * LEAL m, r32
//
func (self *Program) LEAL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("LEAL", 2, Operands { v0, v1 })
    // LEAL m, r32
    if isM(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        // 8D /r with 32-bit operand size (no prefix needed in 64-bit mode).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false) // optional REX for extended regs / memory operand
            m.emit(0x8d) // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1) // ModRM/SIB/disp: reg = dst r32, r/m = memory
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for LEAL")
    }
    return p
}
 13799  
// LEAQ performs "Load Effective Address".
//
// Mnemonic        : LEA
// Supported forms : (1 form)
//
//    * LEAQ m, r64
//
func (self *Program) LEAQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("LEAQ", 2, Operands { v0, v1 })
    // LEAQ m, r64
    if isM(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        // REX.W + 8D /r — W=1 selects the 64-bit operand size.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0])) // mandatory REX with W=1
            m.emit(0x8d) // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1) // ModRM/SIB/disp: reg = dst r64, r/m = memory
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for LEAQ")
    }
    return p
}
 13823  
// LEAW performs "Load Effective Address".
//
// Mnemonic        : LEA
// Supported forms : (1 form)
//
//    * LEAW m, r16
//
func (self *Program) LEAW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("LEAW", 2, Operands { v0, v1 })
    // LEAW m, r16
    if isM(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        // 66 8D /r — the 66 prefix selects the 16-bit operand size.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66) // operand-size override prefix
            m.rexo(hcode(v[1]), addr(v[0]), false) // optional REX for extended regs / memory operand
            m.emit(0x8d) // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1) // ModRM/SIB/disp: reg = dst r16, r/m = memory
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for LEAW")
    }
    return p
}
 13848  
// LFENCE performs "Load Fence".
//
// Mnemonic        : LFENCE
// Supported forms : (1 form)
//
//    * LFENCE    [SSE2]
//
func (self *Program) LFENCE() *Instruction {
    p := self.alloc("LFENCE", 0, Operands {  })
    // LFENCE — no operands, so the single encoding is added unconditionally.
    self.require(ISA_SSE2)
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        // Fixed encoding: 0F AE E8.
        m.emit(0x0f)
        m.emit(0xae)
        m.emit(0xe8)
    })
    return p
}
 13868  
// LZCNTL performs "Count the Number of Leading Zero Bits".
//
// Mnemonic        : LZCNT
// Supported forms : (2 forms)
//
//    * LZCNTL r32, r32    [LZCNT]
//    * LZCNTL m32, r32    [LZCNT]
//
func (self *Program) LZCNTL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("LZCNTL", 2, Operands { v0, v1 })
    // LZCNTL r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_LZCNT)
        p.domain = DomainGeneric
        // F3 0F BD /r — register source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3) // mandatory F3 prefix (distinguishes LZCNT from BSR)
            m.rexo(hcode(v[1]), v[0], false) // optional REX for extended registers
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0xbd) // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: reg = dst, r/m = src
        })
    }
    // LZCNTL m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_LZCNT)
        p.domain = DomainGeneric
        // F3 0F BD /r — memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3) // mandatory F3 prefix
            m.rexo(hcode(v[1]), addr(v[0]), false) // optional REX for the memory operand
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0xbd) // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1) // ModRM/SIB/disp: reg = dst, r/m = memory
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for LZCNTL")
    }
    return p
}
 13908  
// LZCNTQ performs "Count the Number of Leading Zero Bits".
//
// Mnemonic        : LZCNT
// Supported forms : (2 forms)
//
//    * LZCNTQ r64, r64    [LZCNT]
//    * LZCNTQ m64, r64    [LZCNT]
//
func (self *Program) LZCNTQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("LZCNTQ", 2, Operands { v0, v1 })
    // LZCNTQ r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_LZCNT)
        p.domain = DomainGeneric
        // F3 REX.W 0F BD /r — register source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3) // mandatory F3 prefix (distinguishes LZCNT from BSR)
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0])) // REX.W plus R/B high-register bits
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0xbd) // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: reg = dst, r/m = src
        })
    }
    // LZCNTQ m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_LZCNT)
        p.domain = DomainGeneric
        // F3 REX.W 0F BD /r — memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3) // mandatory F3 prefix
            m.rexm(1, hcode(v[1]), addr(v[0])) // mandatory REX with W=1
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0xbd) // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1) // ModRM/SIB/disp: reg = dst, r/m = memory
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for LZCNTQ")
    }
    return p
}
 13948  
// LZCNTW performs "Count the Number of Leading Zero Bits".
//
// Mnemonic        : LZCNT
// Supported forms : (2 forms)
//
//    * LZCNTW r16, r16    [LZCNT]
//    * LZCNTW m16, r16    [LZCNT]
//
func (self *Program) LZCNTW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("LZCNTW", 2, Operands { v0, v1 })
    // LZCNTW r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_LZCNT)
        p.domain = DomainGeneric
        // 66 F3 0F BD /r — register source, 16-bit operand size.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66) // operand-size override prefix
            m.emit(0xf3) // mandatory F3 prefix (distinguishes LZCNT from BSR)
            m.rexo(hcode(v[1]), v[0], false) // optional REX for extended registers
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0xbd) // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: reg = dst, r/m = src
        })
    }
    // LZCNTW m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_LZCNT)
        p.domain = DomainGeneric
        // 66 F3 0F BD /r — memory source, 16-bit operand size.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66) // operand-size override prefix
            m.emit(0xf3) // mandatory F3 prefix
            m.rexo(hcode(v[1]), addr(v[0]), false) // optional REX for the memory operand
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0xbd) // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1) // ModRM/SIB/disp: reg = dst, r/m = memory
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for LZCNTW")
    }
    return p
}
 13990  
// MASKMOVDQU performs "Store Selected Bytes of Double Quadword".
//
// Mnemonic        : MASKMOVDQU
// Supported forms : (1 form)
//
//    * MASKMOVDQU xmm, xmm    [SSE2]
//
func (self *Program) MASKMOVDQU(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MASKMOVDQU", 2, Operands { v0, v1 })
    // MASKMOVDQU xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // 66 0F F7 /r — register-to-register only; the store address is implicit (RDI).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66) // mandatory 66 prefix
            m.rexo(hcode(v[1]), v[0], false) // optional REX for extended registers
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0xf7) // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: reg = data xmm, r/m = mask xmm
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for MASKMOVDQU")
    }
    return p
}
 14017  
// MASKMOVQ performs "Store Selected Bytes of Quadword".
//
// Mnemonic        : MASKMOVQ
// Supported forms : (1 form)
//
//    * MASKMOVQ mm, mm    [MMX+]
//
func (self *Program) MASKMOVQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MASKMOVQ", 2, Operands { v0, v1 })
    // MASKMOVQ mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        // 0F F7 /r — register-to-register only; the store address is implicit (RDI).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false) // optional REX (no-op for MMX register codes)
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0xf7) // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: reg = data mm, r/m = mask mm
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for MASKMOVQ")
    }
    return p
}
 14043  
// MAXPD performs "Return Maximum Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : MAXPD
// Supported forms : (2 forms)
//
//    * MAXPD xmm, xmm     [SSE2]
//    * MAXPD m128, xmm    [SSE2]
//
func (self *Program) MAXPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MAXPD", 2, Operands { v0, v1 })
    // MAXPD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // 66 0F 5F /r — register source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66) // mandatory 66 prefix
            m.rexo(hcode(v[1]), v[0], false) // optional REX for extended registers
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0x5f) // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: reg = dst xmm, r/m = src xmm
        })
    }
    // MAXPD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // 66 0F 5F /r — memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66) // mandatory 66 prefix
            m.rexo(hcode(v[1]), addr(v[0]), false) // optional REX for the memory operand
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0x5f) // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1) // ModRM/SIB/disp: reg = dst xmm, r/m = memory
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for MAXPD")
    }
    return p
}
 14083  
// MAXPS performs "Return Maximum Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : MAXPS
// Supported forms : (2 forms)
//
//    * MAXPS xmm, xmm     [SSE]
//    * MAXPS m128, xmm    [SSE]
//
func (self *Program) MAXPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MAXPS", 2, Operands { v0, v1 })
    // MAXPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: [REX] 0F 5F /r (register-direct ModRM; no mandatory prefix)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x5f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MAXPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: [REX] 0F 5F /r (ModRM+SIB/disp for the memory operand)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for MAXPS")
    }
    return p
}
 14121  
// MAXSD performs "Return Maximum Scalar Double-Precision Floating-Point Value".
//
// Mnemonic        : MAXSD
// Supported forms : (2 forms)
//
//    * MAXSD xmm, xmm    [SSE2]
//    * MAXSD m64, xmm    [SSE2]
//
func (self *Program) MAXSD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MAXSD", 2, Operands { v0, v1 })
    // MAXSD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: F2 [REX] 0F 5F /r (register-direct ModRM)
            m.emit(0xf2)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x5f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MAXSD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: F2 [REX] 0F 5F /r (ModRM+SIB/disp for the memory operand)
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for MAXSD")
    }
    return p
}
 14161  
// MAXSS performs "Return Maximum Scalar Single-Precision Floating-Point Value".
//
// Mnemonic        : MAXSS
// Supported forms : (2 forms)
//
//    * MAXSS xmm, xmm    [SSE]
//    * MAXSS m32, xmm    [SSE]
//
func (self *Program) MAXSS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MAXSS", 2, Operands { v0, v1 })
    // MAXSS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: F3 [REX] 0F 5F /r (register-direct ModRM)
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x5f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MAXSS m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: F3 [REX] 0F 5F /r (ModRM+SIB/disp for the memory operand)
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for MAXSS")
    }
    return p
}
 14201  
// MFENCE performs "Memory Fence".
//
// Mnemonic        : MFENCE
// Supported forms : (1 form)
//
//    * MFENCE    [SSE2]
//
func (self *Program) MFENCE() *Instruction {
    p := self.alloc("MFENCE", 0, Operands {  })
    // MFENCE takes no operands, so the single encoding is added unconditionally.
    self.require(ISA_SSE2)
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        // encoding: fixed byte sequence 0F AE F0
        m.emit(0x0f)
        m.emit(0xae)
        m.emit(0xf0)
    })
    return p
}
 14221  
// MINPD performs "Return Minimum Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : MINPD
// Supported forms : (2 forms)
//
//    * MINPD xmm, xmm     [SSE2]
//    * MINPD m128, xmm    [SSE2]
//
func (self *Program) MINPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MINPD", 2, Operands { v0, v1 })
    // MINPD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: 66 [REX] 0F 5D /r (register-direct ModRM)
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MINPD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: 66 [REX] 0F 5D /r (ModRM+SIB/disp for the memory operand)
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for MINPD")
    }
    return p
}
 14261  
// MINPS performs "Return Minimum Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : MINPS
// Supported forms : (2 forms)
//
//    * MINPS xmm, xmm     [SSE]
//    * MINPS m128, xmm    [SSE]
//
func (self *Program) MINPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MINPS", 2, Operands { v0, v1 })
    // MINPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: [REX] 0F 5D /r (register-direct ModRM; no mandatory prefix)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MINPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: [REX] 0F 5D /r (ModRM+SIB/disp for the memory operand)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for MINPS")
    }
    return p
}
 14299  
// MINSD performs "Return Minimum Scalar Double-Precision Floating-Point Value".
//
// Mnemonic        : MINSD
// Supported forms : (2 forms)
//
//    * MINSD xmm, xmm    [SSE2]
//    * MINSD m64, xmm    [SSE2]
//
func (self *Program) MINSD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MINSD", 2, Operands { v0, v1 })
    // MINSD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: F2 [REX] 0F 5D /r (register-direct ModRM)
            m.emit(0xf2)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MINSD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: F2 [REX] 0F 5D /r (ModRM+SIB/disp for the memory operand)
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for MINSD")
    }
    return p
}
 14339  
// MINSS performs "Return Minimum Scalar Single-Precision Floating-Point Value".
//
// Mnemonic        : MINSS
// Supported forms : (2 forms)
//
//    * MINSS xmm, xmm    [SSE]
//    * MINSS m32, xmm    [SSE]
//
func (self *Program) MINSS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MINSS", 2, Operands { v0, v1 })
    // MINSS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: F3 [REX] 0F 5D /r (register-direct ModRM)
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MINSS m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: F3 [REX] 0F 5D /r (ModRM+SIB/disp for the memory operand)
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for MINSS")
    }
    return p
}
 14379  
// MONITOR performs "Monitor a Linear Address Range".
//
// Mnemonic        : MONITOR
// Supported forms : (1 form)
//
//    * MONITOR    [MONITOR]
//
func (self *Program) MONITOR() *Instruction {
    p := self.alloc("MONITOR", 0, Operands {  })
    // MONITOR takes no operands, so the single encoding is added unconditionally.
    self.require(ISA_MONITOR)
    p.domain = DomainMisc
    p.add(0, func(m *_Encoding, v []interface{}) {
        // encoding: fixed byte sequence 0F 01 C8
        m.emit(0x0f)
        m.emit(0x01)
        m.emit(0xc8)
    })
    return p
}
 14399  
// MONITORX performs "Monitor a Linear Address Range with Timeout".
//
// Mnemonic        : MONITORX
// Supported forms : (1 form)
//
//    * MONITORX    [MONITORX]
//
func (self *Program) MONITORX() *Instruction {
    p := self.alloc("MONITORX", 0, Operands {  })
    // MONITORX takes no operands, so the single encoding is added unconditionally.
    self.require(ISA_MONITORX)
    p.domain = DomainMisc
    p.add(0, func(m *_Encoding, v []interface{}) {
        // encoding: fixed byte sequence 0F 01 FA
        m.emit(0x0f)
        m.emit(0x01)
        m.emit(0xfa)
    })
    return p
}
 14419  
// MOVAPD performs "Move Aligned Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : MOVAPD
// Supported forms : (3 forms)
//
//    * MOVAPD xmm, xmm     [SSE2]
//    * MOVAPD m128, xmm    [SSE2]
//    * MOVAPD xmm, m128    [SSE2]
//
func (self *Program) MOVAPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVAPD", 2, Operands { v0, v1 })
    // MOVAPD xmm, xmm
    // The register-register form has two equivalent encodings (opcode 0x28
    // with operands one way, opcode 0x29 with them swapped); both are added
    // as candidates.
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: 66 [REX] 0F 28 /r (reg=v[1], rm=v[0])
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x28)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: 66 [REX] 0F 29 /r (reg=v[0], rm=v[1])
            m.emit(0x66)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0x29)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // MOVAPD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: 66 [REX] 0F 28 /r (load; ModRM+SIB/disp for memory)
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x28)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // MOVAPD xmm, m128
    if isXMM(v0) && isM128(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: 66 [REX] 0F 29 /r (store; ModRM+SIB/disp for memory)
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x29)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for MOVAPD")
    }
    return p
}
 14479  
// MOVAPS performs "Move Aligned Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : MOVAPS
// Supported forms : (3 forms)
//
//    * MOVAPS xmm, xmm     [SSE]
//    * MOVAPS m128, xmm    [SSE]
//    * MOVAPS xmm, m128    [SSE]
//
func (self *Program) MOVAPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVAPS", 2, Operands { v0, v1 })
    // MOVAPS xmm, xmm
    // The register-register form has two equivalent encodings (opcode 0x28
    // and opcode 0x29 with operand roles swapped); both are added as candidates.
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: [REX] 0F 28 /r (reg=v[1], rm=v[0])
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x28)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: [REX] 0F 29 /r (reg=v[0], rm=v[1])
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0x29)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // MOVAPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: [REX] 0F 28 /r (load; ModRM+SIB/disp for memory)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x28)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // MOVAPS xmm, m128
    if isXMM(v0) && isM128(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: [REX] 0F 29 /r (store; ModRM+SIB/disp for memory)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x29)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for MOVAPS")
    }
    return p
}
 14535  
// MOVB performs "Move".
//
// Mnemonic        : MOV
// Supported forms : (5 forms)
//
//    * MOVB imm8, r8
//    * MOVB r8, r8
//    * MOVB m8, r8
//    * MOVB imm8, m8
//    * MOVB r8, m8
//
func (self *Program) MOVB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVB", 2, Operands { v0, v1 })
    // MOVB imm8, r8
    // Two equivalent encodings: C6 /0 ib (ModRM form) and the shorter B0+rb ib.
    if isImm8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: [REX] C6 /0 ib
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xc6)
            m.emit(0xc0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: [REX] B0+rb ib (register encoded in the opcode byte)
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xb0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // MOVB r8, r8
    // Two equivalent encodings: 88 /r and 8A /r with operand roles swapped.
    // REX is forced if either register requires it (SPL/BPL/SIL/DIL, R8B-R15B).
    if isReg8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: [REX] 88 /r (reg=v[0], rm=v[1])
            m.rexo(hcode(v[0]), v[1], isReg8REX(v[0]) || isReg8REX(v[1]))
            m.emit(0x88)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: [REX] 8A /r (reg=v[1], rm=v[0])
            m.rexo(hcode(v[1]), v[0], isReg8REX(v[0]) || isReg8REX(v[1]))
            m.emit(0x8a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVB m8, r8
    if isM8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: [REX] 8A /r (load; ModRM+SIB/disp for memory)
            m.rexo(hcode(v[1]), addr(v[0]), isReg8REX(v[1]))
            m.emit(0x8a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // MOVB imm8, m8
    if isImm8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: [REX] C6 /0 ib (ModRM+SIB/disp for memory, then imm8)
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc6)
            m.mrsd(0, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // MOVB r8, m8
    if isReg8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: [REX] 88 /r (store; ModRM+SIB/disp for memory)
            m.rexo(hcode(v[0]), addr(v[1]), isReg8REX(v[0]))
            m.emit(0x88)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for MOVB")
    }
    return p
}
 14611  
// MOVBEL performs "Move Data After Swapping Bytes".
//
// Mnemonic        : MOVBE
// Supported forms : (2 forms)
//
//    * MOVBEL m32, r32    [MOVBE]
//    * MOVBEL r32, m32    [MOVBE]
//
func (self *Program) MOVBEL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVBEL", 2, Operands { v0, v1 })
    // MOVBEL m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_MOVBE)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: [REX] 0F 38 F0 /r (load; three-byte opcode)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xf0)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // MOVBEL r32, m32
    if isReg32(v0) && isM32(v1) {
        self.require(ISA_MOVBE)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: [REX] 0F 38 F1 /r (store; three-byte opcode)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xf1)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for MOVBEL")
    }
    return p
}
 14651  
// MOVBEQ performs "Move Data After Swapping Bytes".
//
// Mnemonic        : MOVBE
// Supported forms : (2 forms)
//
//    * MOVBEQ m64, r64    [MOVBE]
//    * MOVBEQ r64, m64    [MOVBE]
//
func (self *Program) MOVBEQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVBEQ", 2, Operands { v0, v1 })
    // MOVBEQ m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_MOVBE)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: REX.W 0F 38 F0 /r (rexm(1, ...) sets the W bit for 64-bit operand size)
            m.rexm(1, hcode(v[1]), addr(v[0]))
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xf0)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // MOVBEQ r64, m64
    if isReg64(v0) && isM64(v1) {
        self.require(ISA_MOVBE)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: REX.W 0F 38 F1 /r (store form)
            m.rexm(1, hcode(v[0]), addr(v[1]))
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xf1)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for MOVBEQ")
    }
    return p
}
 14691  
// MOVBEW performs "Move Data After Swapping Bytes".
//
// Mnemonic        : MOVBE
// Supported forms : (2 forms)
//
//    * MOVBEW m16, r16    [MOVBE]
//    * MOVBEW r16, m16    [MOVBE]
//
func (self *Program) MOVBEW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVBEW", 2, Operands { v0, v1 })
    // MOVBEW m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_MOVBE)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: 66 [REX] 0F 38 F0 /r (0x66 selects 16-bit operand size)
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xf0)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // MOVBEW r16, m16
    if isReg16(v0) && isM16(v1) {
        self.require(ISA_MOVBE)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: 66 [REX] 0F 38 F1 /r (store form)
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0xf1)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for MOVBEW")
    }
    return p
}
 14733  
// MOVD performs "Move Doubleword".
//
// Mnemonic        : MOVD
// Supported forms : (8 forms)
//
//    * MOVD mm, r32     [MMX]
//    * MOVD r32, mm     [MMX]
//    * MOVD m32, mm     [MMX]
//    * MOVD mm, m32     [MMX]
//    * MOVD xmm, r32    [SSE2]
//    * MOVD r32, xmm    [SSE2]
//    * MOVD m32, xmm    [SSE2]
//    * MOVD xmm, m32    [SSE2]
//
// The MMX forms use bare 0F 6E/7E; the XMM forms use the same opcodes with a
// mandatory 0x66 prefix. 6E moves into the vector register, 7E moves out of it.
func (self *Program) MOVD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVD", 2, Operands { v0, v1 })
    // MOVD mm, r32
    if isMM(v0) && isReg32(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: [REX] 0F 7E /r (reg=v[0], rm=v[1])
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0x7e)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // MOVD r32, mm
    if isReg32(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: [REX] 0F 6E /r (reg=v[1], rm=v[0])
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x6e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVD m32, mm
    if isM32(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: [REX] 0F 6E /r (ModRM+SIB/disp for memory)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x6e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // MOVD mm, m32
    if isMM(v0) && isM32(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: [REX] 0F 7E /r (ModRM+SIB/disp for memory)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x7e)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // MOVD xmm, r32
    if isXMM(v0) && isReg32(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: 66 [REX] 0F 7E /r (reg=v[0], rm=v[1])
            m.emit(0x66)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0x7e)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // MOVD r32, xmm
    if isReg32(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: 66 [REX] 0F 6E /r (reg=v[1], rm=v[0])
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x6e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVD m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: 66 [REX] 0F 6E /r (ModRM+SIB/disp for memory)
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x6e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // MOVD xmm, m32
    if isXMM(v0) && isM32(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: 66 [REX] 0F 7E /r (ModRM+SIB/disp for memory)
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x7e)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for MOVD")
    }
    return p
}
 14847  
// MOVDDUP performs "Move One Double-FP and Duplicate".
//
// Mnemonic        : MOVDDUP
// Supported forms : (2 forms)
//
//    * MOVDDUP xmm, xmm    [SSE3]
//    * MOVDDUP m64, xmm    [SSE3]
//
func (self *Program) MOVDDUP(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVDDUP", 2, Operands { v0, v1 })
    // MOVDDUP xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: F2 [REX] 0F 12 /r (register-direct ModRM)
            m.emit(0xf2)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x12)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVDDUP m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: F2 [REX] 0F 12 /r (ModRM+SIB/disp for the memory operand)
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x12)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for MOVDDUP")
    }
    return p
}
 14887  
// MOVDQ2Q performs "Move Quadword from XMM to MMX Technology Register".
//
// Mnemonic        : MOVDQ2Q
// Supported forms : (1 form)
//
//    * MOVDQ2Q xmm, mm    [SSE2]
//
func (self *Program) MOVDQ2Q(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVDQ2Q", 2, Operands { v0, v1 })
    // MOVDQ2Q xmm, mm
    if isXMM(v0) && isMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: F2 [REX] 0F D6 /r (register-direct ModRM: reg=v[1], rm=v[0])
            m.emit(0xf2)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xd6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for MOVDQ2Q")
    }
    return p
}
 14914  
// MOVDQA performs "Move Aligned Double Quadword".
//
// Mnemonic        : MOVDQA
// Supported forms : (3 forms)
//
//    * MOVDQA xmm, xmm     [SSE2]
//    * MOVDQA m128, xmm    [SSE2]
//    * MOVDQA xmm, m128    [SSE2]
//
func (self *Program) MOVDQA(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVDQA", 2, Operands { v0, v1 })
    // MOVDQA xmm, xmm
    // The register-register form has two equivalent encodings (opcode 0x6F
    // and opcode 0x7F with operand roles swapped); both are added as candidates.
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: 66 [REX] 0F 6F /r (reg=v[1], rm=v[0])
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: 66 [REX] 0F 7F /r (reg=v[0], rm=v[1])
            m.emit(0x66)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // MOVDQA m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: 66 [REX] 0F 6F /r (load; ModRM+SIB/disp for memory)
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // MOVDQA xmm, m128
    if isXMM(v0) && isM128(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // encoding: 66 [REX] 0F 7F /r (store; ModRM+SIB/disp for memory)
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // no encoding matched the supplied operand types
    if p.len == 0 {
        panic("invalid operands for MOVDQA")
    }
    return p
}
 14974  
// MOVDQU performs "Move Unaligned Double Quadword".
//
// Mnemonic        : MOVDQU
// Supported forms : (3 forms)
//
//    * MOVDQU xmm, xmm     [SSE2]
//    * MOVDQU m128, xmm    [SSE2]
//    * MOVDQU xmm, m128    [SSE2]
//
func (self *Program) MOVDQU(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVDQU", 2, Operands { v0, v1 })
    // MOVDQU xmm, xmm — two interchangeable encodings are registered
    // (load direction F3 0F 6F and store direction F3 0F 7F).
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)                                      // F3 mandatory prefix
            m.rexo(hcode(v[1]), v[0], false)                  // optional REX for extended registers
            m.emit(0x0f)                                      // two-byte opcode escape
            m.emit(0x6f)                                      // opcode (load form)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))     // ModRM: mod=11, reg=dst, rm=src
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0x7f)                                      // opcode (store form, operands swapped in ModRM)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // MOVDQU m128, xmm — encoding: F3 0F 6F /r
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                // ModRM + SIB/displacement for memory operand
        })
    }
    // MOVDQU xmm, m128 — encoding: F3 0F 7F /r
    if isXMM(v0) && isM128(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVDQU")                  // no form matched the operand types
    }
    return p
}
 15034  
// MOVHLPS performs "Move Packed Single-Precision Floating-Point Values High to Low".
//
// Mnemonic        : MOVHLPS
// Supported forms : (1 form)
//
//    * MOVHLPS xmm, xmm    [SSE]
//
func (self *Program) MOVHLPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVHLPS", 2, Operands { v0, v1 })
    // MOVHLPS xmm, xmm — encoding: 0F 12 /r (register-only form; no prefix)
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                  // optional REX for extended registers
            m.emit(0x0f)                                      // two-byte opcode escape
            m.emit(0x12)                                      // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))     // ModRM: mod=11, reg=dst, rm=src
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVHLPS")                 // no form matched the operand types
    }
    return p
}
 15060  
// MOVHPD performs "Move High Packed Double-Precision Floating-Point Value".
//
// Mnemonic        : MOVHPD
// Supported forms : (2 forms)
//
//    * MOVHPD m64, xmm    [SSE2]
//    * MOVHPD xmm, m64    [SSE2]
//
func (self *Program) MOVHPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVHPD", 2, Operands { v0, v1 })
    // MOVHPD m64, xmm — load form, encoding: 66 0F 16 /r
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                      // 66 mandatory prefix
            m.rexo(hcode(v[1]), addr(v[0]), false)            // optional REX (xmm / base / index)
            m.emit(0x0f)                                      // two-byte opcode escape
            m.emit(0x16)                                      // opcode (load)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                // ModRM + SIB/displacement for memory operand
        })
    }
    // MOVHPD xmm, m64 — store form, encoding: 66 0F 17 /r
    if isXMM(v0) && isM64(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x17)                                      // opcode (store)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVHPD")                  // no form matched the operand types
    }
    return p
}
 15100  
// MOVHPS performs "Move High Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : MOVHPS
// Supported forms : (2 forms)
//
//    * MOVHPS m64, xmm    [SSE]
//    * MOVHPS xmm, m64    [SSE]
//
func (self *Program) MOVHPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVHPS", 2, Operands { v0, v1 })
    // MOVHPS m64, xmm — load form, encoding: 0F 16 /r (no prefix)
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)            // optional REX (xmm / base / index)
            m.emit(0x0f)                                      // two-byte opcode escape
            m.emit(0x16)                                      // opcode (load)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                // ModRM + SIB/displacement for memory operand
        })
    }
    // MOVHPS xmm, m64 — store form, encoding: 0F 17 /r
    if isXMM(v0) && isM64(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x17)                                      // opcode (store)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVHPS")                  // no form matched the operand types
    }
    return p
}
 15138  
// MOVL performs "Move".
//
// Mnemonic        : MOV
// Supported forms : (5 forms)
//
//    * MOVL imm32, r32
//    * MOVL r32, r32
//    * MOVL m32, r32
//    * MOVL imm32, m32
//    * MOVL r32, m32
//
func (self *Program) MOVL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVL", 2, Operands { v0, v1 })
    // MOVL imm32, r32 — two interchangeable encodings are registered:
    // the ModRM form C7 /0 and the shorter register-in-opcode form B8+rd.
    if isImm32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)                            // optional REX for r8d-r15d
            m.emit(0xc7)                                      // opcode, /0 extension in ModRM.reg
            m.emit(0xc0 | lcode(v[1]))                        // ModRM: mod=11, reg=0, rm=dst
            m.imm4(toImmAny(v[0]))                            // 32-bit immediate
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xb8 | lcode(v[1]))                        // B8+rd: register encoded in opcode byte
            m.imm4(toImmAny(v[0]))
        })
    }
    // MOVL r32, r32 — two interchangeable encodings (89 /r store direction, 8B /r load direction).
    if isReg32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x89)                                      // MOV r/m32, r32
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))     // ModRM: mod=11, reg=src, rm=dst
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x8b)                                      // MOV r32, r/m32
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))     // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // MOVL m32, r32 — encoding: 8B /r
    if isM32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)            // optional REX (reg / base / index)
            m.emit(0x8b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                // ModRM + SIB/displacement for memory operand
        })
    }
    // MOVL imm32, m32 — encoding: C7 /0 imm32
    if isImm32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc7)
            m.mrsd(0, addr(v[1]), 1)                          // ModRM.reg = /0 extension
            m.imm4(toImmAny(v[0]))
        })
    }
    // MOVL r32, m32 — encoding: 89 /r
    if isReg32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x89)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVL")                    // no form matched the operand types
    }
    return p
}
 15214  
// MOVLHPS performs "Move Packed Single-Precision Floating-Point Values Low to High".
//
// Mnemonic        : MOVLHPS
// Supported forms : (1 form)
//
//    * MOVLHPS xmm, xmm    [SSE]
//
func (self *Program) MOVLHPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVLHPS", 2, Operands { v0, v1 })
    // MOVLHPS xmm, xmm — encoding: 0F 16 /r (register-only form; no prefix)
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                  // optional REX for extended registers
            m.emit(0x0f)                                      // two-byte opcode escape
            m.emit(0x16)                                      // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))     // ModRM: mod=11, reg=dst, rm=src
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVLHPS")                 // no form matched the operand types
    }
    return p
}
 15240  
// MOVLPD performs "Move Low Packed Double-Precision Floating-Point Value".
//
// Mnemonic        : MOVLPD
// Supported forms : (2 forms)
//
//    * MOVLPD m64, xmm    [SSE2]
//    * MOVLPD xmm, m64    [SSE2]
//
func (self *Program) MOVLPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVLPD", 2, Operands { v0, v1 })
    // MOVLPD m64, xmm — load form, encoding: 66 0F 12 /r
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                      // 66 mandatory prefix
            m.rexo(hcode(v[1]), addr(v[0]), false)            // optional REX (xmm / base / index)
            m.emit(0x0f)                                      // two-byte opcode escape
            m.emit(0x12)                                      // opcode (load)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                // ModRM + SIB/displacement for memory operand
        })
    }
    // MOVLPD xmm, m64 — store form, encoding: 66 0F 13 /r
    if isXMM(v0) && isM64(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x13)                                      // opcode (store)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVLPD")                  // no form matched the operand types
    }
    return p
}
 15280  
// MOVLPS performs "Move Low Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : MOVLPS
// Supported forms : (2 forms)
//
//    * MOVLPS m64, xmm    [SSE]
//    * MOVLPS xmm, m64    [SSE]
//
func (self *Program) MOVLPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVLPS", 2, Operands { v0, v1 })
    // MOVLPS m64, xmm — load form, encoding: 0F 12 /r (no prefix)
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)            // optional REX (xmm / base / index)
            m.emit(0x0f)                                      // two-byte opcode escape
            m.emit(0x12)                                      // opcode (load)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                // ModRM + SIB/displacement for memory operand
        })
    }
    // MOVLPS xmm, m64 — store form, encoding: 0F 13 /r
    if isXMM(v0) && isM64(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x13)                                      // opcode (store)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVLPS")                  // no form matched the operand types
    }
    return p
}
 15318  
// MOVMSKPD performs "Extract Packed Double-Precision Floating-Point Sign Mask".
//
// Mnemonic        : MOVMSKPD
// Supported forms : (1 form)
//
//    * MOVMSKPD xmm, r32    [SSE2]
//
func (self *Program) MOVMSKPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVMSKPD", 2, Operands { v0, v1 })
    // MOVMSKPD xmm, r32 — encoding: 66 0F 50 /r
    if isXMM(v0) && isReg32(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                      // 66 mandatory prefix
            m.rexo(hcode(v[1]), v[0], false)                  // optional REX for extended registers
            m.emit(0x0f)                                      // two-byte opcode escape
            m.emit(0x50)                                      // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))     // ModRM: mod=11, reg=dst r32, rm=src xmm
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVMSKPD")                // no form matched the operand types
    }
    return p
}
 15345  
// MOVMSKPS performs "Extract Packed Single-Precision Floating-Point Sign Mask".
//
// Mnemonic        : MOVMSKPS
// Supported forms : (1 form)
//
//    * MOVMSKPS xmm, r32    [SSE]
//
func (self *Program) MOVMSKPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVMSKPS", 2, Operands { v0, v1 })
    // MOVMSKPS xmm, r32 — encoding: 0F 50 /r (no prefix)
    if isXMM(v0) && isReg32(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                  // optional REX for extended registers
            m.emit(0x0f)                                      // two-byte opcode escape
            m.emit(0x50)                                      // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))     // ModRM: mod=11, reg=dst r32, rm=src xmm
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVMSKPS")                // no form matched the operand types
    }
    return p
}
 15371  
// MOVNTDQ performs "Store Double Quadword Using Non-Temporal Hint".
//
// Mnemonic        : MOVNTDQ
// Supported forms : (1 form)
//
//    * MOVNTDQ xmm, m128    [SSE2]
//
func (self *Program) MOVNTDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVNTDQ", 2, Operands { v0, v1 })
    // MOVNTDQ xmm, m128 — encoding: 66 0F E7 /r (store only)
    if isXMM(v0) && isM128(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                      // 66 mandatory prefix
            m.rexo(hcode(v[0]), addr(v[1]), false)            // optional REX (xmm / base / index)
            m.emit(0x0f)                                      // two-byte opcode escape
            m.emit(0xe7)                                      // opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 1)                // ModRM + SIB/displacement for memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVNTDQ")                 // no form matched the operand types
    }
    return p
}
 15398  
// MOVNTDQA performs "Load Double Quadword Non-Temporal Aligned Hint".
//
// Mnemonic        : MOVNTDQA
// Supported forms : (1 form)
//
//    * MOVNTDQA m128, xmm    [SSE4.1]
//
func (self *Program) MOVNTDQA(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVNTDQA", 2, Operands { v0, v1 })
    // MOVNTDQA m128, xmm — encoding: 66 0F 38 2A /r (three-byte opcode, load only)
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                      // 66 mandatory prefix
            m.rexo(hcode(v[1]), addr(v[0]), false)            // optional REX (xmm / base / index)
            m.emit(0x0f)                                      // opcode escape, byte 1
            m.emit(0x38)                                      // opcode escape, byte 2
            m.emit(0x2a)                                      // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                // ModRM + SIB/displacement for memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVNTDQA")                // no form matched the operand types
    }
    return p
}
 15426  
// MOVNTIL performs "Store Doubleword Using Non-Temporal Hint".
//
// Mnemonic        : MOVNTI
// Supported forms : (1 form)
//
//    * MOVNTIL r32, m32    [SSE2]
//
func (self *Program) MOVNTIL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVNTIL", 2, Operands { v0, v1 })
    // MOVNTIL r32, m32 — encoding: 0F C3 /r (store only)
    if isReg32(v0) && isM32(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)            // optional REX (reg / base / index)
            m.emit(0x0f)                                      // two-byte opcode escape
            m.emit(0xc3)                                      // opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 1)                // ModRM + SIB/displacement for memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVNTIL")                 // no form matched the operand types
    }
    return p
}
 15452  
// MOVNTIQ performs "Store Doubleword Using Non-Temporal Hint".
//
// NOTE(review): the title above is Intel's name for the MOVNTI mnemonic; this
// REX.W-encoded form actually stores a quadword (r64 to m64) — confirm against
// the generator's naming convention before changing the generated text.
//
// Mnemonic        : MOVNTI
// Supported forms : (1 form)
//
//    * MOVNTIQ r64, m64    [SSE2]
//
func (self *Program) MOVNTIQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVNTIQ", 2, Operands { v0, v1 })
    // MOVNTIQ r64, m64 — encoding: REX.W 0F C3 /r (store only)
    if isReg64(v0) && isM64(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[0]), addr(v[1]))                // mandatory REX with W=1 for 64-bit operand
            m.emit(0x0f)                                      // two-byte opcode escape
            m.emit(0xc3)                                      // opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 1)                // ModRM + SIB/displacement for memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVNTIQ")                 // no form matched the operand types
    }
    return p
}
 15478  
// MOVNTPD performs "Store Packed Double-Precision Floating-Point Values Using Non-Temporal Hint".
//
// Mnemonic        : MOVNTPD
// Supported forms : (1 form)
//
//    * MOVNTPD xmm, m128    [SSE2]
//
func (self *Program) MOVNTPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVNTPD", 2, Operands { v0, v1 })
    // MOVNTPD xmm, m128 — encoding: 66 0F 2B /r (store only)
    if isXMM(v0) && isM128(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                      // 66 mandatory prefix
            m.rexo(hcode(v[0]), addr(v[1]), false)            // optional REX (xmm / base / index)
            m.emit(0x0f)                                      // two-byte opcode escape
            m.emit(0x2b)                                      // opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 1)                // ModRM + SIB/displacement for memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVNTPD")                 // no form matched the operand types
    }
    return p
}
 15505  
// MOVNTPS performs "Store Packed Single-Precision Floating-Point Values Using Non-Temporal Hint".
//
// Mnemonic        : MOVNTPS
// Supported forms : (1 form)
//
//    * MOVNTPS xmm, m128    [SSE]
//
func (self *Program) MOVNTPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVNTPS", 2, Operands { v0, v1 })
    // MOVNTPS xmm, m128 — encoding: 0F 2B /r (no prefix, store only)
    if isXMM(v0) && isM128(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)            // optional REX (xmm / base / index)
            m.emit(0x0f)                                      // two-byte opcode escape
            m.emit(0x2b)                                      // opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 1)                // ModRM + SIB/displacement for memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVNTPS")                 // no form matched the operand types
    }
    return p
}
 15531  
// MOVNTQ performs "Store of Quadword Using Non-Temporal Hint".
//
// Mnemonic        : MOVNTQ
// Supported forms : (1 form)
//
//    * MOVNTQ mm, m64    [MMX+]
//
func (self *Program) MOVNTQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVNTQ", 2, Operands { v0, v1 })
    // MOVNTQ mm, m64 — encoding: 0F E7 /r (MMX register source, store only)
    if isMM(v0) && isM64(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)            // optional REX (base / index registers)
            m.emit(0x0f)                                      // two-byte opcode escape
            m.emit(0xe7)                                      // opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 1)                // ModRM + SIB/displacement for memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVNTQ")                  // no form matched the operand types
    }
    return p
}
 15557  
// MOVNTSD performs "Store Scalar Double-Precision Floating-Point Values Using Non-Temporal Hint".
//
// Mnemonic        : MOVNTSD
// Supported forms : (1 form)
//
//    * MOVNTSD xmm, m64    [SSE4A]
//
func (self *Program) MOVNTSD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVNTSD", 2, Operands { v0, v1 })
    // MOVNTSD xmm, m64 — encoding: F2 0F 2B /r (AMD SSE4A, store only)
    if isXMM(v0) && isM64(v1) {
        self.require(ISA_SSE4A)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)                                      // F2 mandatory prefix
            m.rexo(hcode(v[0]), addr(v[1]), false)            // optional REX (xmm / base / index)
            m.emit(0x0f)                                      // two-byte opcode escape
            m.emit(0x2b)                                      // opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 1)                // ModRM + SIB/displacement for memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVNTSD")                 // no form matched the operand types
    }
    return p
}
 15584  
// MOVNTSS performs "Store Scalar Single-Precision Floating-Point Values Using Non-Temporal Hint".
//
// Mnemonic        : MOVNTSS
// Supported forms : (1 form)
//
//    * MOVNTSS xmm, m32    [SSE4A]
//
func (self *Program) MOVNTSS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVNTSS", 2, Operands { v0, v1 })
    // MOVNTSS xmm, m32 — encoding: F3 0F 2B /r (AMD SSE4A, store only)
    if isXMM(v0) && isM32(v1) {
        self.require(ISA_SSE4A)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)                                      // F3 mandatory prefix
            m.rexo(hcode(v[0]), addr(v[1]), false)            // optional REX (xmm / base / index)
            m.emit(0x0f)                                      // two-byte opcode escape
            m.emit(0x2b)                                      // opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 1)                // ModRM + SIB/displacement for memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVNTSS")                 // no form matched the operand types
    }
    return p
}
 15611  
 15612  // MOVQ performs "Move".
 15613  //
 15614  // Mnemonic        : MOV
 15615  // Supported forms : (16 forms)
 15616  //
 15617  //    * MOVQ imm32, r64
 15618  //    * MOVQ imm64, r64
 15619  //    * MOVQ r64, r64
 15620  //    * MOVQ m64, r64
 15621  //    * MOVQ imm32, m64
 15622  //    * MOVQ r64, m64
 15623  //    * MOVQ mm, r64       [MMX]
 15624  //    * MOVQ r64, mm       [MMX]
 15625  //    * MOVQ mm, mm        [MMX]
 15626  //    * MOVQ m64, mm       [MMX]
 15627  //    * MOVQ mm, m64       [MMX]
 15628  //    * MOVQ xmm, r64      [SSE2]
 15629  //    * MOVQ r64, xmm      [SSE2]
 15630  //    * MOVQ xmm, xmm      [SSE2]
 15631  //    * MOVQ m64, xmm      [SSE2]
 15632  //    * MOVQ xmm, m64      [SSE2]
 15633  //
 15634  func (self *Program) MOVQ(v0 interface{}, v1 interface{}) *Instruction {
 15635      p := self.alloc("MOVQ", 2, Operands { v0, v1 })
 15636      // MOVQ imm32, r64
 15637      if isImm32Ext(v0, 8) && isReg64(v1) {
 15638          p.domain = DomainGeneric
 15639          p.add(0, func(m *_Encoding, v []interface{}) {
 15640              m.emit(0x48 | hcode(v[1]))
 15641              m.emit(0xc7)
 15642              m.emit(0xc0 | lcode(v[1]))
 15643              m.imm4(toImmAny(v[0]))
 15644          })
 15645      }
 15646      // MOVQ imm64, r64
 15647      if isImm64(v0) && isReg64(v1) {
 15648          p.domain = DomainGeneric
 15649          p.add(0, func(m *_Encoding, v []interface{}) {
 15650              m.emit(0x48 | hcode(v[1]))
 15651              m.emit(0xb8 | lcode(v[1]))
 15652              m.imm8(toImmAny(v[0]))
 15653          })
 15654      }
 15655      // MOVQ r64, r64
 15656      if isReg64(v0) && isReg64(v1) {
 15657          p.domain = DomainGeneric
 15658          p.add(0, func(m *_Encoding, v []interface{}) {
 15659              m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1]))
 15660              m.emit(0x89)
 15661              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 15662          })
 15663          p.add(0, func(m *_Encoding, v []interface{}) {
 15664              m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
 15665              m.emit(0x8b)
 15666              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 15667          })
 15668      }
 15669      // MOVQ m64, r64
 15670      if isM64(v0) && isReg64(v1) {
 15671          p.domain = DomainGeneric
 15672          p.add(0, func(m *_Encoding, v []interface{}) {
 15673              m.rexm(1, hcode(v[1]), addr(v[0]))
 15674              m.emit(0x8b)
 15675              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 15676          })
 15677      }
 15678      // MOVQ imm32, m64
 15679      if isImm32Ext(v0, 8) && isM64(v1) {
 15680          p.domain = DomainGeneric
 15681          p.add(0, func(m *_Encoding, v []interface{}) {
 15682              m.rexm(1, 0, addr(v[1]))
 15683              m.emit(0xc7)
 15684              m.mrsd(0, addr(v[1]), 1)
 15685              m.imm4(toImmAny(v[0]))
 15686          })
 15687      }
 15688      // MOVQ r64, m64
 15689      if isReg64(v0) && isM64(v1) {
 15690          p.domain = DomainGeneric
 15691          p.add(0, func(m *_Encoding, v []interface{}) {
 15692              m.rexm(1, hcode(v[0]), addr(v[1]))
 15693              m.emit(0x89)
 15694              m.mrsd(lcode(v[0]), addr(v[1]), 1)
 15695          })
 15696      }
 15697      // MOVQ mm, r64
 15698      if isMM(v0) && isReg64(v1) {
 15699          self.require(ISA_MMX)
 15700          p.domain = DomainMMXSSE
 15701          p.add(0, func(m *_Encoding, v []interface{}) {
 15702              m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1]))
 15703              m.emit(0x0f)
 15704              m.emit(0x7e)
 15705              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 15706          })
 15707      }
 15708      // MOVQ r64, mm
 15709      if isReg64(v0) && isMM(v1) {
 15710          self.require(ISA_MMX)
 15711          p.domain = DomainMMXSSE
 15712          p.add(0, func(m *_Encoding, v []interface{}) {
 15713              m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
 15714              m.emit(0x0f)
 15715              m.emit(0x6e)
 15716              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 15717          })
 15718      }
 15719      // MOVQ mm, mm
 15720      if isMM(v0) && isMM(v1) {
 15721          self.require(ISA_MMX)
 15722          p.domain = DomainMMXSSE
 15723          p.add(0, func(m *_Encoding, v []interface{}) {
 15724              m.rexo(hcode(v[1]), v[0], false)
 15725              m.emit(0x0f)
 15726              m.emit(0x6f)
 15727              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 15728          })
 15729          p.add(0, func(m *_Encoding, v []interface{}) {
 15730              m.rexo(hcode(v[0]), v[1], false)
 15731              m.emit(0x0f)
 15732              m.emit(0x7f)
 15733              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 15734          })
 15735      }
 15736      // MOVQ m64, mm
 15737      if isM64(v0) && isMM(v1) {
 15738          self.require(ISA_MMX)
 15739          p.domain = DomainMMXSSE
 15740          p.add(0, func(m *_Encoding, v []interface{}) {
 15741              m.rexo(hcode(v[1]), addr(v[0]), false)
 15742              m.emit(0x0f)
 15743              m.emit(0x6f)
 15744              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 15745          })
 15746          p.add(0, func(m *_Encoding, v []interface{}) {
 15747              m.rexm(1, hcode(v[1]), addr(v[0]))
 15748              m.emit(0x0f)
 15749              m.emit(0x6e)
 15750              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 15751          })
 15752      }
 15753      // MOVQ mm, m64
 15754      if isMM(v0) && isM64(v1) {
 15755          self.require(ISA_MMX)
 15756          p.domain = DomainMMXSSE
 15757          p.add(0, func(m *_Encoding, v []interface{}) {
 15758              m.rexo(hcode(v[0]), addr(v[1]), false)
 15759              m.emit(0x0f)
 15760              m.emit(0x7f)
 15761              m.mrsd(lcode(v[0]), addr(v[1]), 1)
 15762          })
 15763          p.add(0, func(m *_Encoding, v []interface{}) {
 15764              m.rexm(1, hcode(v[0]), addr(v[1]))
 15765              m.emit(0x0f)
 15766              m.emit(0x7e)
 15767              m.mrsd(lcode(v[0]), addr(v[1]), 1)
 15768          })
 15769      }
 15770      // MOVQ xmm, r64
 15771      if isXMM(v0) && isReg64(v1) {
 15772          self.require(ISA_SSE2)
 15773          p.domain = DomainMMXSSE
 15774          p.add(0, func(m *_Encoding, v []interface{}) {
 15775              m.emit(0x66)
 15776              m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1]))
 15777              m.emit(0x0f)
 15778              m.emit(0x7e)
 15779              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 15780          })
 15781      }
 15782      // MOVQ r64, xmm
 15783      if isReg64(v0) && isXMM(v1) {
 15784          self.require(ISA_SSE2)
 15785          p.domain = DomainMMXSSE
 15786          p.add(0, func(m *_Encoding, v []interface{}) {
 15787              m.emit(0x66)
 15788              m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
 15789              m.emit(0x0f)
 15790              m.emit(0x6e)
 15791              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 15792          })
 15793      }
 15794      // MOVQ xmm, xmm
 15795      if isXMM(v0) && isXMM(v1) {
 15796          self.require(ISA_SSE2)
 15797          p.domain = DomainMMXSSE
 15798          p.add(0, func(m *_Encoding, v []interface{}) {
 15799              m.emit(0xf3)
 15800              m.rexo(hcode(v[1]), v[0], false)
 15801              m.emit(0x0f)
 15802              m.emit(0x7e)
 15803              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 15804          })
 15805          p.add(0, func(m *_Encoding, v []interface{}) {
 15806              m.emit(0x66)
 15807              m.rexo(hcode(v[0]), v[1], false)
 15808              m.emit(0x0f)
 15809              m.emit(0xd6)
 15810              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 15811          })
 15812      }
 15813      // MOVQ m64, xmm
 15814      if isM64(v0) && isXMM(v1) {
 15815          self.require(ISA_SSE2)
 15816          p.domain = DomainMMXSSE
 15817          p.add(0, func(m *_Encoding, v []interface{}) {
 15818              m.emit(0xf3)
 15819              m.rexo(hcode(v[1]), addr(v[0]), false)
 15820              m.emit(0x0f)
 15821              m.emit(0x7e)
 15822              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 15823          })
 15824          p.add(0, func(m *_Encoding, v []interface{}) {
 15825              m.emit(0x66)
 15826              m.rexm(1, hcode(v[1]), addr(v[0]))
 15827              m.emit(0x0f)
 15828              m.emit(0x6e)
 15829              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 15830          })
 15831      }
 15832      // MOVQ xmm, m64
 15833      if isXMM(v0) && isM64(v1) {
 15834          self.require(ISA_SSE2)
 15835          p.domain = DomainMMXSSE
 15836          p.add(0, func(m *_Encoding, v []interface{}) {
 15837              m.emit(0x66)
 15838              m.rexo(hcode(v[0]), addr(v[1]), false)
 15839              m.emit(0x0f)
 15840              m.emit(0xd6)
 15841              m.mrsd(lcode(v[0]), addr(v[1]), 1)
 15842          })
 15843          p.add(0, func(m *_Encoding, v []interface{}) {
 15844              m.emit(0x66)
 15845              m.rexm(1, hcode(v[0]), addr(v[1]))
 15846              m.emit(0x0f)
 15847              m.emit(0x7e)
 15848              m.mrsd(lcode(v[0]), addr(v[1]), 1)
 15849          })
 15850      }
 15851      if p.len == 0 {
 15852          panic("invalid operands for MOVQ")
 15853      }
 15854      return p
 15855  }
 15856  
 15857  // MOVQ2DQ performs "Move Quadword from MMX Technology to XMM Register".
 15858  //
 15859  // Mnemonic        : MOVQ2DQ
 15860  // Supported forms : (1 form)
 15861  //
 15862  //    * MOVQ2DQ mm, xmm    [SSE2]
 15863  //
func (self *Program) MOVQ2DQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVQ2DQ", 2, Operands { v0, v1 })
    // MOVQ2DQ mm, xmm
    if isMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: F3 0F D6 /r — ModRM.reg = destination xmm, ModRM.rm = source mm.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xd6)
            // ModRM byte: mod=11 (register-direct), reg=xmm, rm=mm.
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVQ2DQ")
    }
    return p
}
 15883  
 15884  // MOVSBL performs "Move with Sign-Extension".
 15885  //
 15886  // Mnemonic        : MOVSX
 15887  // Supported forms : (2 forms)
 15888  //
 15889  //    * MOVSBL r8, r32
 15890  //    * MOVSBL m8, r32
 15891  //
func (self *Program) MOVSBL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVSBL", 2, Operands { v0, v1 })
    // MOVSBL r8, r32
    if isReg8(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        // Encoding: 0F BE /r (MOVSX r32, r/m8); a REX prefix is forced for
        // byte registers that require it (SPL/BPL/SIL/DIL) via isReg8REX.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], isReg8REX(v[0]))
            m.emit(0x0f)
            m.emit(0xbe)
            // ModRM byte: mod=11, reg=dst r32, rm=src r8.
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVSBL m8, r32
    if isM8(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        // Encoding: 0F BE /r with a memory operand (ModRM/SIB/disp via mrsd).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xbe)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVSBL")
    }
    return p
}
 15919  
 15920  // MOVSBQ performs "Move with Sign-Extension".
 15921  //
 15922  // Mnemonic        : MOVSX
 15923  // Supported forms : (2 forms)
 15924  //
 15925  //    * MOVSBQ r8, r64
 15926  //    * MOVSBQ m8, r64
 15927  //
func (self *Program) MOVSBQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVSBQ", 2, Operands { v0, v1 })
    // MOVSBQ r8, r64
    if isReg8(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        // Encoding: REX.W 0F BE /r (MOVSX r64, r/m8); 0x48 sets REX.W, the
        // high register bits go into REX.R (dst) and REX.B (src).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
            m.emit(0x0f)
            m.emit(0xbe)
            // ModRM byte: mod=11, reg=dst r64, rm=src r8.
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVSBQ m8, r64
    if isM8(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        // Encoding: REX.W 0F BE /r with a memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))
            m.emit(0x0f)
            m.emit(0xbe)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVSBQ")
    }
    return p
}
 15955  
 15956  // MOVSBW performs "Move with Sign-Extension".
 15957  //
 15958  // Mnemonic        : MOVSX
 15959  // Supported forms : (2 forms)
 15960  //
 15961  //    * MOVSBW r8, r16
 15962  //    * MOVSBW m8, r16
 15963  //
func (self *Program) MOVSBW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVSBW", 2, Operands { v0, v1 })
    // MOVSBW r8, r16
    if isReg8(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        // Encoding: 66 0F BE /r (MOVSX r16, r/m8); 0x66 is the operand-size
        // override selecting the 16-bit destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], isReg8REX(v[0]))
            m.emit(0x0f)
            m.emit(0xbe)
            // ModRM byte: mod=11, reg=dst r16, rm=src r8.
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVSBW m8, r16
    if isM8(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        // Encoding: 66 0F BE /r with a memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xbe)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVSBW")
    }
    return p
}
 15993  
 15994  // MOVSD performs "Move Scalar Double-Precision Floating-Point Value".
 15995  //
 15996  // Mnemonic        : MOVSD
 15997  // Supported forms : (3 forms)
 15998  //
 15999  //    * MOVSD xmm, xmm    [SSE2]
 16000  //    * MOVSD m64, xmm    [SSE2]
 16001  //    * MOVSD xmm, m64    [SSE2]
 16002  //
func (self *Program) MOVSD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVSD", 2, Operands { v0, v1 })
    // MOVSD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Register-to-register has two equivalent encodings; both are
        // registered so the encoder can pick either.
        // Load form: F2 0F 10 /r (ModRM.reg = destination).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        // Store form: F2 0F 11 /r (ModRM.reg = source).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // MOVSD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: F2 0F 10 /r, memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x10)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // MOVSD xmm, m64
    if isXMM(v0) && isM64(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: F2 0F 11 /r, memory destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVSD")
    }
    return p
}
 16053  
 16054  // MOVSHDUP performs "Move Packed Single-FP High and Duplicate".
 16055  //
 16056  // Mnemonic        : MOVSHDUP
 16057  // Supported forms : (2 forms)
 16058  //
 16059  //    * MOVSHDUP xmm, xmm     [SSE3]
 16060  //    * MOVSHDUP m128, xmm    [SSE3]
 16061  //
func (self *Program) MOVSHDUP(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVSHDUP", 2, Operands { v0, v1 })
    // MOVSHDUP xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        // Encoding: F3 0F 16 /r, register form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x16)
            // ModRM byte: mod=11, reg=dst xmm, rm=src xmm.
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVSHDUP m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        // Encoding: F3 0F 16 /r, memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x16)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVSHDUP")
    }
    return p
}
 16093  
 16094  // MOVSLDUP performs "Move Packed Single-FP Low and Duplicate".
 16095  //
 16096  // Mnemonic        : MOVSLDUP
 16097  // Supported forms : (2 forms)
 16098  //
 16099  //    * MOVSLDUP xmm, xmm     [SSE3]
 16100  //    * MOVSLDUP m128, xmm    [SSE3]
 16101  //
func (self *Program) MOVSLDUP(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVSLDUP", 2, Operands { v0, v1 })
    // MOVSLDUP xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        // Encoding: F3 0F 12 /r, register form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x12)
            // ModRM byte: mod=11, reg=dst xmm, rm=src xmm.
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVSLDUP m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE3)
        p.domain = DomainMMXSSE
        // Encoding: F3 0F 12 /r, memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x12)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVSLDUP")
    }
    return p
}
 16133  
 16134  // MOVSLQ performs "Move Doubleword to Quadword with Sign-Extension".
 16135  //
 16136  // Mnemonic        : MOVSXD
 16137  // Supported forms : (2 forms)
 16138  //
 16139  //    * MOVSLQ r32, r64
 16140  //    * MOVSLQ m32, r64
 16141  //
func (self *Program) MOVSLQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVSLQ", 2, Operands { v0, v1 })
    // MOVSLQ r32, r64
    if isReg32(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        // Encoding: REX.W 63 /r (MOVSXD r64, r/m32); 0x48 sets REX.W, the
        // high register bits go into REX.R (dst) and REX.B (src).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
            m.emit(0x63)
            // ModRM byte: mod=11, reg=dst r64, rm=src r32.
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVSLQ m32, r64
    if isM32(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        // Encoding: REX.W 63 /r with a memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))
            m.emit(0x63)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVSLQ")
    }
    return p
}
 16167  
 16168  // MOVSS performs "Move Scalar Single-Precision Floating-Point Values".
 16169  //
 16170  // Mnemonic        : MOVSS
 16171  // Supported forms : (3 forms)
 16172  //
 16173  //    * MOVSS xmm, xmm    [SSE]
 16174  //    * MOVSS m32, xmm    [SSE]
 16175  //    * MOVSS xmm, m32    [SSE]
 16176  //
func (self *Program) MOVSS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVSS", 2, Operands { v0, v1 })
    // MOVSS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // Register-to-register has two equivalent encodings; both are
        // registered so the encoder can pick either.
        // Load form: F3 0F 10 /r (ModRM.reg = destination).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        // Store form: F3 0F 11 /r (ModRM.reg = source).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // MOVSS m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // Encoding: F3 0F 10 /r, memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x10)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // MOVSS xmm, m32
    if isXMM(v0) && isM32(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // Encoding: F3 0F 11 /r, memory destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVSS")
    }
    return p
}
 16227  
 16228  // MOVSWL performs "Move with Sign-Extension".
 16229  //
 16230  // Mnemonic        : MOVSX
 16231  // Supported forms : (2 forms)
 16232  //
 16233  //    * MOVSWL r16, r32
 16234  //    * MOVSWL m16, r32
 16235  //
func (self *Program) MOVSWL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVSWL", 2, Operands { v0, v1 })
    // MOVSWL r16, r32
    if isReg16(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        // Encoding: 0F BF /r (MOVSX r32, r/m16), register form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xbf)
            // ModRM byte: mod=11, reg=dst r32, rm=src r16.
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVSWL m16, r32
    if isM16(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        // Encoding: 0F BF /r with a memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xbf)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVSWL")
    }
    return p
}
 16263  
 16264  // MOVSWQ performs "Move with Sign-Extension".
 16265  //
 16266  // Mnemonic        : MOVSX
 16267  // Supported forms : (2 forms)
 16268  //
 16269  //    * MOVSWQ r16, r64
 16270  //    * MOVSWQ m16, r64
 16271  //
func (self *Program) MOVSWQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVSWQ", 2, Operands { v0, v1 })
    // MOVSWQ r16, r64
    if isReg16(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        // Encoding: REX.W 0F BF /r (MOVSX r64, r/m16); 0x48 sets REX.W, the
        // high register bits go into REX.R (dst) and REX.B (src).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
            m.emit(0x0f)
            m.emit(0xbf)
            // ModRM byte: mod=11, reg=dst r64, rm=src r16.
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVSWQ m16, r64
    if isM16(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        // Encoding: REX.W 0F BF /r with a memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))
            m.emit(0x0f)
            m.emit(0xbf)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVSWQ")
    }
    return p
}
 16299  
 16300  // MOVUPD performs "Move Unaligned Packed Double-Precision Floating-Point Values".
 16301  //
 16302  // Mnemonic        : MOVUPD
 16303  // Supported forms : (3 forms)
 16304  //
 16305  //    * MOVUPD xmm, xmm     [SSE2]
 16306  //    * MOVUPD m128, xmm    [SSE2]
 16307  //    * MOVUPD xmm, m128    [SSE2]
 16308  //
func (self *Program) MOVUPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVUPD", 2, Operands { v0, v1 })
    // MOVUPD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Register-to-register has two equivalent encodings; both are
        // registered so the encoder can pick either.
        // Load form: 66 0F 10 /r (ModRM.reg = destination).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        // Store form: 66 0F 11 /r (ModRM.reg = source).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // MOVUPD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 0F 10 /r, memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x10)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // MOVUPD xmm, m128
    if isXMM(v0) && isM128(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // Encoding: 66 0F 11 /r, memory destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVUPD")
    }
    return p
}
 16359  
 16360  // MOVUPS performs "Move Unaligned Packed Single-Precision Floating-Point Values".
 16361  //
 16362  // Mnemonic        : MOVUPS
 16363  // Supported forms : (3 forms)
 16364  //
 16365  //    * MOVUPS xmm, xmm     [SSE]
 16366  //    * MOVUPS m128, xmm    [SSE]
 16367  //    * MOVUPS xmm, m128    [SSE]
 16368  //
func (self *Program) MOVUPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVUPS", 2, Operands { v0, v1 })
    // MOVUPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // Register-to-register has two equivalent encodings; both are
        // registered so the encoder can pick either.
        // Load form: 0F 10 /r (ModRM.reg = destination).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        // Store form: 0F 11 /r (ModRM.reg = source).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // MOVUPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // Encoding: 0F 10 /r, memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x10)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // MOVUPS xmm, m128
    if isXMM(v0) && isM128(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // Encoding: 0F 11 /r, memory destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVUPS")
    }
    return p
}
 16415  
 16416  // MOVW performs "Move".
 16417  //
 16418  // Mnemonic        : MOV
 16419  // Supported forms : (5 forms)
 16420  //
 16421  //    * MOVW imm16, r16
 16422  //    * MOVW r16, r16
 16423  //    * MOVW m16, r16
 16424  //    * MOVW imm16, m16
 16425  //    * MOVW r16, m16
 16426  //
func (self *Program) MOVW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVW", 2, Operands { v0, v1 })
    // MOVW imm16, r16
    if isImm16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        // Two equivalent encodings; 0x66 is the operand-size override in all
        // 16-bit forms below.
        // Form 1: 66 C7 /0 iw (MOV r/m16, imm16).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0xc7)
            m.emit(0xc0 | lcode(v[1]))
            m.imm2(toImmAny(v[0]))
        })
        // Form 2: 66 B8+rw iw (MOV r16, imm16 — register encoded in opcode).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0xb8 | lcode(v[1]))
            m.imm2(toImmAny(v[0]))
        })
    }
    // MOVW r16, r16
    if isReg16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        // Two equivalent encodings (direction bit).
        // Store form: 66 89 /r (ModRM.reg = source).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x89)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
        // Load form: 66 8B /r (ModRM.reg = destination).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x8b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVW m16, r16
    if isM16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        // Encoding: 66 8B /r, memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x8b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // MOVW imm16, m16
    if isImm16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        // Encoding: 66 C7 /0 iw, memory destination (immediate follows ModRM/SIB/disp).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc7)
            m.mrsd(0, addr(v[1]), 1)
            m.imm2(toImmAny(v[0]))
        })
    }
    // MOVW r16, m16
    if isReg16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        // Encoding: 66 89 /r, memory destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x89)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVW")
    }
    return p
}
 16498  
 16499  // MOVZBL performs "Move with Zero-Extend".
 16500  //
 16501  // Mnemonic        : MOVZX
 16502  // Supported forms : (2 forms)
 16503  //
 16504  //    * MOVZBL r8, r32
 16505  //    * MOVZBL m8, r32
 16506  //
func (self *Program) MOVZBL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVZBL", 2, Operands { v0, v1 })
    // MOVZBL r8, r32
    if isReg8(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        // Encoding: 0F B6 /r (MOVZX r32, r/m8); a REX prefix is forced for
        // byte registers that require it (SPL/BPL/SIL/DIL) via isReg8REX.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], isReg8REX(v[0]))
            m.emit(0x0f)
            m.emit(0xb6)
            // ModRM byte: mod=11, reg=dst r32, rm=src r8.
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVZBL m8, r32
    if isM8(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        // Encoding: 0F B6 /r with a memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xb6)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVZBL")
    }
    return p
}
 16534  
 16535  // MOVZBQ performs "Move with Zero-Extend".
 16536  //
 16537  // Mnemonic        : MOVZX
 16538  // Supported forms : (2 forms)
 16539  //
 16540  //    * MOVZBQ r8, r64
 16541  //    * MOVZBQ m8, r64
 16542  //
func (self *Program) MOVZBQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVZBQ", 2, Operands { v0, v1 })
    // MOVZBQ r8, r64
    if isReg8(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        // Encoding: REX.W 0F B6 /r (MOVZX r64, r/m8); 0x48 sets REX.W, the
        // high register bits go into REX.R (dst) and REX.B (src).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
            m.emit(0x0f)
            m.emit(0xb6)
            // ModRM byte: mod=11, reg=dst r64, rm=src r8.
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVZBQ m8, r64
    if isM8(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        // Encoding: REX.W 0F B6 /r with a memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))
            m.emit(0x0f)
            m.emit(0xb6)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVZBQ")
    }
    return p
}
 16570  
 16571  // MOVZBW performs "Move with Zero-Extend".
 16572  //
 16573  // Mnemonic        : MOVZX
 16574  // Supported forms : (2 forms)
 16575  //
 16576  //    * MOVZBW r8, r16
 16577  //    * MOVZBW m8, r16
 16578  //
func (self *Program) MOVZBW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVZBW", 2, Operands { v0, v1 })
    // MOVZBW r8, r16
    if isReg8(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        // Encoding: 66 0F B6 /r (MOVZX r16, r/m8); 0x66 is the operand-size
        // override selecting the 16-bit destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], isReg8REX(v[0]))
            m.emit(0x0f)
            m.emit(0xb6)
            // ModRM byte: mod=11, reg=dst r16, rm=src r8.
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVZBW m8, r16
    if isM8(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        // Encoding: 66 0F B6 /r with a memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xb6)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVZBW")
    }
    return p
}
 16608  
 16609  // MOVZWL performs "Move with Zero-Extend".
 16610  //
 16611  // Mnemonic        : MOVZX
 16612  // Supported forms : (2 forms)
 16613  //
 16614  //    * MOVZWL r16, r32
 16615  //    * MOVZWL m16, r32
 16616  //
func (self *Program) MOVZWL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVZWL", 2, Operands { v0, v1 })
    // MOVZWL r16, r32
    if isReg16(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        // Encoding: 0F B7 /r (MOVZX r32, r/m16), register form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xb7)
            // ModRM byte: mod=11, reg=dst r32, rm=src r16.
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVZWL m16, r32
    if isM16(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        // Encoding: 0F B7 /r with a memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xb7)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for MOVZWL")
    }
    return p
}
 16644  
// MOVZWQ performs "Move with Zero-Extend".
//
// Mnemonic        : MOVZX
// Supported forms : (2 forms)
//
//    * MOVZWQ r16, r64
//    * MOVZWQ m16, r64
//
func (self *Program) MOVZWQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MOVZWQ", 2, Operands { v0, v1 })
    // MOVZWQ r16, r64
    if isReg16(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // REX.W (0x48) with the R/B extension bits folded in, then 0F B7 /r.
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
            m.emit(0x0f)
            m.emit(0xb7)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MOVZWQ m16, r64
    if isM16(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // rexm(1, ...) emits a REX prefix with W set for the 64-bit destination.
            m.rexm(1, hcode(v[1]), addr(v[0]))
            m.emit(0x0f)
            m.emit(0xb7)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoding was registered, so the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for MOVZWQ")
    }
    return p
}
 16680  
// MPSADBW performs "Compute Multiple Packed Sums of Absolute Difference".
//
// Mnemonic        : MPSADBW
// Supported forms : (2 forms)
//
//    * MPSADBW imm8, xmm, xmm     [SSE4.1]
//    * MPSADBW imm8, m128, xmm    [SSE4.1]
//
func (self *Program) MPSADBW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("MPSADBW", 3, Operands { v0, v1, v2 })
    // MPSADBW imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 3A 42 /r ib: mandatory 66 prefix, three-byte opcode,
            // register-register ModRM, then the imm8 block selector.
            m.emit(0x66)
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // MPSADBW imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same encoding with a memory source; the imm8 follows the ModRM/SIB/disp.
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x42)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoding was registered, so the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for MPSADBW")
    }
    return p
}
 16724  
// MULB performs "Unsigned Multiply".
//
// Mnemonic        : MUL
// Supported forms : (2 forms)
//
//    * MULB r8
//    * MULB m8
//
func (self *Program) MULB(v0 interface{}) *Instruction {
    p := self.alloc("MULB", 1, Operands { v0 })
    // MULB r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F6 /4: the REX prefix is forced when the byte register requires it
            // (per isReg8REX); 0xe0 = ModRM mod=11, reg=4 (the MUL group slot).
            m.rexo(0, v[0], isReg8REX(v[0]))
            m.emit(0xf6)
            m.emit(0xe0 | lcode(v[0]))
        })
    }
    // MULB m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F6 /4 with a memory operand; reg field 4 passed to mrsd.
            m.rexo(0, addr(v[0]), false)
            m.emit(0xf6)
            m.mrsd(4, addr(v[0]), 1)
        })
    }
    // No encoding was registered, so the operand type is unsupported.
    if p.len == 0 {
        panic("invalid operands for MULB")
    }
    return p
}
 16758  
// MULL performs "Unsigned Multiply".
//
// Mnemonic        : MUL
// Supported forms : (2 forms)
//
//    * MULL r32
//    * MULL m32
//
func (self *Program) MULL(v0 interface{}) *Instruction {
    p := self.alloc("MULL", 1, Operands { v0 })
    // MULL r32
    if isReg32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F7 /4: 0xe0 = ModRM mod=11, reg=4 (the MUL group slot).
            m.rexo(0, v[0], false)
            m.emit(0xf7)
            m.emit(0xe0 | lcode(v[0]))
        })
    }
    // MULL m32
    if isM32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F7 /4 with a memory operand; reg field 4 passed to mrsd.
            m.rexo(0, addr(v[0]), false)
            m.emit(0xf7)
            m.mrsd(4, addr(v[0]), 1)
        })
    }
    // No encoding was registered, so the operand type is unsupported.
    if p.len == 0 {
        panic("invalid operands for MULL")
    }
    return p
}
 16792  
// MULPD performs "Multiply Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : MULPD
// Supported forms : (2 forms)
//
//    * MULPD xmm, xmm     [SSE2]
//    * MULPD m128, xmm    [SSE2]
//
func (self *Program) MULPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MULPD", 2, Operands { v0, v1 })
    // MULPD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 59 /r: the 66 prefix selects the packed-double form.
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MULPD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 59 /r with a memory source operand.
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x59)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoding was registered, so the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for MULPD")
    }
    return p
}
 16832  
// MULPS performs "Multiply Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : MULPS
// Supported forms : (2 forms)
//
//    * MULPS xmm, xmm     [SSE]
//    * MULPS m128, xmm    [SSE]
//
func (self *Program) MULPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MULPS", 2, Operands { v0, v1 })
    // MULPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F 59 /r: no mandatory prefix for the packed-single form.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MULPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F 59 /r with a memory source operand.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x59)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoding was registered, so the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for MULPS")
    }
    return p
}
 16870  
// MULQ performs "Unsigned Multiply".
//
// Mnemonic        : MUL
// Supported forms : (2 forms)
//
//    * MULQ r64
//    * MULQ m64
//
func (self *Program) MULQ(v0 interface{}) *Instruction {
    p := self.alloc("MULQ", 1, Operands { v0 })
    // MULQ r64
    if isReg64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // REX.W (0x48, with the B extension bit) then F7 /4;
            // 0xe0 = ModRM mod=11, reg=4 (the MUL group slot).
            m.emit(0x48 | hcode(v[0]))
            m.emit(0xf7)
            m.emit(0xe0 | lcode(v[0]))
        })
    }
    // MULQ m64
    if isM64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // rexm(1, ...) emits REX with W set; then F7 /4 on memory.
            m.rexm(1, 0, addr(v[0]))
            m.emit(0xf7)
            m.mrsd(4, addr(v[0]), 1)
        })
    }
    // No encoding was registered, so the operand type is unsupported.
    if p.len == 0 {
        panic("invalid operands for MULQ")
    }
    return p
}
 16904  
// MULSD performs "Multiply Scalar Double-Precision Floating-Point Values".
//
// Mnemonic        : MULSD
// Supported forms : (2 forms)
//
//    * MULSD xmm, xmm    [SSE2]
//    * MULSD m64, xmm    [SSE2]
//
func (self *Program) MULSD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MULSD", 2, Operands { v0, v1 })
    // MULSD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F2 0F 59 /r: the F2 prefix selects the scalar-double form.
            m.emit(0xf2)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MULSD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F2 0F 59 /r with a memory source operand.
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x59)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoding was registered, so the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for MULSD")
    }
    return p
}
 16944  
// MULSS performs "Multiply Scalar Single-Precision Floating-Point Values".
//
// Mnemonic        : MULSS
// Supported forms : (2 forms)
//
//    * MULSS xmm, xmm    [SSE]
//    * MULSS m32, xmm    [SSE]
//
func (self *Program) MULSS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("MULSS", 2, Operands { v0, v1 })
    // MULSS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F3 0F 59 /r: the F3 prefix selects the scalar-single form.
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // MULSS m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F3 0F 59 /r with a memory source operand.
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x59)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoding was registered, so the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for MULSS")
    }
    return p
}
 16984  
// MULW performs "Unsigned Multiply".
//
// Mnemonic        : MUL
// Supported forms : (2 forms)
//
//    * MULW r16
//    * MULW m16
//
func (self *Program) MULW(v0 interface{}) *Instruction {
    p := self.alloc("MULW", 1, Operands { v0 })
    // MULW r16
    if isReg16(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 F7 /4: the 66 operand-size prefix selects the 16-bit form;
            // 0xe0 = ModRM mod=11, reg=4 (the MUL group slot).
            m.emit(0x66)
            m.rexo(0, v[0], false)
            m.emit(0xf7)
            m.emit(0xe0 | lcode(v[0]))
        })
    }
    // MULW m16
    if isM16(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 F7 /4 with a memory operand; reg field 4 passed to mrsd.
            m.emit(0x66)
            m.rexo(0, addr(v[0]), false)
            m.emit(0xf7)
            m.mrsd(4, addr(v[0]), 1)
        })
    }
    // No encoding was registered, so the operand type is unsupported.
    if p.len == 0 {
        panic("invalid operands for MULW")
    }
    return p
}
 17020  
// MULXL performs "Unsigned Multiply Without Affecting Flags".
//
// Mnemonic        : MULX
// Supported forms : (2 forms)
//
//    * MULXL r32, r32, r32    [BMI2]
//    * MULXL m32, r32, r32    [BMI2]
//
func (self *Program) MULXL(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("MULXL", 3, Operands { v0, v1, v2 })
    // MULXL r32, r32, r32
    if isReg32(v0) && isReg32(v1) && isReg32(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built three-byte VEX prefix (C4 ...), then opcode F6 and a
            // register-register ModRM. The XORs fold the inverted R/B register
            // extension bits and the vvvv field (v[1]) into the prefix bytes.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7b ^ (hlcode(v[1]) << 3))
            m.emit(0xf6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // MULXL m32, r32, r32
    if isM32(v0) && isReg32(v1) && isReg32(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same instruction with a memory source: vex3 builds the prefix,
            // mrsd emits the ModRM/SIB/displacement.
            m.vex3(0xc4, 0b10, 0x03, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xf6)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No encoding was registered, so the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for MULXL")
    }
    return p
}
 17058  
// MULXQ performs "Unsigned Multiply Without Affecting Flags".
//
// Mnemonic        : MULX
// Supported forms : (2 forms)
//
//    * MULXQ r64, r64, r64    [BMI2]
//    * MULXQ m64, r64, r64    [BMI2]
//
func (self *Program) MULXQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("MULXQ", 3, Operands { v0, v1, v2 })
    // MULXQ r64, r64, r64
    if isReg64(v0) && isReg64(v1) && isReg64(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built three-byte VEX prefix; differs from MULXL only in the
            // third byte's top bit (0xfb vs 0x7b), i.e. the 64-bit W form.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfb ^ (hlcode(v[1]) << 3))
            m.emit(0xf6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // MULXQ m64, r64, r64
    if isM64(v0) && isReg64(v1) && isReg64(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory-source form via vex3 (0x83 vs MULXL's 0x03 carries the W bit).
            m.vex3(0xc4, 0b10, 0x83, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xf6)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No encoding was registered, so the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for MULXQ")
    }
    return p
}
 17096  
// MWAIT performs "Monitor Wait".
//
// Mnemonic        : MWAIT
// Supported forms : (1 form)
//
//    * MWAIT    [MONITOR]
//
func (self *Program) MWAIT() *Instruction {
    p := self.alloc("MWAIT", 0, Operands {  })
    // MWAIT
    self.require(ISA_MONITOR)
    p.domain = DomainMisc
    p.add(0, func(m *_Encoding, v []interface{}) {
        // Fixed three-byte encoding: 0F 01 C9.
        m.emit(0x0f)
        m.emit(0x01)
        m.emit(0xc9)
    })
    return p
}
 17116  
// MWAITX performs "Monitor Wait with Timeout".
//
// Mnemonic        : MWAITX
// Supported forms : (1 form)
//
//    * MWAITX    [MONITORX]
//
func (self *Program) MWAITX() *Instruction {
    p := self.alloc("MWAITX", 0, Operands {  })
    // MWAITX
    self.require(ISA_MONITORX)
    p.domain = DomainMisc
    p.add(0, func(m *_Encoding, v []interface{}) {
        // Fixed three-byte encoding: 0F 01 FB.
        m.emit(0x0f)
        m.emit(0x01)
        m.emit(0xfb)
    })
    return p
}
 17136  
// NEGB performs "Two's Complement Negation".
//
// Mnemonic        : NEG
// Supported forms : (2 forms)
//
//    * NEGB r8
//    * NEGB m8
//
func (self *Program) NEGB(v0 interface{}) *Instruction {
    p := self.alloc("NEGB", 1, Operands { v0 })
    // NEGB r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F6 /3: REX forced when the byte register requires it (per isReg8REX);
            // 0xd8 = ModRM mod=11, reg=3 (the NEG group slot).
            m.rexo(0, v[0], isReg8REX(v[0]))
            m.emit(0xf6)
            m.emit(0xd8 | lcode(v[0]))
        })
    }
    // NEGB m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F6 /3 with a memory operand; reg field 3 passed to mrsd.
            m.rexo(0, addr(v[0]), false)
            m.emit(0xf6)
            m.mrsd(3, addr(v[0]), 1)
        })
    }
    // No encoding was registered, so the operand type is unsupported.
    if p.len == 0 {
        panic("invalid operands for NEGB")
    }
    return p
}
 17170  
// NEGL performs "Two's Complement Negation".
//
// Mnemonic        : NEG
// Supported forms : (2 forms)
//
//    * NEGL r32
//    * NEGL m32
//
func (self *Program) NEGL(v0 interface{}) *Instruction {
    p := self.alloc("NEGL", 1, Operands { v0 })
    // NEGL r32
    if isReg32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F7 /3: 0xd8 = ModRM mod=11, reg=3 (the NEG group slot).
            m.rexo(0, v[0], false)
            m.emit(0xf7)
            m.emit(0xd8 | lcode(v[0]))
        })
    }
    // NEGL m32
    if isM32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F7 /3 with a memory operand; reg field 3 passed to mrsd.
            m.rexo(0, addr(v[0]), false)
            m.emit(0xf7)
            m.mrsd(3, addr(v[0]), 1)
        })
    }
    // No encoding was registered, so the operand type is unsupported.
    if p.len == 0 {
        panic("invalid operands for NEGL")
    }
    return p
}
 17204  
// NEGQ performs "Two's Complement Negation".
//
// Mnemonic        : NEG
// Supported forms : (2 forms)
//
//    * NEGQ r64
//    * NEGQ m64
//
func (self *Program) NEGQ(v0 interface{}) *Instruction {
    p := self.alloc("NEGQ", 1, Operands { v0 })
    // NEGQ r64
    if isReg64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // REX.W (0x48, with the B extension bit) then F7 /3;
            // 0xd8 = ModRM mod=11, reg=3 (the NEG group slot).
            m.emit(0x48 | hcode(v[0]))
            m.emit(0xf7)
            m.emit(0xd8 | lcode(v[0]))
        })
    }
    // NEGQ m64
    if isM64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // rexm(1, ...) emits REX with W set; then F7 /3 on memory.
            m.rexm(1, 0, addr(v[0]))
            m.emit(0xf7)
            m.mrsd(3, addr(v[0]), 1)
        })
    }
    // No encoding was registered, so the operand type is unsupported.
    if p.len == 0 {
        panic("invalid operands for NEGQ")
    }
    return p
}
 17238  
// NEGW performs "Two's Complement Negation".
//
// Mnemonic        : NEG
// Supported forms : (2 forms)
//
//    * NEGW r16
//    * NEGW m16
//
func (self *Program) NEGW(v0 interface{}) *Instruction {
    p := self.alloc("NEGW", 1, Operands { v0 })
    // NEGW r16
    if isReg16(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 F7 /3: the 66 operand-size prefix selects the 16-bit form;
            // 0xd8 = ModRM mod=11, reg=3 (the NEG group slot).
            m.emit(0x66)
            m.rexo(0, v[0], false)
            m.emit(0xf7)
            m.emit(0xd8 | lcode(v[0]))
        })
    }
    // NEGW m16
    if isM16(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 F7 /3 with a memory operand; reg field 3 passed to mrsd.
            m.emit(0x66)
            m.rexo(0, addr(v[0]), false)
            m.emit(0xf7)
            m.mrsd(3, addr(v[0]), 1)
        })
    }
    // No encoding was registered, so the operand type is unsupported.
    if p.len == 0 {
        panic("invalid operands for NEGW")
    }
    return p
}
 17274  
// NOP performs "No Operation".
//
// Mnemonic        : NOP
// Supported forms : (1 form)
//
//    * NOP
//
func (self *Program) NOP() *Instruction {
    p := self.alloc("NOP", 0, Operands {  })
    // NOP
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        // Single-byte encoding: 90.
        m.emit(0x90)
    })
    return p
}
 17291  
// NOTB performs "One's Complement Negation".
//
// Mnemonic        : NOT
// Supported forms : (2 forms)
//
//    * NOTB r8
//    * NOTB m8
//
func (self *Program) NOTB(v0 interface{}) *Instruction {
    p := self.alloc("NOTB", 1, Operands { v0 })
    // NOTB r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F6 /2: REX forced when the byte register requires it (per isReg8REX);
            // 0xd0 = ModRM mod=11, reg=2 (the NOT group slot).
            m.rexo(0, v[0], isReg8REX(v[0]))
            m.emit(0xf6)
            m.emit(0xd0 | lcode(v[0]))
        })
    }
    // NOTB m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F6 /2 with a memory operand; reg field 2 passed to mrsd.
            m.rexo(0, addr(v[0]), false)
            m.emit(0xf6)
            m.mrsd(2, addr(v[0]), 1)
        })
    }
    // No encoding was registered, so the operand type is unsupported.
    if p.len == 0 {
        panic("invalid operands for NOTB")
    }
    return p
}
 17325  
// NOTL performs "One's Complement Negation".
//
// Mnemonic        : NOT
// Supported forms : (2 forms)
//
//    * NOTL r32
//    * NOTL m32
//
func (self *Program) NOTL(v0 interface{}) *Instruction {
    p := self.alloc("NOTL", 1, Operands { v0 })
    // NOTL r32
    if isReg32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F7 /2: 0xd0 = ModRM mod=11, reg=2 (the NOT group slot).
            m.rexo(0, v[0], false)
            m.emit(0xf7)
            m.emit(0xd0 | lcode(v[0]))
        })
    }
    // NOTL m32
    if isM32(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // F7 /2 with a memory operand; reg field 2 passed to mrsd.
            m.rexo(0, addr(v[0]), false)
            m.emit(0xf7)
            m.mrsd(2, addr(v[0]), 1)
        })
    }
    // No encoding was registered, so the operand type is unsupported.
    if p.len == 0 {
        panic("invalid operands for NOTL")
    }
    return p
}
 17359  
// NOTQ performs "One's Complement Negation".
//
// Mnemonic        : NOT
// Supported forms : (2 forms)
//
//    * NOTQ r64
//    * NOTQ m64
//
func (self *Program) NOTQ(v0 interface{}) *Instruction {
    p := self.alloc("NOTQ", 1, Operands { v0 })
    // NOTQ r64
    if isReg64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // REX.W (0x48, with the B extension bit) then F7 /2;
            // 0xd0 = ModRM mod=11, reg=2 (the NOT group slot).
            m.emit(0x48 | hcode(v[0]))
            m.emit(0xf7)
            m.emit(0xd0 | lcode(v[0]))
        })
    }
    // NOTQ m64
    if isM64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // rexm(1, ...) emits REX with W set; then F7 /2 on memory.
            m.rexm(1, 0, addr(v[0]))
            m.emit(0xf7)
            m.mrsd(2, addr(v[0]), 1)
        })
    }
    // No encoding was registered, so the operand type is unsupported.
    if p.len == 0 {
        panic("invalid operands for NOTQ")
    }
    return p
}
 17393  
// NOTW performs "One's Complement Negation".
//
// Mnemonic        : NOT
// Supported forms : (2 forms)
//
//    * NOTW r16
//    * NOTW m16
//
func (self *Program) NOTW(v0 interface{}) *Instruction {
    p := self.alloc("NOTW", 1, Operands { v0 })
    // NOTW r16
    if isReg16(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 F7 /2: the 66 operand-size prefix selects the 16-bit form;
            // 0xd0 = ModRM mod=11, reg=2 (the NOT group slot).
            m.emit(0x66)
            m.rexo(0, v[0], false)
            m.emit(0xf7)
            m.emit(0xd0 | lcode(v[0]))
        })
    }
    // NOTW m16
    if isM16(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 F7 /2 with a memory operand; reg field 2 passed to mrsd.
            m.emit(0x66)
            m.rexo(0, addr(v[0]), false)
            m.emit(0xf7)
            m.mrsd(2, addr(v[0]), 1)
        })
    }
    // No encoding was registered, so the operand type is unsupported.
    if p.len == 0 {
        panic("invalid operands for NOTW")
    }
    return p
}
 17429  
// ORB performs "Logical Inclusive OR".
//
// Mnemonic        : OR
// Supported forms : (6 forms)
//
//    * ORB imm8, al
//    * ORB imm8, r8
//    * ORB r8, r8
//    * ORB m8, r8
//    * ORB imm8, m8
//    * ORB r8, m8
//
func (self *Program) ORB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ORB", 2, Operands { v0, v1 })
    // ORB imm8, al
    if isImm8(v0) && v1 == AL {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Short accumulator form: 0C ib.
            m.emit(0x0c)
            m.imm1(toImmAny(v[0]))
        })
    }
    // ORB imm8, r8
    if isImm8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 80 /1 ib: 0xc8 = ModRM mod=11, reg=1 (the OR group slot).
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0x80)
            m.emit(0xc8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // ORB r8, r8
    if isReg8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        // Two equivalent encodings are registered (08 /r and 0A /r with the
        // operand roles swapped); the encoder can pick either.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], isReg8REX(v[0]) || isReg8REX(v[1]))
            m.emit(0x08)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], isReg8REX(v[0]) || isReg8REX(v[1]))
            m.emit(0x0a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // ORB m8, r8
    if isM8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0A /r: memory source, register destination.
            m.rexo(hcode(v[1]), addr(v[0]), isReg8REX(v[1]))
            m.emit(0x0a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // ORB imm8, m8
    if isImm8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 80 /1 ib with a memory destination.
            m.rexo(0, addr(v[1]), false)
            m.emit(0x80)
            m.mrsd(1, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // ORB r8, m8
    if isReg8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 08 /r: register source, memory destination.
            m.rexo(hcode(v[0]), addr(v[1]), isReg8REX(v[0]))
            m.emit(0x08)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No encoding was registered, so the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for ORB")
    }
    return p
}
 17509  
// ORL performs "Logical Inclusive OR".
//
// Mnemonic        : OR
// Supported forms : (8 forms)
//
//    * ORL imm32, eax
//    * ORL imm8, r32
//    * ORL imm32, r32
//    * ORL r32, r32
//    * ORL m32, r32
//    * ORL imm8, m32
//    * ORL imm32, m32
//    * ORL r32, m32
//
func (self *Program) ORL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ORL", 2, Operands { v0, v1 })
    // ORL imm32, eax
    if isImm32(v0) && v1 == EAX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Short accumulator form: 0D id.
            m.emit(0x0d)
            m.imm4(toImmAny(v[0]))
        })
    }
    // ORL imm8, r32
    if isImm8Ext(v0, 4) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 83 /1 ib: sign-extended imm8 form (isImm8Ext checks the value
            // fits when widened to 4 bytes); 0xc8 = ModRM mod=11, reg=1.
            m.rexo(0, v[1], false)
            m.emit(0x83)
            m.emit(0xc8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // ORL imm32, r32
    if isImm32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 81 /1 id: full 32-bit immediate form.
            m.rexo(0, v[1], false)
            m.emit(0x81)
            m.emit(0xc8 | lcode(v[1]))
            m.imm4(toImmAny(v[0]))
        })
    }
    // ORL r32, r32
    if isReg32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        // Two equivalent encodings are registered (09 /r and 0B /r with the
        // operand roles swapped); the encoder can pick either.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x09)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // ORL m32, r32
    if isM32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0B /r: memory source, register destination.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // ORL imm8, m32
    if isImm8Ext(v0, 4) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 83 /1 ib with a memory destination.
            m.rexo(0, addr(v[1]), false)
            m.emit(0x83)
            m.mrsd(1, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // ORL imm32, m32
    if isImm32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 81 /1 id with a memory destination.
            m.rexo(0, addr(v[1]), false)
            m.emit(0x81)
            m.mrsd(1, addr(v[1]), 1)
            m.imm4(toImmAny(v[0]))
        })
    }
    // ORL r32, m32
    if isReg32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 09 /r: register source, memory destination.
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x09)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No encoding was registered, so the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for ORL")
    }
    return p
}
 17611  
// ORPD performs "Bitwise Logical OR of Double-Precision Floating-Point Values".
//
// Mnemonic        : ORPD
// Supported forms : (2 forms)
//
//    * ORPD xmm, xmm     [SSE2]
//    * ORPD m128, xmm    [SSE2]
//
func (self *Program) ORPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ORPD", 2, Operands { v0, v1 })
    // ORPD xmm, xmm — opcode: 66 0F 56 /r
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x56)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // ORPD m128, xmm — opcode: 66 0F 56 /r
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x56)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no operand form matched: the supplied operand combination is not encodable
    if p.len == 0 {
        panic("invalid operands for ORPD")
    }
    return p
}
 17651  
// ORPS performs "Bitwise Logical OR of Single-Precision Floating-Point Values".
//
// Mnemonic        : ORPS
// Supported forms : (2 forms)
//
//    * ORPS xmm, xmm     [SSE]
//    * ORPS m128, xmm    [SSE]
//
func (self *Program) ORPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ORPS", 2, Operands { v0, v1 })
    // ORPS xmm, xmm — opcode: 0F 56 /r (no prefix, unlike ORPD's 66)
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x56)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // ORPS m128, xmm — opcode: 0F 56 /r
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x56)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no operand form matched: the supplied operand combination is not encodable
    if p.len == 0 {
        panic("invalid operands for ORPS")
    }
    return p
}
 17689  
// ORQ performs "Logical Inclusive OR".
//
// Mnemonic        : OR
// Supported forms : (8 forms)
//
//    * ORQ imm32, rax
//    * ORQ imm8, r64
//    * ORQ imm32, r64
//    * ORQ r64, r64
//    * ORQ m64, r64
//    * ORQ imm8, m64
//    * ORQ imm32, m64
//    * ORQ r64, m64
//
func (self *Program) ORQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ORQ", 2, Operands { v0, v1 })
    // ORQ imm32, rax — short form, opcode: REX.W 0D id (imm32 sign-extended to 64 bits)
    if isImm32(v0) && v1 == RAX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48)
            m.emit(0x0d)
            m.imm4(toImmAny(v[0]))
        })
    }
    // ORQ imm8, r64 — sign-extended imm8, opcode: REX.W 83 /1 ib
    if isImm8Ext(v0, 8) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0x83)
            m.emit(0xc8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // ORQ imm32, r64 — sign-extended imm32, opcode: REX.W 81 /1 id
    if isImm32Ext(v0, 8) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0x81)
            m.emit(0xc8 | lcode(v[1]))
            m.imm4(toImmAny(v[0]))
        })
    }
    // ORQ r64, r64 — opcode: REX.W 09 /r (MR form)
    if isReg64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1]))
            m.emit(0x09)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
        // alternate encoding: REX.W 0B /r (RM form); register-to-register OR can use either direction
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
            m.emit(0x0b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // ORQ m64, r64 — opcode: REX.W 0B /r
    if isM64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))
            m.emit(0x0b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // ORQ imm8, m64 — sign-extended imm8, opcode: REX.W 83 /1 ib
    if isImm8Ext(v0, 8) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0x83)
            m.mrsd(1, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // ORQ imm32, m64 — sign-extended imm32, opcode: REX.W 81 /1 id
    if isImm32Ext(v0, 8) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0x81)
            m.mrsd(1, addr(v[1]), 1)
            m.imm4(toImmAny(v[0]))
        })
    }
    // ORQ r64, m64 — opcode: REX.W 09 /r
    if isReg64(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[0]), addr(v[1]))
            m.emit(0x09)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // no operand form matched: the supplied operand combination is not encodable
    if p.len == 0 {
        panic("invalid operands for ORQ")
    }
    return p
}
 17792  
// ORW performs "Logical Inclusive OR".
//
// Mnemonic        : OR
// Supported forms : (8 forms)
//
//    * ORW imm16, ax
//    * ORW imm8, r16
//    * ORW imm16, r16
//    * ORW r16, r16
//    * ORW m16, r16
//    * ORW imm8, m16
//    * ORW imm16, m16
//    * ORW r16, m16
//
func (self *Program) ORW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ORW", 2, Operands { v0, v1 })
    // ORW imm16, ax — short form, opcode: 66 0D iw (0x66 is the operand-size override prefix)
    if isImm16(v0) && v1 == AX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.emit(0x0d)
            m.imm2(toImmAny(v[0]))
        })
    }
    // ORW imm8, r16 — sign-extended imm8, opcode: 66 83 /1 ib
    if isImm8Ext(v0, 2) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0x83)
            m.emit(0xc8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // ORW imm16, r16 — opcode: 66 81 /1 iw
    if isImm16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0x81)
            m.emit(0xc8 | lcode(v[1]))
            m.imm2(toImmAny(v[0]))
        })
    }
    // ORW r16, r16 — opcode: 66 09 /r (MR form)
    if isReg16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x09)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
        // alternate encoding: 66 0B /r (RM form); register-to-register OR can use either direction
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // ORW m16, r16 — opcode: 66 0B /r
    if isM16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // ORW imm8, m16 — sign-extended imm8, opcode: 66 83 /1 ib
    if isImm8Ext(v0, 2) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0x83)
            m.mrsd(1, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // ORW imm16, m16 — opcode: 66 81 /1 iw
    if isImm16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0x81)
            m.mrsd(1, addr(v[1]), 1)
            m.imm2(toImmAny(v[0]))
        })
    }
    // ORW r16, m16 — opcode: 66 09 /r
    if isReg16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x09)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // no operand form matched: the supplied operand combination is not encodable
    if p.len == 0 {
        panic("invalid operands for ORW")
    }
    return p
}
 17903  
// PABSB performs "Packed Absolute Value of Byte Integers".
//
// Mnemonic        : PABSB
// Supported forms : (4 forms)
//
//    * PABSB mm, mm       [SSSE3]
//    * PABSB m64, mm      [SSSE3]
//    * PABSB xmm, xmm     [SSSE3]
//    * PABSB m128, xmm    [SSSE3]
//
func (self *Program) PABSB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PABSB", 2, Operands { v0, v1 })
    // PABSB mm, mm — MMX form, opcode: 0F 38 1C /r
    if isMM(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x1c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PABSB m64, mm — MMX form, opcode: 0F 38 1C /r
    if isM64(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x1c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PABSB xmm, xmm — XMM form, opcode: 66 0F 38 1C /r
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x1c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PABSB m128, xmm — XMM form, opcode: 66 0F 38 1C /r
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x1c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no operand form matched: the supplied operand combination is not encodable
    if p.len == 0 {
        panic("invalid operands for PABSB")
    }
    return p
}
 17971  
// PABSD performs "Packed Absolute Value of Doubleword Integers".
//
// Mnemonic        : PABSD
// Supported forms : (4 forms)
//
//    * PABSD mm, mm       [SSSE3]
//    * PABSD m64, mm      [SSSE3]
//    * PABSD xmm, xmm     [SSSE3]
//    * PABSD m128, xmm    [SSSE3]
//
func (self *Program) PABSD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PABSD", 2, Operands { v0, v1 })
    // PABSD mm, mm — MMX form, opcode: 0F 38 1E /r
    if isMM(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x1e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PABSD m64, mm — MMX form, opcode: 0F 38 1E /r
    if isM64(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x1e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PABSD xmm, xmm — XMM form, opcode: 66 0F 38 1E /r
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x1e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PABSD m128, xmm — XMM form, opcode: 66 0F 38 1E /r
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x1e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no operand form matched: the supplied operand combination is not encodable
    if p.len == 0 {
        panic("invalid operands for PABSD")
    }
    return p
}
 18039  
// PABSW performs "Packed Absolute Value of Word Integers".
//
// Mnemonic        : PABSW
// Supported forms : (4 forms)
//
//    * PABSW mm, mm       [SSSE3]
//    * PABSW m64, mm      [SSSE3]
//    * PABSW xmm, xmm     [SSSE3]
//    * PABSW m128, xmm    [SSSE3]
//
func (self *Program) PABSW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PABSW", 2, Operands { v0, v1 })
    // PABSW mm, mm — MMX form, opcode: 0F 38 1D /r
    if isMM(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x1d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PABSW m64, mm — MMX form, opcode: 0F 38 1D /r
    if isM64(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x1d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PABSW xmm, xmm — XMM form, opcode: 66 0F 38 1D /r
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x1d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PABSW m128, xmm — XMM form, opcode: 66 0F 38 1D /r
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x1d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no operand form matched: the supplied operand combination is not encodable
    if p.len == 0 {
        panic("invalid operands for PABSW")
    }
    return p
}
 18107  
// PACKSSDW performs "Pack Doublewords into Words with Signed Saturation".
//
// Mnemonic        : PACKSSDW
// Supported forms : (4 forms)
//
//    * PACKSSDW mm, mm       [MMX]
//    * PACKSSDW m64, mm      [MMX]
//    * PACKSSDW xmm, xmm     [SSE2]
//    * PACKSSDW m128, xmm    [SSE2]
//
func (self *Program) PACKSSDW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PACKSSDW", 2, Operands { v0, v1 })
    // PACKSSDW mm, mm — MMX form, opcode: 0F 6B /r
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x6b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PACKSSDW m64, mm — MMX form, opcode: 0F 6B /r
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x6b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PACKSSDW xmm, xmm — XMM form, opcode: 66 0F 6B /r
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x6b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PACKSSDW m128, xmm — XMM form, opcode: 66 0F 6B /r
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x6b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no operand form matched: the supplied operand combination is not encodable
    if p.len == 0 {
        panic("invalid operands for PACKSSDW")
    }
    return p
}
 18171  
// PACKSSWB performs "Pack Words into Bytes with Signed Saturation".
//
// Mnemonic        : PACKSSWB
// Supported forms : (4 forms)
//
//    * PACKSSWB mm, mm       [MMX]
//    * PACKSSWB m64, mm      [MMX]
//    * PACKSSWB xmm, xmm     [SSE2]
//    * PACKSSWB m128, xmm    [SSE2]
//
func (self *Program) PACKSSWB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PACKSSWB", 2, Operands { v0, v1 })
    // PACKSSWB mm, mm — MMX form, opcode: 0F 63 /r
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x63)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PACKSSWB m64, mm — MMX form, opcode: 0F 63 /r
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x63)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PACKSSWB xmm, xmm — XMM form, opcode: 66 0F 63 /r
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x63)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PACKSSWB m128, xmm — XMM form, opcode: 66 0F 63 /r
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x63)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no operand form matched: the supplied operand combination is not encodable
    if p.len == 0 {
        panic("invalid operands for PACKSSWB")
    }
    return p
}
 18235  
// PACKUSDW performs "Pack Doublewords into Words with Unsigned Saturation".
//
// Mnemonic        : PACKUSDW
// Supported forms : (2 forms)
//
//    * PACKUSDW xmm, xmm     [SSE4.1]
//    * PACKUSDW m128, xmm    [SSE4.1]
//
func (self *Program) PACKUSDW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PACKUSDW", 2, Operands { v0, v1 })
    // PACKUSDW xmm, xmm — opcode: 66 0F 38 2B /r (SSE4.1 only; no MMX form exists)
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x2b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PACKUSDW m128, xmm — opcode: 66 0F 38 2B /r
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x2b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no operand form matched: the supplied operand combination is not encodable
    if p.len == 0 {
        panic("invalid operands for PACKUSDW")
    }
    return p
}
 18277  
// PACKUSWB performs "Pack Words into Bytes with Unsigned Saturation".
//
// Mnemonic        : PACKUSWB
// Supported forms : (4 forms)
//
//    * PACKUSWB mm, mm       [MMX]
//    * PACKUSWB m64, mm      [MMX]
//    * PACKUSWB xmm, xmm     [SSE2]
//    * PACKUSWB m128, xmm    [SSE2]
//
func (self *Program) PACKUSWB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PACKUSWB", 2, Operands { v0, v1 })
    // PACKUSWB mm, mm — MMX form, opcode: 0F 67 /r
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x67)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PACKUSWB m64, mm — MMX form, opcode: 0F 67 /r
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x67)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PACKUSWB xmm, xmm — XMM form, opcode: 66 0F 67 /r
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x67)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PACKUSWB m128, xmm — XMM form, opcode: 66 0F 67 /r
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x67)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no operand form matched: the supplied operand combination is not encodable
    if p.len == 0 {
        panic("invalid operands for PACKUSWB")
    }
    return p
}
 18341  
// PADDB performs "Add Packed Byte Integers".
//
// Mnemonic        : PADDB
// Supported forms : (4 forms)
//
//    * PADDB mm, mm       [MMX]
//    * PADDB m64, mm      [MMX]
//    * PADDB xmm, xmm     [SSE2]
//    * PADDB m128, xmm    [SSE2]
//
func (self *Program) PADDB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PADDB", 2, Operands { v0, v1 })
    // PADDB mm, mm — MMX form, opcode: 0F FC /r
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xfc)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PADDB m64, mm — MMX form, opcode: 0F FC /r
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xfc)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PADDB xmm, xmm — XMM form, opcode: 66 0F FC /r
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xfc)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PADDB m128, xmm — XMM form, opcode: 66 0F FC /r
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xfc)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no operand form matched: the supplied operand combination is not encodable
    if p.len == 0 {
        panic("invalid operands for PADDB")
    }
    return p
}
 18405  
// PADDD performs "Add Packed Doubleword Integers".
//
// Mnemonic        : PADDD
// Supported forms : (4 forms)
//
//    * PADDD mm, mm       [MMX]
//    * PADDD m64, mm      [MMX]
//    * PADDD xmm, xmm     [SSE2]
//    * PADDD m128, xmm    [SSE2]
//
func (self *Program) PADDD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PADDD", 2, Operands { v0, v1 })
    // PADDD mm, mm — MMX form, opcode: 0F FE /r
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xfe)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PADDD m64, mm — MMX form, opcode: 0F FE /r
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xfe)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PADDD xmm, xmm — XMM form, opcode: 66 0F FE /r
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xfe)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PADDD m128, xmm — XMM form, opcode: 66 0F FE /r
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xfe)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no operand form matched: the supplied operand combination is not encodable
    if p.len == 0 {
        panic("invalid operands for PADDD")
    }
    return p
}
 18469  
// PADDQ performs "Add Packed Quadword Integers".
//
// Mnemonic        : PADDQ
// Supported forms : (4 forms)
//
//    * PADDQ mm, mm       [SSE2]
//    * PADDQ m64, mm      [SSE2]
//    * PADDQ xmm, xmm     [SSE2]
//    * PADDQ m128, xmm    [SSE2]
//
// Operands follow source-first order: v0 is the source, v1 the destination.
// Note: unlike the other PADD* mnemonics, even the MMX-register forms require SSE2.
func (self *Program) PADDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PADDQ", 2, Operands { v0, v1 })
    // PADDQ mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                 // REX prefix, emitted only when needed
            m.emit(0x0f)                                     // opcode: 0F D4 /r
            m.emit(0xd4)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11 (register), reg=v1, rm=v0
        })
    }
    // PADDQ m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)           // REX prefix, emitted only when needed
            m.emit(0x0f)                                     // opcode: 0F D4 /r
            m.emit(0xd4)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM + SIB/displacement for the memory operand
        })
    }
    // PADDQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                     // 66 prefix selects the XMM form
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)                                     // opcode: 66 0F D4 /r
            m.emit(0xd4)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11 (register), reg=v1, rm=v0
        })
    }
    // PADDQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                     // 66 prefix selects the XMM form
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)                                     // opcode: 66 0F D4 /r
            m.emit(0xd4)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM + SIB/displacement for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for PADDQ")                  // no form matched the supplied operand types
    }
    return p
}
 18533  
// PADDSB performs "Add Packed Signed Byte Integers with Signed Saturation".
//
// Mnemonic        : PADDSB
// Supported forms : (4 forms)
//
//    * PADDSB mm, mm       [MMX]
//    * PADDSB m64, mm      [MMX]
//    * PADDSB xmm, xmm     [SSE2]
//    * PADDSB m128, xmm    [SSE2]
//
// Operands follow source-first order: v0 is the source, v1 the destination.
func (self *Program) PADDSB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PADDSB", 2, Operands { v0, v1 })
    // PADDSB mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                 // REX prefix, emitted only when needed
            m.emit(0x0f)                                     // opcode: 0F EC /r
            m.emit(0xec)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11 (register), reg=v1, rm=v0
        })
    }
    // PADDSB m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)           // REX prefix, emitted only when needed
            m.emit(0x0f)                                     // opcode: 0F EC /r
            m.emit(0xec)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM + SIB/displacement for the memory operand
        })
    }
    // PADDSB xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                     // 66 prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)                                     // opcode: 66 0F EC /r
            m.emit(0xec)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11 (register), reg=v1, rm=v0
        })
    }
    // PADDSB m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                     // 66 prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)                                     // opcode: 66 0F EC /r
            m.emit(0xec)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM + SIB/displacement for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for PADDSB")                 // no form matched the supplied operand types
    }
    return p
}
 18597  
// PADDSW performs "Add Packed Signed Word Integers with Signed Saturation".
//
// Mnemonic        : PADDSW
// Supported forms : (4 forms)
//
//    * PADDSW mm, mm       [MMX]
//    * PADDSW m64, mm      [MMX]
//    * PADDSW xmm, xmm     [SSE2]
//    * PADDSW m128, xmm    [SSE2]
//
// Operands follow source-first order: v0 is the source, v1 the destination.
func (self *Program) PADDSW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PADDSW", 2, Operands { v0, v1 })
    // PADDSW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                 // REX prefix, emitted only when needed
            m.emit(0x0f)                                     // opcode: 0F ED /r
            m.emit(0xed)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11 (register), reg=v1, rm=v0
        })
    }
    // PADDSW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)           // REX prefix, emitted only when needed
            m.emit(0x0f)                                     // opcode: 0F ED /r
            m.emit(0xed)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM + SIB/displacement for the memory operand
        })
    }
    // PADDSW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                     // 66 prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)                                     // opcode: 66 0F ED /r
            m.emit(0xed)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11 (register), reg=v1, rm=v0
        })
    }
    // PADDSW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                     // 66 prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)                                     // opcode: 66 0F ED /r
            m.emit(0xed)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM + SIB/displacement for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for PADDSW")                 // no form matched the supplied operand types
    }
    return p
}
 18661  
// PADDUSB performs "Add Packed Unsigned Byte Integers with Unsigned Saturation".
//
// Mnemonic        : PADDUSB
// Supported forms : (4 forms)
//
//    * PADDUSB mm, mm       [MMX]
//    * PADDUSB m64, mm      [MMX]
//    * PADDUSB xmm, xmm     [SSE2]
//    * PADDUSB m128, xmm    [SSE2]
//
// Operands follow source-first order: v0 is the source, v1 the destination.
func (self *Program) PADDUSB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PADDUSB", 2, Operands { v0, v1 })
    // PADDUSB mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                 // REX prefix, emitted only when needed
            m.emit(0x0f)                                     // opcode: 0F DC /r
            m.emit(0xdc)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11 (register), reg=v1, rm=v0
        })
    }
    // PADDUSB m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)           // REX prefix, emitted only when needed
            m.emit(0x0f)                                     // opcode: 0F DC /r
            m.emit(0xdc)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM + SIB/displacement for the memory operand
        })
    }
    // PADDUSB xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                     // 66 prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)                                     // opcode: 66 0F DC /r
            m.emit(0xdc)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11 (register), reg=v1, rm=v0
        })
    }
    // PADDUSB m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                     // 66 prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)                                     // opcode: 66 0F DC /r
            m.emit(0xdc)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM + SIB/displacement for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for PADDUSB")                // no form matched the supplied operand types
    }
    return p
}
 18725  
// PADDUSW performs "Add Packed Unsigned Word Integers with Unsigned Saturation".
//
// Mnemonic        : PADDUSW
// Supported forms : (4 forms)
//
//    * PADDUSW mm, mm       [MMX]
//    * PADDUSW m64, mm      [MMX]
//    * PADDUSW xmm, xmm     [SSE2]
//    * PADDUSW m128, xmm    [SSE2]
//
// Operands follow source-first order: v0 is the source, v1 the destination.
func (self *Program) PADDUSW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PADDUSW", 2, Operands { v0, v1 })
    // PADDUSW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                 // REX prefix, emitted only when needed
            m.emit(0x0f)                                     // opcode: 0F DD /r
            m.emit(0xdd)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11 (register), reg=v1, rm=v0
        })
    }
    // PADDUSW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)           // REX prefix, emitted only when needed
            m.emit(0x0f)                                     // opcode: 0F DD /r
            m.emit(0xdd)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM + SIB/displacement for the memory operand
        })
    }
    // PADDUSW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                     // 66 prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)                                     // opcode: 66 0F DD /r
            m.emit(0xdd)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11 (register), reg=v1, rm=v0
        })
    }
    // PADDUSW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                     // 66 prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)                                     // opcode: 66 0F DD /r
            m.emit(0xdd)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM + SIB/displacement for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for PADDUSW")                // no form matched the supplied operand types
    }
    return p
}
 18789  
// PADDW performs "Add Packed Word Integers".
//
// Mnemonic        : PADDW
// Supported forms : (4 forms)
//
//    * PADDW mm, mm       [MMX]
//    * PADDW m64, mm      [MMX]
//    * PADDW xmm, xmm     [SSE2]
//    * PADDW m128, xmm    [SSE2]
//
// Operands follow source-first order: v0 is the source, v1 the destination.
func (self *Program) PADDW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PADDW", 2, Operands { v0, v1 })
    // PADDW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                 // REX prefix, emitted only when needed
            m.emit(0x0f)                                     // opcode: 0F FD /r
            m.emit(0xfd)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11 (register), reg=v1, rm=v0
        })
    }
    // PADDW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)           // REX prefix, emitted only when needed
            m.emit(0x0f)                                     // opcode: 0F FD /r
            m.emit(0xfd)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM + SIB/displacement for the memory operand
        })
    }
    // PADDW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                     // 66 prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)                                     // opcode: 66 0F FD /r
            m.emit(0xfd)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11 (register), reg=v1, rm=v0
        })
    }
    // PADDW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                     // 66 prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)                                     // opcode: 66 0F FD /r
            m.emit(0xfd)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM + SIB/displacement for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for PADDW")                  // no form matched the supplied operand types
    }
    return p
}
 18853  
// PALIGNR performs "Packed Align Right".
//
// Mnemonic        : PALIGNR
// Supported forms : (4 forms)
//
//    * PALIGNR imm8, mm, mm       [SSSE3]
//    * PALIGNR imm8, m64, mm      [SSSE3]
//    * PALIGNR imm8, xmm, xmm     [SSSE3]
//    * PALIGNR imm8, m128, xmm    [SSSE3]
//
// Operands follow source-first order: v0 is the imm8, v1 the source, v2 the destination.
func (self *Program) PALIGNR(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("PALIGNR", 3, Operands { v0, v1, v2 })
    // PALIGNR imm8, mm, mm
    if isImm8(v0) && isMM(v1) && isMM(v2) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[2]), v[1], false)                 // REX prefix, emitted only when needed
            m.emit(0x0f)                                     // opcode: 0F 3A 0F /r ib (MMX form, no 66 prefix)
            m.emit(0x3a)
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))    // ModRM: mod=11 (register), reg=v2, rm=v1
            m.imm1(toImmAny(v[0]))                           // trailing imm8 operand (v0)
        })
    }
    // PALIGNR imm8, m64, mm
    if isImm8(v0) && isM64(v1) && isMM(v2) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[2]), addr(v[1]), false)           // REX prefix, emitted only when needed
            m.emit(0x0f)                                     // opcode: 0F 3A 0F /r ib
            m.emit(0x3a)
            m.emit(0x0f)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)               // ModRM + SIB/displacement for the memory operand
            m.imm1(toImmAny(v[0]))                           // trailing imm8 operand (v0)
        })
    }
    // PALIGNR imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                     // 66 prefix selects the XMM form
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)                                     // opcode: 66 0F 3A 0F /r ib
            m.emit(0x3a)
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))    // ModRM: mod=11 (register), reg=v2, rm=v1
            m.imm1(toImmAny(v[0]))                           // trailing imm8 operand (v0)
        })
    }
    // PALIGNR imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                     // 66 prefix selects the XMM form
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)                                     // opcode: 66 0F 3A 0F /r ib
            m.emit(0x3a)
            m.emit(0x0f)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)               // ModRM + SIB/displacement for the memory operand
            m.imm1(toImmAny(v[0]))                           // trailing imm8 operand (v0)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PALIGNR")                // no form matched the supplied operand types
    }
    return p
}
 18925  
// PAND performs "Packed Bitwise Logical AND".
//
// Mnemonic        : PAND
// Supported forms : (4 forms)
//
//    * PAND mm, mm       [MMX]
//    * PAND m64, mm      [MMX]
//    * PAND xmm, xmm     [SSE2]
//    * PAND m128, xmm    [SSE2]
//
// Operands follow source-first order: v0 is the source, v1 the destination.
func (self *Program) PAND(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PAND", 2, Operands { v0, v1 })
    // PAND mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                 // REX prefix, emitted only when needed
            m.emit(0x0f)                                     // opcode: 0F DB /r
            m.emit(0xdb)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11 (register), reg=v1, rm=v0
        })
    }
    // PAND m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)           // REX prefix, emitted only when needed
            m.emit(0x0f)                                     // opcode: 0F DB /r
            m.emit(0xdb)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM + SIB/displacement for the memory operand
        })
    }
    // PAND xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                     // 66 prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)                                     // opcode: 66 0F DB /r
            m.emit(0xdb)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11 (register), reg=v1, rm=v0
        })
    }
    // PAND m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                     // 66 prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)                                     // opcode: 66 0F DB /r
            m.emit(0xdb)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM + SIB/displacement for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for PAND")                   // no form matched the supplied operand types
    }
    return p
}
 18989  
// PANDN performs "Packed Bitwise Logical AND NOT".
//
// Mnemonic        : PANDN
// Supported forms : (4 forms)
//
//    * PANDN mm, mm       [MMX]
//    * PANDN m64, mm      [MMX]
//    * PANDN xmm, xmm     [SSE2]
//    * PANDN m128, xmm    [SSE2]
//
// Operands follow source-first order: v0 is the source, v1 the destination.
func (self *Program) PANDN(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PANDN", 2, Operands { v0, v1 })
    // PANDN mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                 // REX prefix, emitted only when needed
            m.emit(0x0f)                                     // opcode: 0F DF /r
            m.emit(0xdf)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11 (register), reg=v1, rm=v0
        })
    }
    // PANDN m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)           // REX prefix, emitted only when needed
            m.emit(0x0f)                                     // opcode: 0F DF /r
            m.emit(0xdf)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM + SIB/displacement for the memory operand
        })
    }
    // PANDN xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                     // 66 prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)                                     // opcode: 66 0F DF /r
            m.emit(0xdf)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11 (register), reg=v1, rm=v0
        })
    }
    // PANDN m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                     // 66 prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)                                     // opcode: 66 0F DF /r
            m.emit(0xdf)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM + SIB/displacement for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for PANDN")                  // no form matched the supplied operand types
    }
    return p
}
 19053  
// PAUSE performs "Spin Loop Hint".
//
// Mnemonic        : PAUSE
// Supported forms : (1 form)
//
//    * PAUSE
//
func (self *Program) PAUSE() *Instruction {
    p := self.alloc("PAUSE", 0, Operands {  })
    // PAUSE
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0xf3)    // PAUSE encodes as F3 90 (an F3-prefixed NOP); no operands, no ModRM
        m.emit(0x90)
    })
    return p
}
 19071  
// PAVGB performs "Average Packed Byte Integers".
//
// Mnemonic        : PAVGB
// Supported forms : (4 forms)
//
//    * PAVGB mm, mm       [MMX+]
//    * PAVGB m64, mm      [MMX+]
//    * PAVGB xmm, xmm     [SSE2]
//    * PAVGB m128, xmm    [SSE2]
//
// Operands follow source-first order: v0 is the source, v1 the destination.
func (self *Program) PAVGB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PAVGB", 2, Operands { v0, v1 })
    // PAVGB mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                 // REX prefix, emitted only when needed
            m.emit(0x0f)                                     // opcode: 0F E0 /r
            m.emit(0xe0)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11 (register), reg=v1, rm=v0
        })
    }
    // PAVGB m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)           // REX prefix, emitted only when needed
            m.emit(0x0f)                                     // opcode: 0F E0 /r
            m.emit(0xe0)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM + SIB/displacement for the memory operand
        })
    }
    // PAVGB xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                     // 66 prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)                                     // opcode: 66 0F E0 /r
            m.emit(0xe0)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11 (register), reg=v1, rm=v0
        })
    }
    // PAVGB m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                     // 66 prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)                                     // opcode: 66 0F E0 /r
            m.emit(0xe0)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM + SIB/displacement for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for PAVGB")                  // no form matched the supplied operand types
    }
    return p
}
 19135  
// PAVGUSB performs "Average Packed Byte Integers".
//
// Mnemonic        : PAVGUSB
// Supported forms : (2 forms)
//
//    * PAVGUSB mm, mm     [3dnow!]
//    * PAVGUSB m64, mm    [3dnow!]
//
// Operands follow source-first order: v0 is the source, v1 the destination.
// 3DNow! instructions share the 0F 0F escape; the actual opcode byte (BF here)
// is emitted LAST, after the ModRM/memory bytes.
func (self *Program) PAVGUSB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PAVGUSB", 2, Operands { v0, v1 })
    // PAVGUSB mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                 // REX prefix, emitted only when needed
            m.emit(0x0f)                                     // 3DNow! escape: 0F 0F /r
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11 (register), reg=v1, rm=v0
            m.emit(0xbf)                                     // 3DNow! opcode suffix byte for PAVGUSB
        })
    }
    // PAVGUSB m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)           // REX prefix, emitted only when needed
            m.emit(0x0f)                                     // 3DNow! escape: 0F 0F /r
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM + SIB/displacement for the memory operand
            m.emit(0xbf)                                     // 3DNow! opcode suffix byte for PAVGUSB
        })
    }
    if p.len == 0 {
        panic("invalid operands for PAVGUSB")                // no form matched the supplied operand types
    }
    return p
}
 19175  
// PAVGW performs "Average Packed Word Integers".
//
// Mnemonic        : PAVGW
// Supported forms : (4 forms)
//
//    * PAVGW mm, mm       [MMX+]
//    * PAVGW m64, mm      [MMX+]
//    * PAVGW xmm, xmm     [SSE2]
//    * PAVGW m128, xmm    [SSE2]
//
// Operands follow source-first order: v0 is the source, v1 the destination.
func (self *Program) PAVGW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PAVGW", 2, Operands { v0, v1 })
    // PAVGW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                 // REX prefix, emitted only when needed
            m.emit(0x0f)                                     // opcode: 0F E3 /r
            m.emit(0xe3)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11 (register), reg=v1, rm=v0
        })
    }
    // PAVGW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)           // REX prefix, emitted only when needed
            m.emit(0x0f)                                     // opcode: 0F E3 /r
            m.emit(0xe3)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM + SIB/displacement for the memory operand
        })
    }
    // PAVGW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                     // 66 prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)                                     // opcode: 66 0F E3 /r
            m.emit(0xe3)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11 (register), reg=v1, rm=v0
        })
    }
    // PAVGW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                     // 66 prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)                                     // opcode: 66 0F E3 /r
            m.emit(0xe3)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // ModRM + SIB/displacement for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for PAVGW")                  // no form matched the supplied operand types
    }
    return p
}
 19239  
// PBLENDVB performs "Variable Blend Packed Bytes".
//
// Mnemonic        : PBLENDVB
// Supported forms : (2 forms)
//
//    * PBLENDVB xmm0, xmm, xmm     [SSE4.1]
//    * PBLENDVB xmm0, m128, xmm    [SSE4.1]
//
// Operands follow source-first order: v1 is the source, v2 the destination.
// v0 must be the literal XMM0 register; it is an implicit operand and
// contributes no bytes to the encoding.
func (self *Program) PBLENDVB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("PBLENDVB", 3, Operands { v0, v1, v2 })
    // PBLENDVB xmm0, xmm, xmm
    if v0 == XMM0 && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                     // opcode: 66 0F 38 10 /r
            m.rexo(hcode(v[2]), v[1], false)                 // REX prefix, emitted only when needed
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))    // ModRM: mod=11 (register), reg=v2, rm=v1
        })
    }
    // PBLENDVB xmm0, m128, xmm
    if v0 == XMM0 && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                     // opcode: 66 0F 38 10 /r
            m.rexo(hcode(v[2]), addr(v[1]), false)           // REX prefix, emitted only when needed
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x10)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)               // ModRM + SIB/displacement for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for PBLENDVB")               // no form matched the supplied operand types
    }
    return p
}
 19281  
// PBLENDW performs "Blend Packed Words".
//
// Mnemonic        : PBLENDW
// Supported forms : (2 forms)
//
//    * PBLENDW imm8, xmm, xmm     [SSE4.1]
//    * PBLENDW imm8, m128, xmm    [SSE4.1]
//
// Operands follow source-first order: v0 is the imm8, v1 the source, v2 the destination.
func (self *Program) PBLENDW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("PBLENDW", 3, Operands { v0, v1, v2 })
    // PBLENDW imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                     // opcode: 66 0F 3A 0E /r ib
            m.rexo(hcode(v[2]), v[1], false)                 // REX prefix, emitted only when needed
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x0e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))    // ModRM: mod=11 (register), reg=v2, rm=v1
            m.imm1(toImmAny(v[0]))                           // trailing imm8 operand (v0)
        })
    }
    // PBLENDW imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                     // opcode: 66 0F 3A 0E /r ib
            m.rexo(hcode(v[2]), addr(v[1]), false)           // REX prefix, emitted only when needed
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x0e)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)               // ModRM + SIB/displacement for the memory operand
            m.imm1(toImmAny(v[0]))                           // trailing imm8 operand (v0)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PBLENDW")                // no form matched the supplied operand types
    }
    return p
}
 19325  
// PCLMULQDQ performs "Carry-Less Quadword Multiplication".
//
// Mnemonic        : PCLMULQDQ
// Supported forms : (2 forms)
//
//    * PCLMULQDQ imm8, xmm, xmm     [PCLMULQDQ]
//    * PCLMULQDQ imm8, m128, xmm    [PCLMULQDQ]
//
// The imm8 operand selects which quadwords of the sources are multiplied.
// It panics if the operands do not match any of the supported forms.
func (self *Program) PCLMULQDQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("PCLMULQDQ", 3, Operands { v0, v1, v2 })
    // PCLMULQDQ imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_PCLMULQDQ)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // mandatory 0x66 prefix
            m.rexo(hcode(v[2]), v[1], false)              // optional REX prefix
            m.emit(0x0f)                                  // opcode: 66 0F 3A 44 /r ib
            m.emit(0x3a)
            m.emit(0x44)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1])) // ModRM, register-direct (mod=11)
            m.imm1(toImmAny(v[0]))                        // trailing imm8 (quadword selector)
        })
    }
    // PCLMULQDQ imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_PCLMULQDQ)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x44)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)            // ModRM/SIB/disp for the memory operand
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for PCLMULQDQ")
    }
    return p
}
 19369  
// PCMPEQB performs "Compare Packed Byte Data for Equality".
//
// Mnemonic        : PCMPEQB
// Supported forms : (4 forms)
//
//    * PCMPEQB mm, mm       [MMX]
//    * PCMPEQB m64, mm      [MMX]
//    * PCMPEQB xmm, xmm     [SSE2]
//    * PCMPEQB m128, xmm    [SSE2]
//
// It panics if the operands do not match any of the supported forms.
func (self *Program) PCMPEQB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PCMPEQB", 2, Operands { v0, v1 })
    // PCMPEQB mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)              // optional REX prefix
            m.emit(0x0f)                                  // opcode: 0F 74 /r (MMX form, no prefix)
            m.emit(0x74)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM, register-direct (mod=11)
        })
    }
    // PCMPEQB m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x74)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/disp for the memory operand
        })
    }
    // PCMPEQB xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // 0x66 prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x74)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PCMPEQB m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x74)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PCMPEQB")
    }
    return p
}
 19433  
// PCMPEQD performs "Compare Packed Doubleword Data for Equality".
//
// Mnemonic        : PCMPEQD
// Supported forms : (4 forms)
//
//    * PCMPEQD mm, mm       [MMX]
//    * PCMPEQD m64, mm      [MMX]
//    * PCMPEQD xmm, xmm     [SSE2]
//    * PCMPEQD m128, xmm    [SSE2]
//
// It panics if the operands do not match any of the supported forms.
func (self *Program) PCMPEQD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PCMPEQD", 2, Operands { v0, v1 })
    // PCMPEQD mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)              // optional REX prefix
            m.emit(0x0f)                                  // opcode: 0F 76 /r (MMX form, no prefix)
            m.emit(0x76)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM, register-direct (mod=11)
        })
    }
    // PCMPEQD m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x76)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/disp for the memory operand
        })
    }
    // PCMPEQD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // 0x66 prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x76)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PCMPEQD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x76)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PCMPEQD")
    }
    return p
}
 19497  
// PCMPEQQ performs "Compare Packed Quadword Data for Equality".
//
// Mnemonic        : PCMPEQQ
// Supported forms : (2 forms)
//
//    * PCMPEQQ xmm, xmm     [SSE4.1]
//    * PCMPEQQ m128, xmm    [SSE4.1]
//
// It panics if the operands do not match any of the supported forms.
func (self *Program) PCMPEQQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PCMPEQQ", 2, Operands { v0, v1 })
    // PCMPEQQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // mandatory 0x66 prefix
            m.rexo(hcode(v[1]), v[0], false)              // optional REX prefix
            m.emit(0x0f)                                  // opcode: 66 0F 38 29 /r
            m.emit(0x38)
            m.emit(0x29)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM, register-direct (mod=11)
        })
    }
    // PCMPEQQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x29)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/disp for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for PCMPEQQ")
    }
    return p
}
 19539  
// PCMPEQW performs "Compare Packed Word Data for Equality".
//
// Mnemonic        : PCMPEQW
// Supported forms : (4 forms)
//
//    * PCMPEQW mm, mm       [MMX]
//    * PCMPEQW m64, mm      [MMX]
//    * PCMPEQW xmm, xmm     [SSE2]
//    * PCMPEQW m128, xmm    [SSE2]
//
// It panics if the operands do not match any of the supported forms.
func (self *Program) PCMPEQW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PCMPEQW", 2, Operands { v0, v1 })
    // PCMPEQW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)              // optional REX prefix
            m.emit(0x0f)                                  // opcode: 0F 75 /r (MMX form, no prefix)
            m.emit(0x75)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM, register-direct (mod=11)
        })
    }
    // PCMPEQW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x75)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/disp for the memory operand
        })
    }
    // PCMPEQW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // 0x66 prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x75)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PCMPEQW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x75)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PCMPEQW")
    }
    return p
}
 19603  
// PCMPESTRI performs "Packed Compare Explicit Length Strings, Return Index".
//
// Mnemonic        : PCMPESTRI
// Supported forms : (2 forms)
//
//    * PCMPESTRI imm8, xmm, xmm     [SSE4.2]
//    * PCMPESTRI imm8, m128, xmm    [SSE4.2]
//
// The imm8 operand is the comparison-control byte. It panics if the operands
// do not match any of the supported forms.
func (self *Program) PCMPESTRI(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("PCMPESTRI", 3, Operands { v0, v1, v2 })
    // PCMPESTRI imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE4_2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // mandatory 0x66 prefix
            m.rexo(hcode(v[2]), v[1], false)              // optional REX prefix
            m.emit(0x0f)                                  // opcode: 66 0F 3A 61 /r ib
            m.emit(0x3a)
            m.emit(0x61)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1])) // ModRM, register-direct (mod=11)
            m.imm1(toImmAny(v[0]))                        // trailing imm8 (control byte)
        })
    }
    // PCMPESTRI imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE4_2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x61)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)            // ModRM/SIB/disp for the memory operand
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for PCMPESTRI")
    }
    return p
}
 19647  
// PCMPESTRM performs "Packed Compare Explicit Length Strings, Return Mask".
//
// Mnemonic        : PCMPESTRM
// Supported forms : (2 forms)
//
//    * PCMPESTRM imm8, xmm, xmm     [SSE4.2]
//    * PCMPESTRM imm8, m128, xmm    [SSE4.2]
//
// The imm8 operand is the comparison-control byte. It panics if the operands
// do not match any of the supported forms.
func (self *Program) PCMPESTRM(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("PCMPESTRM", 3, Operands { v0, v1, v2 })
    // PCMPESTRM imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE4_2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // mandatory 0x66 prefix
            m.rexo(hcode(v[2]), v[1], false)              // optional REX prefix
            m.emit(0x0f)                                  // opcode: 66 0F 3A 60 /r ib
            m.emit(0x3a)
            m.emit(0x60)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1])) // ModRM, register-direct (mod=11)
            m.imm1(toImmAny(v[0]))                        // trailing imm8 (control byte)
        })
    }
    // PCMPESTRM imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE4_2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x60)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)            // ModRM/SIB/disp for the memory operand
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for PCMPESTRM")
    }
    return p
}
 19691  
// PCMPGTB performs "Compare Packed Signed Byte Integers for Greater Than".
//
// Mnemonic        : PCMPGTB
// Supported forms : (4 forms)
//
//    * PCMPGTB mm, mm       [MMX]
//    * PCMPGTB m64, mm      [MMX]
//    * PCMPGTB xmm, xmm     [SSE2]
//    * PCMPGTB m128, xmm    [SSE2]
//
// It panics if the operands do not match any of the supported forms.
func (self *Program) PCMPGTB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PCMPGTB", 2, Operands { v0, v1 })
    // PCMPGTB mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)              // optional REX prefix
            m.emit(0x0f)                                  // opcode: 0F 64 /r (MMX form, no prefix)
            m.emit(0x64)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM, register-direct (mod=11)
        })
    }
    // PCMPGTB m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x64)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/disp for the memory operand
        })
    }
    // PCMPGTB xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // 0x66 prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x64)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PCMPGTB m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x64)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PCMPGTB")
    }
    return p
}
 19755  
// PCMPGTD performs "Compare Packed Signed Doubleword Integers for Greater Than".
//
// Mnemonic        : PCMPGTD
// Supported forms : (4 forms)
//
//    * PCMPGTD mm, mm       [MMX]
//    * PCMPGTD m64, mm      [MMX]
//    * PCMPGTD xmm, xmm     [SSE2]
//    * PCMPGTD m128, xmm    [SSE2]
//
// It panics if the operands do not match any of the supported forms.
func (self *Program) PCMPGTD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PCMPGTD", 2, Operands { v0, v1 })
    // PCMPGTD mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)              // optional REX prefix
            m.emit(0x0f)                                  // opcode: 0F 66 /r (MMX form, no prefix)
            m.emit(0x66)                                  // NOTE: 0x66 here is the opcode byte, not a prefix
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM, register-direct (mod=11)
        })
    }
    // PCMPGTD m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x66)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/disp for the memory operand
        })
    }
    // PCMPGTD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // 0x66 prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x66)                                  // opcode byte (distinct from the leading prefix)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PCMPGTD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x66)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PCMPGTD")
    }
    return p
}
 19819  
// PCMPGTQ performs "Compare Packed Data for Greater Than".
//
// Mnemonic        : PCMPGTQ
// Supported forms : (2 forms)
//
//    * PCMPGTQ xmm, xmm     [SSE4.2]
//    * PCMPGTQ m128, xmm    [SSE4.2]
//
// It panics if the operands do not match any of the supported forms.
func (self *Program) PCMPGTQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PCMPGTQ", 2, Operands { v0, v1 })
    // PCMPGTQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // mandatory 0x66 prefix
            m.rexo(hcode(v[1]), v[0], false)              // optional REX prefix
            m.emit(0x0f)                                  // opcode: 66 0F 38 37 /r
            m.emit(0x38)
            m.emit(0x37)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM, register-direct (mod=11)
        })
    }
    // PCMPGTQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE4_2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x37)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/disp for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for PCMPGTQ")
    }
    return p
}
 19861  
// PCMPGTW performs "Compare Packed Signed Word Integers for Greater Than".
//
// Mnemonic        : PCMPGTW
// Supported forms : (4 forms)
//
//    * PCMPGTW mm, mm       [MMX]
//    * PCMPGTW m64, mm      [MMX]
//    * PCMPGTW xmm, xmm     [SSE2]
//    * PCMPGTW m128, xmm    [SSE2]
//
// It panics if the operands do not match any of the supported forms.
func (self *Program) PCMPGTW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PCMPGTW", 2, Operands { v0, v1 })
    // PCMPGTW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)              // optional REX prefix
            m.emit(0x0f)                                  // opcode: 0F 65 /r (MMX form, no prefix)
            m.emit(0x65)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM, register-direct (mod=11)
        })
    }
    // PCMPGTW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x65)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/disp for the memory operand
        })
    }
    // PCMPGTW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // 0x66 prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x65)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PCMPGTW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x65)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PCMPGTW")
    }
    return p
}
 19925  
// PCMPISTRI performs "Packed Compare Implicit Length Strings, Return Index".
//
// Mnemonic        : PCMPISTRI
// Supported forms : (2 forms)
//
//    * PCMPISTRI imm8, xmm, xmm     [SSE4.2]
//    * PCMPISTRI imm8, m128, xmm    [SSE4.2]
//
// The imm8 operand is the comparison-control byte. It panics if the operands
// do not match any of the supported forms.
func (self *Program) PCMPISTRI(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("PCMPISTRI", 3, Operands { v0, v1, v2 })
    // PCMPISTRI imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE4_2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // mandatory 0x66 prefix
            m.rexo(hcode(v[2]), v[1], false)              // optional REX prefix
            m.emit(0x0f)                                  // opcode: 66 0F 3A 63 /r ib
            m.emit(0x3a)
            m.emit(0x63)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1])) // ModRM, register-direct (mod=11)
            m.imm1(toImmAny(v[0]))                        // trailing imm8 (control byte)
        })
    }
    // PCMPISTRI imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE4_2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x63)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)            // ModRM/SIB/disp for the memory operand
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for PCMPISTRI")
    }
    return p
}
 19969  
// PCMPISTRM performs "Packed Compare Implicit Length Strings, Return Mask".
//
// Mnemonic        : PCMPISTRM
// Supported forms : (2 forms)
//
//    * PCMPISTRM imm8, xmm, xmm     [SSE4.2]
//    * PCMPISTRM imm8, m128, xmm    [SSE4.2]
//
// The imm8 operand is the comparison-control byte. It panics if the operands
// do not match any of the supported forms.
func (self *Program) PCMPISTRM(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("PCMPISTRM", 3, Operands { v0, v1, v2 })
    // PCMPISTRM imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE4_2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // mandatory 0x66 prefix
            m.rexo(hcode(v[2]), v[1], false)              // optional REX prefix
            m.emit(0x0f)                                  // opcode: 66 0F 3A 62 /r ib
            m.emit(0x3a)
            m.emit(0x62)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1])) // ModRM, register-direct (mod=11)
            m.imm1(toImmAny(v[0]))                        // trailing imm8 (control byte)
        })
    }
    // PCMPISTRM imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE4_2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x62)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)            // ModRM/SIB/disp for the memory operand
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for PCMPISTRM")
    }
    return p
}
 20013  
// PDEP performs "Parallel Bits Deposit".
//
// Mnemonic        : PDEP
// Supported forms : (4 forms)
//
//    * PDEP r32, r32, r32    [BMI2]
//    * PDEP m32, r32, r32    [BMI2]
//    * PDEP r64, r64, r64    [BMI2]
//    * PDEP m64, r64, r64    [BMI2]
//
// All forms are VEX-encoded (BMI2). It panics if the operands do not match
// any of the supported forms.
func (self *Program) PDEP(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("PDEP", 3, Operands { v0, v1, v2 })
    // PDEP r32, r32, r32
    if isReg32(v0) && isReg32(v1) && isReg32(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                       // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5)) // VEX byte 1: R/X/B inverted reg-extension bits, map 0F38
            m.emit(0x7b ^ (hlcode(v[1]) << 3))                 // VEX byte 2: W=0 (32-bit), vvvv = second source
            m.emit(0xf5)                                       // opcode: F5 /r
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))      // ModRM, register-direct (mod=11)
        })
    }
    // PDEP m32, r32, r32
    if isM32(v0) && isReg32(v1) && isReg32(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x03, hcode(v[2]), addr(v[0]), hlcode(v[1])) // VEX3 with memory operand
            m.emit(0xf5)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                 // ModRM/SIB/disp for the memory operand
        })
    }
    // PDEP r64, r64, r64
    if isReg64(v0) && isReg64(v1) && isReg64(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfb ^ (hlcode(v[1]) << 3))                 // same as 32-bit form but with VEX.W=1 (bit 7)
            m.emit(0xf5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // PDEP m64, r64, r64
    if isM64(v0) && isReg64(v1) && isReg64(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x83, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 0x83 = 0x03 with W=1 (64-bit)
            m.emit(0xf5)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PDEP")
    }
    return p
}
 20075  
// PEXT performs "Parallel Bits Extract".
//
// Mnemonic        : PEXT
// Supported forms : (4 forms)
//
//    * PEXT r32, r32, r32    [BMI2]
//    * PEXT m32, r32, r32    [BMI2]
//    * PEXT r64, r64, r64    [BMI2]
//    * PEXT m64, r64, r64    [BMI2]
//
// All forms are VEX-encoded (BMI2). It panics if the operands do not match
// any of the supported forms.
func (self *Program) PEXT(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("PEXT", 3, Operands { v0, v1, v2 })
    // PEXT r32, r32, r32
    if isReg32(v0) && isReg32(v1) && isReg32(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                       // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5)) // VEX byte 1: R/X/B inverted reg-extension bits, map 0F38
            m.emit(0x7a ^ (hlcode(v[1]) << 3))                 // VEX byte 2: W=0 (32-bit), vvvv = second source
            m.emit(0xf5)                                       // opcode: F5 /r
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))      // ModRM, register-direct (mod=11)
        })
    }
    // PEXT m32, r32, r32
    if isM32(v0) && isReg32(v1) && isReg32(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x02, hcode(v[2]), addr(v[0]), hlcode(v[1])) // VEX3 with memory operand
            m.emit(0xf5)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                 // ModRM/SIB/disp for the memory operand
        })
    }
    // PEXT r64, r64, r64
    if isReg64(v0) && isReg64(v1) && isReg64(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfa ^ (hlcode(v[1]) << 3))                 // same as 32-bit form but with VEX.W=1 (bit 7)
            m.emit(0xf5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // PEXT m64, r64, r64
    if isM64(v0) && isReg64(v1) && isReg64(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x82, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 0x82 = 0x02 with W=1 (64-bit)
            m.emit(0xf5)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PEXT")
    }
    return p
}
 20137  
// PEXTRB performs "Extract Byte".
//
// Mnemonic        : PEXTRB
// Supported forms : (2 forms)
//
//    * PEXTRB imm8, xmm, r32    [SSE4.1]
//    * PEXTRB imm8, xmm, m8     [SSE4.1]
//
func (self *Program) PEXTRB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("PEXTRB", 3, Operands { v0, v1, v2 })
    // PEXTRB imm8, xmm, r32
    if isImm8(v0) && isXMM(v1) && isReg32(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 [REX] 0F 3A 14 /r ib — register-direct ModRM, trailing imm8 selector.
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[2], false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // PEXTRB imm8, xmm, m8
    if isImm8(v0) && isXMM(v1) && isM8(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form of the same opcode; mrsd emits ModRM/SIB/disp,
            // then the imm8 selector follows.
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[2]), false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x14)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for PEXTRB")
    }
    return p
}
 20181  
// PEXTRD performs "Extract Doubleword".
//
// Mnemonic        : PEXTRD
// Supported forms : (2 forms)
//
//    * PEXTRD imm8, xmm, r32    [SSE4.1]
//    * PEXTRD imm8, xmm, m32    [SSE4.1]
//
func (self *Program) PEXTRD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("PEXTRD", 3, Operands { v0, v1, v2 })
    // PEXTRD imm8, xmm, r32
    if isImm8(v0) && isXMM(v1) && isReg32(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 [REX] 0F 3A 16 /r ib — register-direct ModRM, trailing imm8 selector.
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[2], false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x16)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // PEXTRD imm8, xmm, m32
    if isImm8(v0) && isXMM(v1) && isM32(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form of the same opcode; mrsd emits ModRM/SIB/disp,
            // then the imm8 selector follows.
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[2]), false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x16)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for PEXTRD")
    }
    return p
}
 20225  
// PEXTRQ performs "Extract Quadword".
//
// Mnemonic        : PEXTRQ
// Supported forms : (2 forms)
//
//    * PEXTRQ imm8, xmm, r64    [SSE4.1]
//    * PEXTRQ imm8, xmm, m64    [SSE4.1]
//
func (self *Program) PEXTRQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("PEXTRQ", 3, Operands { v0, v1, v2 })
    // PEXTRQ imm8, xmm, r64
    if isImm8(v0) && isXMM(v1) && isReg64(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 REX.W 0F 3A 16 /r ib — the REX byte (base 0x48) is mandatory
            // here: the W bit distinguishes PEXTRQ from PEXTRD.
            m.emit(0x66)
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[2]))
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x16)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // PEXTRQ imm8, xmm, m64
    if isImm8(v0) && isXMM(v1) && isM64(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: rexm(1, ...) emits REX with W=1 for the 64-bit width.
            m.emit(0x66)
            m.rexm(1, hcode(v[1]), addr(v[2]))
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x16)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for PEXTRQ")
    }
    return p
}
 20269  
// PEXTRW performs "Extract Word".
//
// Mnemonic        : PEXTRW
// Supported forms : (3 forms)
//
//    * PEXTRW imm8, mm, r32     [MMX+]
//    * PEXTRW imm8, xmm, r32    [SSE4.1]
//    * PEXTRW imm8, xmm, m16    [SSE4.1]
//
func (self *Program) PEXTRW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("PEXTRW", 3, Operands { v0, v1, v2 })
    // PEXTRW imm8, mm, r32
    if isImm8(v0) && isMM(v1) && isReg32(v2) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // MMX form: [REX] 0F C5 /r ib — note reg/rm roles are swapped
            // relative to the 0F 3A 15 form (destination register in reg field).
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0xc5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // PEXTRW imm8, xmm, r32
    if isImm8(v0) && isXMM(v1) && isReg32(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // Two equivalent encodings are registered for this form; the encoder
        // may select either candidate.
        p.add(0, func(m *_Encoding, v []interface{}) {
            // SSE4.1 encoding: 66 [REX] 0F 3A 15 /r ib.
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[2], false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Shorter legacy encoding: 66 [REX] 0F C5 /r ib.
            m.emit(0x66)
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0xc5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // PEXTRW imm8, xmm, m16
    if isImm8(v0) && isXMM(v1) && isM16(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory destination is only available via the 0F 3A 15 encoding.
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[2]), false)
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x15)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for PEXTRW")
    }
    return p
}
 20334  
// PF2ID performs "Packed Floating-Point to Integer Doubleword Conversion".
//
// Mnemonic        : PF2ID
// Supported forms : (2 forms)
//
//    * PF2ID mm, mm     [3dnow!]
//    * PF2ID m64, mm    [3dnow!]
//
func (self *Program) PF2ID(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PF2ID", 2, Operands { v0, v1 })
    // PF2ID mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // [REX] 0F 0F /r 1D — 3DNow! encodes the opcode (0x1d) as a
            // suffix byte after the ModRM byte.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
            m.emit(0x1d)
        })
    }
    // PF2ID m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same suffix-opcode scheme; 0x1d follows the memory operand bytes.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
            m.emit(0x1d)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PF2ID")
    }
    return p
}
 20374  
// PF2IW performs "Packed Floating-Point to Integer Word Conversion".
//
// Mnemonic        : PF2IW
// Supported forms : (2 forms)
//
//    * PF2IW mm, mm     [3dnow!+]
//    * PF2IW m64, mm    [3dnow!+]
//
func (self *Program) PF2IW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PF2IW", 2, Operands { v0, v1 })
    // PF2IW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW_PLUS)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // [REX] 0F 0F /r 1C — 3DNow! encodes the opcode (0x1c) as a
            // suffix byte after the ModRM byte.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
            m.emit(0x1c)
        })
    }
    // PF2IW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW_PLUS)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same suffix-opcode scheme; 0x1c follows the memory operand bytes.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
            m.emit(0x1c)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PF2IW")
    }
    return p
}
 20414  
// PFACC performs "Packed Floating-Point Accumulate".
//
// Mnemonic        : PFACC
// Supported forms : (2 forms)
//
//    * PFACC mm, mm     [3dnow!]
//    * PFACC m64, mm    [3dnow!]
//
func (self *Program) PFACC(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PFACC", 2, Operands { v0, v1 })
    // PFACC mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // [REX] 0F 0F /r AE — 3DNow! encodes the opcode (0xae) as a
            // suffix byte after the ModRM byte.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
            m.emit(0xae)
        })
    }
    // PFACC m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same suffix-opcode scheme; 0xae follows the memory operand bytes.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
            m.emit(0xae)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PFACC")
    }
    return p
}
 20454  
// PFADD performs "Packed Floating-Point Add".
//
// Mnemonic        : PFADD
// Supported forms : (2 forms)
//
//    * PFADD mm, mm     [3dnow!]
//    * PFADD m64, mm    [3dnow!]
//
func (self *Program) PFADD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PFADD", 2, Operands { v0, v1 })
    // PFADD mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // [REX] 0F 0F /r 9E — 3DNow! encodes the opcode (0x9e) as a
            // suffix byte after the ModRM byte.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
            m.emit(0x9e)
        })
    }
    // PFADD m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same suffix-opcode scheme; 0x9e follows the memory operand bytes.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
            m.emit(0x9e)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PFADD")
    }
    return p
}
 20494  
// PFCMPEQ performs "Packed Floating-Point Compare for Equal".
//
// Mnemonic        : PFCMPEQ
// Supported forms : (2 forms)
//
//    * PFCMPEQ mm, mm     [3dnow!]
//    * PFCMPEQ m64, mm    [3dnow!]
//
func (self *Program) PFCMPEQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PFCMPEQ", 2, Operands { v0, v1 })
    // PFCMPEQ mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // [REX] 0F 0F /r B0 — 3DNow! encodes the opcode (0xb0) as a
            // suffix byte after the ModRM byte.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
            m.emit(0xb0)
        })
    }
    // PFCMPEQ m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same suffix-opcode scheme; 0xb0 follows the memory operand bytes.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
            m.emit(0xb0)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PFCMPEQ")
    }
    return p
}
 20534  
// PFCMPGE performs "Packed Floating-Point Compare for Greater or Equal".
//
// Mnemonic        : PFCMPGE
// Supported forms : (2 forms)
//
//    * PFCMPGE mm, mm     [3dnow!]
//    * PFCMPGE m64, mm    [3dnow!]
//
func (self *Program) PFCMPGE(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PFCMPGE", 2, Operands { v0, v1 })
    // PFCMPGE mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // [REX] 0F 0F /r 90 — 3DNow! encodes the opcode (0x90) as a
            // suffix byte after the ModRM byte.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
            m.emit(0x90)
        })
    }
    // PFCMPGE m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same suffix-opcode scheme; 0x90 follows the memory operand bytes.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
            m.emit(0x90)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PFCMPGE")
    }
    return p
}
 20574  
// PFCMPGT performs "Packed Floating-Point Compare for Greater Than".
//
// Mnemonic        : PFCMPGT
// Supported forms : (2 forms)
//
//    * PFCMPGT mm, mm     [3dnow!]
//    * PFCMPGT m64, mm    [3dnow!]
//
func (self *Program) PFCMPGT(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PFCMPGT", 2, Operands { v0, v1 })
    // PFCMPGT mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // [REX] 0F 0F /r A0 — 3DNow! encodes the opcode (0xa0) as a
            // suffix byte after the ModRM byte.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
            m.emit(0xa0)
        })
    }
    // PFCMPGT m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same suffix-opcode scheme; 0xa0 follows the memory operand bytes.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
            m.emit(0xa0)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PFCMPGT")
    }
    return p
}
 20614  
// PFMAX performs "Packed Floating-Point Maximum".
//
// Mnemonic        : PFMAX
// Supported forms : (2 forms)
//
//    * PFMAX mm, mm     [3dnow!]
//    * PFMAX m64, mm    [3dnow!]
//
func (self *Program) PFMAX(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PFMAX", 2, Operands { v0, v1 })
    // PFMAX mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // [REX] 0F 0F /r A4 — 3DNow! encodes the opcode (0xa4) as a
            // suffix byte after the ModRM byte.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
            m.emit(0xa4)
        })
    }
    // PFMAX m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same suffix-opcode scheme; 0xa4 follows the memory operand bytes.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
            m.emit(0xa4)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PFMAX")
    }
    return p
}
 20654  
// PFMIN performs "Packed Floating-Point Minimum".
//
// Mnemonic        : PFMIN
// Supported forms : (2 forms)
//
//    * PFMIN mm, mm     [3dnow!]
//    * PFMIN m64, mm    [3dnow!]
//
func (self *Program) PFMIN(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PFMIN", 2, Operands { v0, v1 })
    // PFMIN mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // [REX] 0F 0F /r 94 — 3DNow! encodes the opcode (0x94) as a
            // suffix byte after the ModRM byte.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
            m.emit(0x94)
        })
    }
    // PFMIN m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same suffix-opcode scheme; 0x94 follows the memory operand bytes.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
            m.emit(0x94)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PFMIN")
    }
    return p
}
 20694  
// PFMUL performs "Packed Floating-Point Multiply".
//
// Mnemonic        : PFMUL
// Supported forms : (2 forms)
//
//    * PFMUL mm, mm     [3dnow!]
//    * PFMUL m64, mm    [3dnow!]
//
func (self *Program) PFMUL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PFMUL", 2, Operands { v0, v1 })
    // PFMUL mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // [REX] 0F 0F /r B4 — 3DNow! encodes the opcode (0xb4) as a
            // suffix byte after the ModRM byte.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
            m.emit(0xb4)
        })
    }
    // PFMUL m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same suffix-opcode scheme; 0xb4 follows the memory operand bytes.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
            m.emit(0xb4)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PFMUL")
    }
    return p
}
 20734  
// PFNACC performs "Packed Floating-Point Negative Accumulate".
//
// Mnemonic        : PFNACC
// Supported forms : (2 forms)
//
//    * PFNACC mm, mm     [3dnow!+]
//    * PFNACC m64, mm    [3dnow!+]
//
func (self *Program) PFNACC(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PFNACC", 2, Operands { v0, v1 })
    // PFNACC mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW_PLUS)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // [REX] 0F 0F /r 8A — 3DNow! encodes the opcode (0x8a) as a
            // suffix byte after the ModRM byte.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
            m.emit(0x8a)
        })
    }
    // PFNACC m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW_PLUS)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same suffix-opcode scheme; 0x8a follows the memory operand bytes.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
            m.emit(0x8a)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PFNACC")
    }
    return p
}
 20774  
// PFPNACC performs "Packed Floating-Point Positive-Negative Accumulate".
//
// Mnemonic        : PFPNACC
// Supported forms : (2 forms)
//
//    * PFPNACC mm, mm     [3dnow!+]
//    * PFPNACC m64, mm    [3dnow!+]
//
func (self *Program) PFPNACC(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PFPNACC", 2, Operands { v0, v1 })
    // PFPNACC mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW_PLUS)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // [REX] 0F 0F /r 8E — 3DNow! encodes the opcode (0x8e) as a
            // suffix byte after the ModRM byte.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
            m.emit(0x8e)
        })
    }
    // PFPNACC m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW_PLUS)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same suffix-opcode scheme; 0x8e follows the memory operand bytes.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
            m.emit(0x8e)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PFPNACC")
    }
    return p
}
 20814  
// PFRCP performs "Packed Floating-Point Reciprocal Approximation".
//
// Mnemonic        : PFRCP
// Supported forms : (2 forms)
//
//    * PFRCP mm, mm     [3dnow!]
//    * PFRCP m64, mm    [3dnow!]
//
func (self *Program) PFRCP(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PFRCP", 2, Operands { v0, v1 })
    // PFRCP mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // [REX] 0F 0F /r 96 — 3DNow! encodes the opcode (0x96) as a
            // suffix byte after the ModRM byte.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
            m.emit(0x96)
        })
    }
    // PFRCP m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same suffix-opcode scheme; 0x96 follows the memory operand bytes.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
            m.emit(0x96)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PFRCP")
    }
    return p
}
 20854  
// PFRCPIT1 performs "Packed Floating-Point Reciprocal Iteration 1".
//
// Mnemonic        : PFRCPIT1
// Supported forms : (2 forms)
//
//    * PFRCPIT1 mm, mm     [3dnow!]
//    * PFRCPIT1 m64, mm    [3dnow!]
//
func (self *Program) PFRCPIT1(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PFRCPIT1", 2, Operands { v0, v1 })
    // PFRCPIT1 mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // [REX] 0F 0F /r A6 — 3DNow! encodes the opcode (0xa6) as a
            // suffix byte after the ModRM byte.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
            m.emit(0xa6)
        })
    }
    // PFRCPIT1 m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same suffix-opcode scheme; 0xa6 follows the memory operand bytes.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
            m.emit(0xa6)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PFRCPIT1")
    }
    return p
}
 20894  
// PFRCPIT2 performs "Packed Floating-Point Reciprocal Iteration 2".
//
// Mnemonic        : PFRCPIT2
// Supported forms : (2 forms)
//
//    * PFRCPIT2 mm, mm     [3dnow!]
//    * PFRCPIT2 m64, mm    [3dnow!]
//
func (self *Program) PFRCPIT2(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PFRCPIT2", 2, Operands { v0, v1 })
    // PFRCPIT2 mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // [REX] 0F 0F /r B6 — 3DNow! encodes the opcode (0xb6) as a
            // suffix byte after the ModRM byte.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
            m.emit(0xb6)
        })
    }
    // PFRCPIT2 m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same suffix-opcode scheme; 0xb6 follows the memory operand bytes.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
            m.emit(0xb6)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PFRCPIT2")
    }
    return p
}
 20934  
// PFRSQIT1 performs "Packed Floating-Point Reciprocal Square Root Iteration 1".
//
// Mnemonic        : PFRSQIT1
// Supported forms : (2 forms)
//
//    * PFRSQIT1 mm, mm     [3dnow!]
//    * PFRSQIT1 m64, mm    [3dnow!]
//
func (self *Program) PFRSQIT1(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PFRSQIT1", 2, Operands { v0, v1 })
    // PFRSQIT1 mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // [REX] 0F 0F /r A7 — 3DNow! encodes the opcode (0xa7) as a
            // suffix byte after the ModRM byte.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
            m.emit(0xa7)
        })
    }
    // PFRSQIT1 m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same suffix-opcode scheme; 0xa7 follows the memory operand bytes.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
            m.emit(0xa7)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PFRSQIT1")
    }
    return p
}
 20974  
// PFRSQRT performs "Packed Floating-Point Reciprocal Square Root Approximation".
//
// Mnemonic        : PFRSQRT
// Supported forms : (2 forms)
//
//    * PFRSQRT mm, mm     [3dnow!]
//    * PFRSQRT m64, mm    [3dnow!]
//
func (self *Program) PFRSQRT(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PFRSQRT", 2, Operands { v0, v1 })
    // PFRSQRT mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)              // REX prefix, if required for the operands
            m.emit(0x0f)                                  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (register), reg=v1 (dst), rm=v0 (src)
            m.emit(0x97)                                  // 3DNow! opcode suffix, emitted after the ModRM byte
        })
    }
    // PFRSQRT m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)        // REX prefix, if required by the addressing form
            m.emit(0x0f)                                  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the m64 operand
            m.emit(0x97)                                  // 3DNow! opcode suffix, emitted after the operand encoding
        })
    }
    // No form matched the given operand types.
    if p.len == 0 {
        panic("invalid operands for PFRSQRT")
    }
    return p
}
 21014  
// PFSUB performs "Packed Floating-Point Subtract".
//
// Mnemonic        : PFSUB
// Supported forms : (2 forms)
//
//    * PFSUB mm, mm     [3dnow!]
//    * PFSUB m64, mm    [3dnow!]
//
func (self *Program) PFSUB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PFSUB", 2, Operands { v0, v1 })
    // PFSUB mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)              // REX prefix, if required for the operands
            m.emit(0x0f)                                  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (register), reg=v1 (dst), rm=v0 (src)
            m.emit(0x9a)                                  // 3DNow! opcode suffix, emitted after the ModRM byte
        })
    }
    // PFSUB m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)        // REX prefix, if required by the addressing form
            m.emit(0x0f)                                  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the m64 operand
            m.emit(0x9a)                                  // 3DNow! opcode suffix, emitted after the operand encoding
        })
    }
    // No form matched the given operand types.
    if p.len == 0 {
        panic("invalid operands for PFSUB")
    }
    return p
}
 21054  
// PFSUBR performs "Packed Floating-Point Subtract Reverse".
//
// Mnemonic        : PFSUBR
// Supported forms : (2 forms)
//
//    * PFSUBR mm, mm     [3dnow!]
//    * PFSUBR m64, mm    [3dnow!]
//
func (self *Program) PFSUBR(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PFSUBR", 2, Operands { v0, v1 })
    // PFSUBR mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)              // REX prefix, if required for the operands
            m.emit(0x0f)                                  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (register), reg=v1 (dst), rm=v0 (src)
            m.emit(0xaa)                                  // 3DNow! opcode suffix, emitted after the ModRM byte
        })
    }
    // PFSUBR m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)        // REX prefix, if required by the addressing form
            m.emit(0x0f)                                  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the m64 operand
            m.emit(0xaa)                                  // 3DNow! opcode suffix, emitted after the operand encoding
        })
    }
    // No form matched the given operand types.
    if p.len == 0 {
        panic("invalid operands for PFSUBR")
    }
    return p
}
 21094  
// PHADDD performs "Packed Horizontal Add Doubleword Integer".
//
// Mnemonic        : PHADDD
// Supported forms : (4 forms)
//
//    * PHADDD mm, mm       [SSSE3]
//    * PHADDD m64, mm      [SSSE3]
//    * PHADDD xmm, xmm     [SSSE3]
//    * PHADDD m128, xmm    [SSSE3]
//
func (self *Program) PHADDD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PHADDD", 2, Operands { v0, v1 })
    // PHADDD mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)              // REX prefix, if required for the operands
            m.emit(0x0f)                                  // opcode: 0F 38 02
            m.emit(0x38)
            m.emit(0x02)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (register), reg=v1 (dst), rm=v0 (src)
        })
    }
    // PHADDD m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)        // REX prefix, if required by the addressing form
            m.emit(0x0f)                                  // opcode: 0F 38 02
            m.emit(0x38)
            m.emit(0x02)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the m64 operand
        })
    }
    // PHADDD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // 66 prefix selects the 128-bit XMM form
            m.rexo(hcode(v[1]), v[0], false)              // REX prefix, if required for the operands
            m.emit(0x0f)                                  // opcode: 0F 38 02
            m.emit(0x38)
            m.emit(0x02)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (register), reg=v1 (dst), rm=v0 (src)
        })
    }
    // PHADDD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // 66 prefix selects the 128-bit XMM form
            m.rexo(hcode(v[1]), addr(v[0]), false)        // REX prefix, if required by the addressing form
            m.emit(0x0f)                                  // opcode: 0F 38 02
            m.emit(0x38)
            m.emit(0x02)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the m128 operand
        })
    }
    // No form matched the given operand types.
    if p.len == 0 {
        panic("invalid operands for PHADDD")
    }
    return p
}
 21162  
// PHADDSW performs "Packed Horizontal Add Signed Word Integers with Signed Saturation".
//
// Mnemonic        : PHADDSW
// Supported forms : (4 forms)
//
//    * PHADDSW mm, mm       [SSSE3]
//    * PHADDSW m64, mm      [SSSE3]
//    * PHADDSW xmm, xmm     [SSSE3]
//    * PHADDSW m128, xmm    [SSSE3]
//
func (self *Program) PHADDSW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PHADDSW", 2, Operands { v0, v1 })
    // PHADDSW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)              // REX prefix, if required for the operands
            m.emit(0x0f)                                  // opcode: 0F 38 03
            m.emit(0x38)
            m.emit(0x03)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (register), reg=v1 (dst), rm=v0 (src)
        })
    }
    // PHADDSW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)        // REX prefix, if required by the addressing form
            m.emit(0x0f)                                  // opcode: 0F 38 03
            m.emit(0x38)
            m.emit(0x03)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the m64 operand
        })
    }
    // PHADDSW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // 66 prefix selects the 128-bit XMM form
            m.rexo(hcode(v[1]), v[0], false)              // REX prefix, if required for the operands
            m.emit(0x0f)                                  // opcode: 0F 38 03
            m.emit(0x38)
            m.emit(0x03)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (register), reg=v1 (dst), rm=v0 (src)
        })
    }
    // PHADDSW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // 66 prefix selects the 128-bit XMM form
            m.rexo(hcode(v[1]), addr(v[0]), false)        // REX prefix, if required by the addressing form
            m.emit(0x0f)                                  // opcode: 0F 38 03
            m.emit(0x38)
            m.emit(0x03)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the m128 operand
        })
    }
    // No form matched the given operand types.
    if p.len == 0 {
        panic("invalid operands for PHADDSW")
    }
    return p
}
 21230  
// PHADDW performs "Packed Horizontal Add Word Integers".
//
// Mnemonic        : PHADDW
// Supported forms : (4 forms)
//
//    * PHADDW mm, mm       [SSSE3]
//    * PHADDW m64, mm      [SSSE3]
//    * PHADDW xmm, xmm     [SSSE3]
//    * PHADDW m128, xmm    [SSSE3]
//
func (self *Program) PHADDW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PHADDW", 2, Operands { v0, v1 })
    // PHADDW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)              // REX prefix, if required for the operands
            m.emit(0x0f)                                  // opcode: 0F 38 01
            m.emit(0x38)
            m.emit(0x01)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (register), reg=v1 (dst), rm=v0 (src)
        })
    }
    // PHADDW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)        // REX prefix, if required by the addressing form
            m.emit(0x0f)                                  // opcode: 0F 38 01
            m.emit(0x38)
            m.emit(0x01)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the m64 operand
        })
    }
    // PHADDW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // 66 prefix selects the 128-bit XMM form
            m.rexo(hcode(v[1]), v[0], false)              // REX prefix, if required for the operands
            m.emit(0x0f)                                  // opcode: 0F 38 01
            m.emit(0x38)
            m.emit(0x01)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (register), reg=v1 (dst), rm=v0 (src)
        })
    }
    // PHADDW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // 66 prefix selects the 128-bit XMM form
            m.rexo(hcode(v[1]), addr(v[0]), false)        // REX prefix, if required by the addressing form
            m.emit(0x0f)                                  // opcode: 0F 38 01
            m.emit(0x38)
            m.emit(0x01)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the m128 operand
        })
    }
    // No form matched the given operand types.
    if p.len == 0 {
        panic("invalid operands for PHADDW")
    }
    return p
}
 21298  
// PHMINPOSUW performs "Packed Horizontal Minimum of Unsigned Word Integers".
//
// Mnemonic        : PHMINPOSUW
// Supported forms : (2 forms)
//
//    * PHMINPOSUW xmm, xmm     [SSE4.1]
//    * PHMINPOSUW m128, xmm    [SSE4.1]
//
func (self *Program) PHMINPOSUW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PHMINPOSUW", 2, Operands { v0, v1 })
    // PHMINPOSUW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // mandatory 66 prefix
            m.rexo(hcode(v[1]), v[0], false)              // REX prefix, if required for the operands
            m.emit(0x0f)                                  // opcode: 0F 38 41
            m.emit(0x38)
            m.emit(0x41)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (register), reg=v1 (dst), rm=v0 (src)
        })
    }
    // PHMINPOSUW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // mandatory 66 prefix
            m.rexo(hcode(v[1]), addr(v[0]), false)        // REX prefix, if required by the addressing form
            m.emit(0x0f)                                  // opcode: 0F 38 41
            m.emit(0x38)
            m.emit(0x41)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the m128 operand
        })
    }
    // No form matched the given operand types.
    if p.len == 0 {
        panic("invalid operands for PHMINPOSUW")
    }
    return p
}
 21340  
// PHSUBD performs "Packed Horizontal Subtract Doubleword Integers".
//
// Mnemonic        : PHSUBD
// Supported forms : (4 forms)
//
//    * PHSUBD mm, mm       [SSSE3]
//    * PHSUBD m64, mm      [SSSE3]
//    * PHSUBD xmm, xmm     [SSSE3]
//    * PHSUBD m128, xmm    [SSSE3]
//
func (self *Program) PHSUBD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PHSUBD", 2, Operands { v0, v1 })
    // PHSUBD mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)              // REX prefix, if required for the operands
            m.emit(0x0f)                                  // opcode: 0F 38 06
            m.emit(0x38)
            m.emit(0x06)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (register), reg=v1 (dst), rm=v0 (src)
        })
    }
    // PHSUBD m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)        // REX prefix, if required by the addressing form
            m.emit(0x0f)                                  // opcode: 0F 38 06
            m.emit(0x38)
            m.emit(0x06)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the m64 operand
        })
    }
    // PHSUBD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // 66 prefix selects the 128-bit XMM form
            m.rexo(hcode(v[1]), v[0], false)              // REX prefix, if required for the operands
            m.emit(0x0f)                                  // opcode: 0F 38 06
            m.emit(0x38)
            m.emit(0x06)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (register), reg=v1 (dst), rm=v0 (src)
        })
    }
    // PHSUBD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // 66 prefix selects the 128-bit XMM form
            m.rexo(hcode(v[1]), addr(v[0]), false)        // REX prefix, if required by the addressing form
            m.emit(0x0f)                                  // opcode: 0F 38 06
            m.emit(0x38)
            m.emit(0x06)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the m128 operand
        })
    }
    // No form matched the given operand types.
    if p.len == 0 {
        panic("invalid operands for PHSUBD")
    }
    return p
}
 21408  
// PHSUBSW performs "Packed Horizontal Subtract Signed Word Integers with Signed Saturation".
//
// Mnemonic        : PHSUBSW
// Supported forms : (4 forms)
//
//    * PHSUBSW mm, mm       [SSSE3]
//    * PHSUBSW m64, mm      [SSSE3]
//    * PHSUBSW xmm, xmm     [SSSE3]
//    * PHSUBSW m128, xmm    [SSSE3]
//
func (self *Program) PHSUBSW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PHSUBSW", 2, Operands { v0, v1 })
    // PHSUBSW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)              // REX prefix, if required for the operands
            m.emit(0x0f)                                  // opcode: 0F 38 07
            m.emit(0x38)
            m.emit(0x07)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (register), reg=v1 (dst), rm=v0 (src)
        })
    }
    // PHSUBSW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)        // REX prefix, if required by the addressing form
            m.emit(0x0f)                                  // opcode: 0F 38 07
            m.emit(0x38)
            m.emit(0x07)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the m64 operand
        })
    }
    // PHSUBSW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // 66 prefix selects the 128-bit XMM form
            m.rexo(hcode(v[1]), v[0], false)              // REX prefix, if required for the operands
            m.emit(0x0f)                                  // opcode: 0F 38 07
            m.emit(0x38)
            m.emit(0x07)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (register), reg=v1 (dst), rm=v0 (src)
        })
    }
    // PHSUBSW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // 66 prefix selects the 128-bit XMM form
            m.rexo(hcode(v[1]), addr(v[0]), false)        // REX prefix, if required by the addressing form
            m.emit(0x0f)                                  // opcode: 0F 38 07
            m.emit(0x38)
            m.emit(0x07)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the m128 operand
        })
    }
    // No form matched the given operand types.
    if p.len == 0 {
        panic("invalid operands for PHSUBSW")
    }
    return p
}
 21476  
// PHSUBW performs "Packed Horizontal Subtract Word Integers".
//
// Mnemonic        : PHSUBW
// Supported forms : (4 forms)
//
//    * PHSUBW mm, mm       [SSSE3]
//    * PHSUBW m64, mm      [SSSE3]
//    * PHSUBW xmm, xmm     [SSSE3]
//    * PHSUBW m128, xmm    [SSSE3]
//
func (self *Program) PHSUBW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PHSUBW", 2, Operands { v0, v1 })
    // PHSUBW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)              // REX prefix, if required for the operands
            m.emit(0x0f)                                  // opcode: 0F 38 05
            m.emit(0x38)
            m.emit(0x05)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (register), reg=v1 (dst), rm=v0 (src)
        })
    }
    // PHSUBW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)        // REX prefix, if required by the addressing form
            m.emit(0x0f)                                  // opcode: 0F 38 05
            m.emit(0x38)
            m.emit(0x05)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the m64 operand
        })
    }
    // PHSUBW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // 66 prefix selects the 128-bit XMM form
            m.rexo(hcode(v[1]), v[0], false)              // REX prefix, if required for the operands
            m.emit(0x0f)                                  // opcode: 0F 38 05
            m.emit(0x38)
            m.emit(0x05)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (register), reg=v1 (dst), rm=v0 (src)
        })
    }
    // PHSUBW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // 66 prefix selects the 128-bit XMM form
            m.rexo(hcode(v[1]), addr(v[0]), false)        // REX prefix, if required by the addressing form
            m.emit(0x0f)                                  // opcode: 0F 38 05
            m.emit(0x38)
            m.emit(0x05)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the m128 operand
        })
    }
    // No form matched the given operand types.
    if p.len == 0 {
        panic("invalid operands for PHSUBW")
    }
    return p
}
 21544  
// PI2FD performs "Packed Integer to Floating-Point Doubleword Conversion".
//
// Mnemonic        : PI2FD
// Supported forms : (2 forms)
//
//    * PI2FD mm, mm     [3dnow!]
//    * PI2FD m64, mm    [3dnow!]
//
func (self *Program) PI2FD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PI2FD", 2, Operands { v0, v1 })
    // PI2FD mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)              // REX prefix, if required for the operands
            m.emit(0x0f)                                  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (register), reg=v1 (dst), rm=v0 (src)
            m.emit(0x0d)                                  // 3DNow! opcode suffix, emitted after the ModRM byte
        })
    }
    // PI2FD m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)        // REX prefix, if required by the addressing form
            m.emit(0x0f)                                  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the m64 operand
            m.emit(0x0d)                                  // 3DNow! opcode suffix, emitted after the operand encoding
        })
    }
    // No form matched the given operand types.
    if p.len == 0 {
        panic("invalid operands for PI2FD")
    }
    return p
}
 21584  
// PI2FW performs "Packed Integer to Floating-Point Word Conversion".
//
// Mnemonic        : PI2FW
// Supported forms : (2 forms)
//
//    * PI2FW mm, mm     [3dnow!+]
//    * PI2FW m64, mm    [3dnow!+]
//
func (self *Program) PI2FW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PI2FW", 2, Operands { v0, v1 })
    // PI2FW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW_PLUS)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)              // REX prefix, if required for the operands
            m.emit(0x0f)                                  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11 (register), reg=v1 (dst), rm=v0 (src)
            m.emit(0x0c)                                  // 3DNow! opcode suffix, emitted after the ModRM byte
        })
    }
    // PI2FW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW_PLUS)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)        // REX prefix, if required by the addressing form
            m.emit(0x0f)                                  // 3DNow! escape bytes: 0F 0F
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/displacement for the m64 operand
            m.emit(0x0c)                                  // 3DNow! opcode suffix, emitted after the operand encoding
        })
    }
    // No form matched the given operand types.
    if p.len == 0 {
        panic("invalid operands for PI2FW")
    }
    return p
}
 21624  
// PINSRB performs "Insert Byte".
//
// Mnemonic        : PINSRB
// Supported forms : (2 forms)
//
//    * PINSRB imm8, r32, xmm    [SSE4.1]
//    * PINSRB imm8, m8, xmm     [SSE4.1]
//
func (self *Program) PINSRB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("PINSRB", 3, Operands { v0, v1, v2 })
    // PINSRB imm8, r32, xmm
    if isImm8(v0) && isReg32(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // mandatory 66 prefix
            m.rexo(hcode(v[2]), v[1], false)              // REX prefix, if required for the operands
            m.emit(0x0f)                                  // opcode: 0F 3A 20
            m.emit(0x3a)
            m.emit(0x20)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1])) // ModRM: mod=11 (register), reg=v2 (dst), rm=v1 (src)
            m.imm1(toImmAny(v[0]))                        // trailing imm8 operand
        })
    }
    // PINSRB imm8, m8, xmm
    if isImm8(v0) && isM8(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // mandatory 66 prefix
            m.rexo(hcode(v[2]), addr(v[1]), false)        // REX prefix, if required by the addressing form
            m.emit(0x0f)                                  // opcode: 0F 3A 20
            m.emit(0x3a)
            m.emit(0x20)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)            // ModRM/SIB/displacement for the m8 operand
            m.imm1(toImmAny(v[0]))                        // trailing imm8 operand
        })
    }
    // No form matched the given operand types.
    if p.len == 0 {
        panic("invalid operands for PINSRB")
    }
    return p
}
 21668  
// PINSRD performs "Insert Doubleword".
//
// Mnemonic        : PINSRD
// Supported forms : (2 forms)
//
//    * PINSRD imm8, r32, xmm    [SSE4.1]
//    * PINSRD imm8, m32, xmm    [SSE4.1]
//
func (self *Program) PINSRD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("PINSRD", 3, Operands { v0, v1, v2 })
    // PINSRD imm8, r32, xmm
    if isImm8(v0) && isReg32(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // mandatory 66 prefix
            m.rexo(hcode(v[2]), v[1], false)              // REX prefix, if required for the operands
            m.emit(0x0f)                                  // opcode: 0F 3A 22
            m.emit(0x3a)
            m.emit(0x22)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1])) // ModRM: mod=11 (register), reg=v2 (dst), rm=v1 (src)
            m.imm1(toImmAny(v[0]))                        // trailing imm8 operand
        })
    }
    // PINSRD imm8, m32, xmm
    if isImm8(v0) && isM32(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // mandatory 66 prefix
            m.rexo(hcode(v[2]), addr(v[1]), false)        // REX prefix, if required by the addressing form
            m.emit(0x0f)                                  // opcode: 0F 3A 22
            m.emit(0x3a)
            m.emit(0x22)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)            // ModRM/SIB/displacement for the m32 operand
            m.imm1(toImmAny(v[0]))                        // trailing imm8 operand
        })
    }
    // No form matched the given operand types.
    if p.len == 0 {
        panic("invalid operands for PINSRD")
    }
    return p
}
 21712  
// PINSRQ performs "Insert Quadword".
//
// Mnemonic        : PINSRQ
// Supported forms : (2 forms)
//
//    * PINSRQ imm8, r64, xmm    [SSE4.1]
//    * PINSRQ imm8, m64, xmm    [SSE4.1]
//
func (self *Program) PINSRQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("PINSRQ", 3, Operands { v0, v1, v2 })
    // PINSRQ imm8, r64, xmm
    if isImm8(v0) && isReg64(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // mandatory 66 prefix
            m.emit(0x48 | hcode(v[2]) << 2 | hcode(v[1])) // mandatory REX.W (0x48) with R/B bits from the high register codes
            m.emit(0x0f)                                  // opcode: 0F 3A 22
            m.emit(0x3a)
            m.emit(0x22)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1])) // ModRM: mod=11 (register), reg=v2 (dst), rm=v1 (src)
            m.imm1(toImmAny(v[0]))                        // trailing imm8 operand
        })
    }
    // PINSRQ imm8, m64, xmm
    if isImm8(v0) && isM64(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // mandatory 66 prefix
            m.rexm(1, hcode(v[2]), addr(v[1]))            // REX prefix with W=1 for the 64-bit operand (rexm defined elsewhere)
            m.emit(0x0f)                                  // opcode: 0F 3A 22
            m.emit(0x3a)
            m.emit(0x22)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)            // ModRM/SIB/displacement for the m64 operand
            m.imm1(toImmAny(v[0]))                        // trailing imm8 operand
        })
    }
    // No form matched the given operand types.
    if p.len == 0 {
        panic("invalid operands for PINSRQ")
    }
    return p
}
 21756  
// PINSRW performs "Insert Word".
//
// Mnemonic        : PINSRW
// Supported forms : (4 forms)
//
//    * PINSRW imm8, r32, mm     [MMX+]
//    * PINSRW imm8, m16, mm     [MMX+]
//    * PINSRW imm8, r32, xmm    [SSE2]
//    * PINSRW imm8, m16, xmm    [SSE2]
//
func (self *Program) PINSRW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("PINSRW", 3, Operands { v0, v1, v2 })
    // PINSRW imm8, r32, mm
    if isImm8(v0) && isReg32(v1) && isMM(v2) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[2]), v[1], false)              // REX prefix, if required for the operands
            m.emit(0x0f)                                  // opcode: 0F C4
            m.emit(0xc4)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1])) // ModRM: mod=11 (register), reg=v2 (dst), rm=v1 (src)
            m.imm1(toImmAny(v[0]))                        // trailing imm8 operand
        })
    }
    // PINSRW imm8, m16, mm
    if isImm8(v0) && isM16(v1) && isMM(v2) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[2]), addr(v[1]), false)        // REX prefix, if required by the addressing form
            m.emit(0x0f)                                  // opcode: 0F C4
            m.emit(0xc4)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)            // ModRM/SIB/displacement for the m16 operand
            m.imm1(toImmAny(v[0]))                        // trailing imm8 operand
        })
    }
    // PINSRW imm8, r32, xmm
    if isImm8(v0) && isReg32(v1) && isXMM(v2) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // 66 prefix selects the SSE2 XMM form
            m.rexo(hcode(v[2]), v[1], false)              // REX prefix, if required for the operands
            m.emit(0x0f)                                  // opcode: 0F C4
            m.emit(0xc4)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1])) // ModRM: mod=11 (register), reg=v2 (dst), rm=v1 (src)
            m.imm1(toImmAny(v[0]))                        // trailing imm8 operand
        })
    }
    // PINSRW imm8, m16, xmm
    if isImm8(v0) && isM16(v1) && isXMM(v2) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // 66 prefix selects the SSE2 XMM form
            m.rexo(hcode(v[2]), addr(v[1]), false)        // REX prefix, if required by the addressing form
            m.emit(0x0f)                                  // opcode: 0F C4
            m.emit(0xc4)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)            // ModRM/SIB/displacement for the m16 operand
            m.imm1(toImmAny(v[0]))                        // trailing imm8 operand
        })
    }
    // No form matched the given operand types.
    if p.len == 0 {
        panic("invalid operands for PINSRW")
    }
    return p
}
 21824  
// PMADDUBSW performs "Multiply and Add Packed Signed and Unsigned Byte Integers".
//
// Mnemonic        : PMADDUBSW
// Supported forms : (4 forms)
//
//    * PMADDUBSW mm, mm       [SSSE3]
//    * PMADDUBSW m64, mm      [SSSE3]
//    * PMADDUBSW xmm, xmm     [SSSE3]
//    * PMADDUBSW m128, xmm    [SSSE3]
//
// It panics if the operands do not match any of the forms above.
func (self *Program) PMADDUBSW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMADDUBSW", 2, Operands { v0, v1 })
    // PMADDUBSW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F 38 04 /r: optional REX, three-byte opcode (0F 38 map), register-direct ModRM (reg=v[1], rm=v[0])
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x04)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMADDUBSW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F 38 04 /r with memory operand: mrsd emits ModRM/SIB/displacement
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x04)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PMADDUBSW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 38 04 /r: 0x66 prefix selects the XMM form; register-direct ModRM
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x04)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMADDUBSW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 38 04 /r with memory operand: mrsd emits ModRM/SIB/displacement
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x04)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PMADDUBSW")
    }
    return p
}
 21892  
// PMADDWD performs "Multiply and Add Packed Signed Word Integers".
//
// Mnemonic        : PMADDWD
// Supported forms : (4 forms)
//
//    * PMADDWD mm, mm       [MMX]
//    * PMADDWD m64, mm      [MMX]
//    * PMADDWD xmm, xmm     [SSE2]
//    * PMADDWD m128, xmm    [SSE2]
//
// It panics if the operands do not match any of the forms above.
func (self *Program) PMADDWD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMADDWD", 2, Operands { v0, v1 })
    // PMADDWD mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F F5 /r: optional REX, opcode, register-direct ModRM (reg=v[1], rm=v[0])
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xf5)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMADDWD m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F F5 /r with memory operand: mrsd emits ModRM/SIB/displacement
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xf5)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PMADDWD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F F5 /r: 0x66 prefix selects the XMM form; register-direct ModRM
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xf5)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMADDWD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F F5 /r with memory operand: mrsd emits ModRM/SIB/displacement
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xf5)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PMADDWD")
    }
    return p
}
 21956  
// PMAXSB performs "Maximum of Packed Signed Byte Integers".
//
// Mnemonic        : PMAXSB
// Supported forms : (2 forms)
//
//    * PMAXSB xmm, xmm     [SSE4.1]
//    * PMAXSB m128, xmm    [SSE4.1]
//
// It panics if the operands do not match any of the forms above.
func (self *Program) PMAXSB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMAXSB", 2, Operands { v0, v1 })
    // PMAXSB xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 38 3C /r: 0x66 prefix, optional REX, 0F 38 map opcode, register-direct ModRM (reg=v[1], rm=v[0])
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x3c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMAXSB m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 38 3C /r with memory operand: mrsd emits ModRM/SIB/displacement
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x3c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PMAXSB")
    }
    return p
}
 21998  
// PMAXSD performs "Maximum of Packed Signed Doubleword Integers".
//
// Mnemonic        : PMAXSD
// Supported forms : (2 forms)
//
//    * PMAXSD xmm, xmm     [SSE4.1]
//    * PMAXSD m128, xmm    [SSE4.1]
//
// It panics if the operands do not match any of the forms above.
func (self *Program) PMAXSD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMAXSD", 2, Operands { v0, v1 })
    // PMAXSD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 38 3D /r: 0x66 prefix, optional REX, 0F 38 map opcode, register-direct ModRM (reg=v[1], rm=v[0])
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x3d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMAXSD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 38 3D /r with memory operand: mrsd emits ModRM/SIB/displacement
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x3d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PMAXSD")
    }
    return p
}
 22040  
// PMAXSW performs "Maximum of Packed Signed Word Integers".
//
// Mnemonic        : PMAXSW
// Supported forms : (4 forms)
//
//    * PMAXSW mm, mm       [MMX+]
//    * PMAXSW m64, mm      [MMX+]
//    * PMAXSW xmm, xmm     [SSE2]
//    * PMAXSW m128, xmm    [SSE2]
//
// It panics if the operands do not match any of the forms above.
func (self *Program) PMAXSW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMAXSW", 2, Operands { v0, v1 })
    // PMAXSW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F EE /r: optional REX, opcode, register-direct ModRM (reg=v[1], rm=v[0])
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xee)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMAXSW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F EE /r with memory operand: mrsd emits ModRM/SIB/displacement
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xee)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PMAXSW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F EE /r: 0x66 prefix selects the XMM form; register-direct ModRM
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xee)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMAXSW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F EE /r with memory operand: mrsd emits ModRM/SIB/displacement
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xee)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PMAXSW")
    }
    return p
}
 22104  
// PMAXUB performs "Maximum of Packed Unsigned Byte Integers".
//
// Mnemonic        : PMAXUB
// Supported forms : (4 forms)
//
//    * PMAXUB mm, mm       [MMX+]
//    * PMAXUB m64, mm      [MMX+]
//    * PMAXUB xmm, xmm     [SSE2]
//    * PMAXUB m128, xmm    [SSE2]
//
// It panics if the operands do not match any of the forms above.
func (self *Program) PMAXUB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMAXUB", 2, Operands { v0, v1 })
    // PMAXUB mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F DE /r: optional REX, opcode, register-direct ModRM (reg=v[1], rm=v[0])
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xde)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMAXUB m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F DE /r with memory operand: mrsd emits ModRM/SIB/displacement
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xde)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PMAXUB xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F DE /r: 0x66 prefix selects the XMM form; register-direct ModRM
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xde)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMAXUB m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F DE /r with memory operand: mrsd emits ModRM/SIB/displacement
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xde)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PMAXUB")
    }
    return p
}
 22168  
// PMAXUD performs "Maximum of Packed Unsigned Doubleword Integers".
//
// Mnemonic        : PMAXUD
// Supported forms : (2 forms)
//
//    * PMAXUD xmm, xmm     [SSE4.1]
//    * PMAXUD m128, xmm    [SSE4.1]
//
// It panics if the operands do not match any of the forms above.
func (self *Program) PMAXUD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMAXUD", 2, Operands { v0, v1 })
    // PMAXUD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 38 3F /r: 0x66 prefix, optional REX, 0F 38 map opcode, register-direct ModRM (reg=v[1], rm=v[0])
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x3f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMAXUD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 38 3F /r with memory operand: mrsd emits ModRM/SIB/displacement
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x3f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PMAXUD")
    }
    return p
}
 22210  
// PMAXUW performs "Maximum of Packed Unsigned Word Integers".
//
// Mnemonic        : PMAXUW
// Supported forms : (2 forms)
//
//    * PMAXUW xmm, xmm     [SSE4.1]
//    * PMAXUW m128, xmm    [SSE4.1]
//
// It panics if the operands do not match any of the forms above.
func (self *Program) PMAXUW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMAXUW", 2, Operands { v0, v1 })
    // PMAXUW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 38 3E /r: 0x66 prefix, optional REX, 0F 38 map opcode, register-direct ModRM (reg=v[1], rm=v[0])
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x3e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMAXUW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 38 3E /r with memory operand: mrsd emits ModRM/SIB/displacement
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x3e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PMAXUW")
    }
    return p
}
 22252  
// PMINSB performs "Minimum of Packed Signed Byte Integers".
//
// Mnemonic        : PMINSB
// Supported forms : (2 forms)
//
//    * PMINSB xmm, xmm     [SSE4.1]
//    * PMINSB m128, xmm    [SSE4.1]
//
// It panics if the operands do not match any of the forms above.
func (self *Program) PMINSB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMINSB", 2, Operands { v0, v1 })
    // PMINSB xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 38 38 /r: 0x66 prefix, optional REX, 0F 38 map opcode, register-direct ModRM (reg=v[1], rm=v[0])
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x38)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMINSB m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 38 38 /r with memory operand: mrsd emits ModRM/SIB/displacement
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x38)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PMINSB")
    }
    return p
}
 22294  
// PMINSD performs "Minimum of Packed Signed Doubleword Integers".
//
// Mnemonic        : PMINSD
// Supported forms : (2 forms)
//
//    * PMINSD xmm, xmm     [SSE4.1]
//    * PMINSD m128, xmm    [SSE4.1]
//
// It panics if the operands do not match any of the forms above.
func (self *Program) PMINSD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMINSD", 2, Operands { v0, v1 })
    // PMINSD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 38 39 /r: 0x66 prefix, optional REX, 0F 38 map opcode, register-direct ModRM (reg=v[1], rm=v[0])
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x39)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMINSD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 38 39 /r with memory operand: mrsd emits ModRM/SIB/displacement
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x39)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PMINSD")
    }
    return p
}
 22336  
// PMINSW performs "Minimum of Packed Signed Word Integers".
//
// Mnemonic        : PMINSW
// Supported forms : (4 forms)
//
//    * PMINSW mm, mm       [MMX+]
//    * PMINSW m64, mm      [MMX+]
//    * PMINSW xmm, xmm     [SSE2]
//    * PMINSW m128, xmm    [SSE2]
//
// It panics if the operands do not match any of the forms above.
func (self *Program) PMINSW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMINSW", 2, Operands { v0, v1 })
    // PMINSW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F EA /r: optional REX, opcode, register-direct ModRM (reg=v[1], rm=v[0])
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xea)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMINSW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F EA /r with memory operand: mrsd emits ModRM/SIB/displacement
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xea)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PMINSW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F EA /r: 0x66 prefix selects the XMM form; register-direct ModRM
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xea)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMINSW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F EA /r with memory operand: mrsd emits ModRM/SIB/displacement
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xea)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PMINSW")
    }
    return p
}
 22400  
// PMINUB performs "Minimum of Packed Unsigned Byte Integers".
//
// Mnemonic        : PMINUB
// Supported forms : (4 forms)
//
//    * PMINUB mm, mm       [MMX+]
//    * PMINUB m64, mm      [MMX+]
//    * PMINUB xmm, xmm     [SSE2]
//    * PMINUB m128, xmm    [SSE2]
//
// It panics if the operands do not match any of the forms above.
func (self *Program) PMINUB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMINUB", 2, Operands { v0, v1 })
    // PMINUB mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F DA /r: optional REX, opcode, register-direct ModRM (reg=v[1], rm=v[0])
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xda)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMINUB m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F DA /r with memory operand: mrsd emits ModRM/SIB/displacement
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xda)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PMINUB xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F DA /r: 0x66 prefix selects the XMM form; register-direct ModRM
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xda)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMINUB m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F DA /r with memory operand: mrsd emits ModRM/SIB/displacement
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xda)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PMINUB")
    }
    return p
}
 22464  
// PMINUD performs "Minimum of Packed Unsigned Doubleword Integers".
//
// Mnemonic        : PMINUD
// Supported forms : (2 forms)
//
//    * PMINUD xmm, xmm     [SSE4.1]
//    * PMINUD m128, xmm    [SSE4.1]
//
// It panics if the operands do not match any of the forms above.
func (self *Program) PMINUD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMINUD", 2, Operands { v0, v1 })
    // PMINUD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 38 3B /r: 0x66 prefix, optional REX, 0F 38 map opcode, register-direct ModRM (reg=v[1], rm=v[0])
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x3b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMINUD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 38 3B /r with memory operand: mrsd emits ModRM/SIB/displacement
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x3b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PMINUD")
    }
    return p
}
 22506  
// PMINUW performs "Minimum of Packed Unsigned Word Integers".
//
// Mnemonic        : PMINUW
// Supported forms : (2 forms)
//
//    * PMINUW xmm, xmm     [SSE4.1]
//    * PMINUW m128, xmm    [SSE4.1]
//
// It panics if the operands do not match any of the forms above.
func (self *Program) PMINUW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMINUW", 2, Operands { v0, v1 })
    // PMINUW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 38 3A /r: 0x66 prefix, optional REX, 0F 38 map opcode, register-direct ModRM (reg=v[1], rm=v[0])
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x3a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMINUW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 38 3A /r with memory operand: mrsd emits ModRM/SIB/displacement
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x3a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PMINUW")
    }
    return p
}
 22548  
// PMOVMSKB performs "Move Byte Mask".
//
// Mnemonic        : PMOVMSKB
// Supported forms : (2 forms)
//
//    * PMOVMSKB mm, r32     [MMX+]
//    * PMOVMSKB xmm, r32    [SSE2]
//
// It panics if the operands do not match any of the forms above.
func (self *Program) PMOVMSKB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMOVMSKB", 2, Operands { v0, v1 })
    // PMOVMSKB mm, r32
    if isMM(v0) && isReg32(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F D7 /r: optional REX, opcode, register-direct ModRM (reg=v[1] r32 dest, rm=v[0] mm src)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xd7)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMOVMSKB xmm, r32
    if isXMM(v0) && isReg32(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F D7 /r: 0x66 prefix selects the XMM form; register-direct ModRM
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xd7)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for PMOVMSKB")
    }
    return p
}
 22587  
// PMOVSXBD performs "Move Packed Byte Integers to Doubleword Integers with Sign Extension".
//
// Mnemonic        : PMOVSXBD
// Supported forms : (2 forms)
//
//    * PMOVSXBD xmm, xmm    [SSE4.1]
//    * PMOVSXBD m32, xmm    [SSE4.1]
//
// It panics if the operands do not match any of the forms above.
func (self *Program) PMOVSXBD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMOVSXBD", 2, Operands { v0, v1 })
    // PMOVSXBD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 38 21 /r: 0x66 prefix, optional REX, 0F 38 map opcode, register-direct ModRM (reg=v[1], rm=v[0])
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x21)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMOVSXBD m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 38 21 /r with memory operand: mrsd emits ModRM/SIB/displacement
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x21)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PMOVSXBD")
    }
    return p
}
 22629  
// PMOVSXBQ performs "Move Packed Byte Integers to Quadword Integers with Sign Extension".
//
// Mnemonic        : PMOVSXBQ
// Supported forms : (2 forms)
//
//    * PMOVSXBQ xmm, xmm    [SSE4.1]
//    * PMOVSXBQ m16, xmm    [SSE4.1]
//
// It panics if the operands do not match any of the forms above.
func (self *Program) PMOVSXBQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMOVSXBQ", 2, Operands { v0, v1 })
    // PMOVSXBQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 38 22 /r: 0x66 prefix, optional REX, 0F 38 map opcode, register-direct ModRM (reg=v[1], rm=v[0])
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x22)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMOVSXBQ m16, xmm
    if isM16(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 38 22 /r with memory operand: mrsd emits ModRM/SIB/displacement
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x22)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PMOVSXBQ")
    }
    return p
}
 22671  
 22672  // PMOVSXBW performs "Move Packed Byte Integers to Word Integers with Sign Extension".
 22673  //
 22674  // Mnemonic        : PMOVSXBW
 22675  // Supported forms : (2 forms)
 22676  //
 22677  //    * PMOVSXBW xmm, xmm    [SSE4.1]
 22678  //    * PMOVSXBW m64, xmm    [SSE4.1]
 22679  //
func (self *Program) PMOVSXBW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMOVSXBW", 2, Operands { v0, v1 })
    // PMOVSXBW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 0F 38 20 /r, register-direct ModRM (reg = destination, rm = source)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x20)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMOVSXBW m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 0F 38 20 /r with a memory source (ModRM/SIB/displacement emitted by mrsd)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x20)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PMOVSXBW")
    }
    return p
}
 22713  
 22714  // PMOVSXDQ performs "Move Packed Doubleword Integers to Quadword Integers with Sign Extension".
 22715  //
 22716  // Mnemonic        : PMOVSXDQ
 22717  // Supported forms : (2 forms)
 22718  //
 22719  //    * PMOVSXDQ xmm, xmm    [SSE4.1]
 22720  //    * PMOVSXDQ m64, xmm    [SSE4.1]
 22721  //
func (self *Program) PMOVSXDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMOVSXDQ", 2, Operands { v0, v1 })
    // PMOVSXDQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 0F 38 25 /r, register-direct ModRM (reg = destination, rm = source)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x25)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMOVSXDQ m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 0F 38 25 /r with a memory source (ModRM/SIB/displacement emitted by mrsd)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x25)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PMOVSXDQ")
    }
    return p
}
 22755  
 22756  // PMOVSXWD performs "Move Packed Word Integers to Doubleword Integers with Sign Extension".
 22757  //
 22758  // Mnemonic        : PMOVSXWD
 22759  // Supported forms : (2 forms)
 22760  //
 22761  //    * PMOVSXWD xmm, xmm    [SSE4.1]
 22762  //    * PMOVSXWD m64, xmm    [SSE4.1]
 22763  //
func (self *Program) PMOVSXWD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMOVSXWD", 2, Operands { v0, v1 })
    // PMOVSXWD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 0F 38 23 /r, register-direct ModRM (reg = destination, rm = source)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x23)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMOVSXWD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 0F 38 23 /r with a memory source (ModRM/SIB/displacement emitted by mrsd)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x23)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PMOVSXWD")
    }
    return p
}
 22797  
 22798  // PMOVSXWQ performs "Move Packed Word Integers to Quadword Integers with Sign Extension".
 22799  //
 22800  // Mnemonic        : PMOVSXWQ
 22801  // Supported forms : (2 forms)
 22802  //
 22803  //    * PMOVSXWQ xmm, xmm    [SSE4.1]
 22804  //    * PMOVSXWQ m32, xmm    [SSE4.1]
 22805  //
func (self *Program) PMOVSXWQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMOVSXWQ", 2, Operands { v0, v1 })
    // PMOVSXWQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 0F 38 24 /r, register-direct ModRM (reg = destination, rm = source)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x24)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMOVSXWQ m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 0F 38 24 /r with a memory source (ModRM/SIB/displacement emitted by mrsd)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x24)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PMOVSXWQ")
    }
    return p
}
 22839  
 22840  // PMOVZXBD performs "Move Packed Byte Integers to Doubleword Integers with Zero Extension".
 22841  //
 22842  // Mnemonic        : PMOVZXBD
 22843  // Supported forms : (2 forms)
 22844  //
 22845  //    * PMOVZXBD xmm, xmm    [SSE4.1]
 22846  //    * PMOVZXBD m32, xmm    [SSE4.1]
 22847  //
func (self *Program) PMOVZXBD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMOVZXBD", 2, Operands { v0, v1 })
    // PMOVZXBD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 0F 38 31 /r, register-direct ModRM (reg = destination, rm = source)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x31)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMOVZXBD m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 0F 38 31 /r with a memory source (ModRM/SIB/displacement emitted by mrsd)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x31)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PMOVZXBD")
    }
    return p
}
 22881  
 22882  // PMOVZXBQ performs "Move Packed Byte Integers to Quadword Integers with Zero Extension".
 22883  //
 22884  // Mnemonic        : PMOVZXBQ
 22885  // Supported forms : (2 forms)
 22886  //
 22887  //    * PMOVZXBQ xmm, xmm    [SSE4.1]
 22888  //    * PMOVZXBQ m16, xmm    [SSE4.1]
 22889  //
func (self *Program) PMOVZXBQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMOVZXBQ", 2, Operands { v0, v1 })
    // PMOVZXBQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 0F 38 32 /r, register-direct ModRM (reg = destination, rm = source)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x32)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMOVZXBQ m16, xmm
    if isM16(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 0F 38 32 /r with a memory source (ModRM/SIB/displacement emitted by mrsd)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x32)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PMOVZXBQ")
    }
    return p
}
 22923  
 22924  // PMOVZXBW performs "Move Packed Byte Integers to Word Integers with Zero Extension".
 22925  //
 22926  // Mnemonic        : PMOVZXBW
 22927  // Supported forms : (2 forms)
 22928  //
 22929  //    * PMOVZXBW xmm, xmm    [SSE4.1]
 22930  //    * PMOVZXBW m64, xmm    [SSE4.1]
 22931  //
func (self *Program) PMOVZXBW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMOVZXBW", 2, Operands { v0, v1 })
    // PMOVZXBW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 0F 38 30 /r, register-direct ModRM (reg = destination, rm = source)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x30)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMOVZXBW m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 0F 38 30 /r with a memory source (ModRM/SIB/displacement emitted by mrsd)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x30)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PMOVZXBW")
    }
    return p
}
 22965  
 22966  // PMOVZXDQ performs "Move Packed Doubleword Integers to Quadword Integers with Zero Extension".
 22967  //
 22968  // Mnemonic        : PMOVZXDQ
 22969  // Supported forms : (2 forms)
 22970  //
 22971  //    * PMOVZXDQ xmm, xmm    [SSE4.1]
 22972  //    * PMOVZXDQ m64, xmm    [SSE4.1]
 22973  //
func (self *Program) PMOVZXDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMOVZXDQ", 2, Operands { v0, v1 })
    // PMOVZXDQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 0F 38 35 /r, register-direct ModRM (reg = destination, rm = source)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x35)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMOVZXDQ m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 0F 38 35 /r with a memory source (ModRM/SIB/displacement emitted by mrsd)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x35)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PMOVZXDQ")
    }
    return p
}
 23007  
 23008  // PMOVZXWD performs "Move Packed Word Integers to Doubleword Integers with Zero Extension".
 23009  //
 23010  // Mnemonic        : PMOVZXWD
 23011  // Supported forms : (2 forms)
 23012  //
 23013  //    * PMOVZXWD xmm, xmm    [SSE4.1]
 23014  //    * PMOVZXWD m64, xmm    [SSE4.1]
 23015  //
func (self *Program) PMOVZXWD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMOVZXWD", 2, Operands { v0, v1 })
    // PMOVZXWD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 0F 38 33 /r, register-direct ModRM (reg = destination, rm = source)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x33)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMOVZXWD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 0F 38 33 /r with a memory source (ModRM/SIB/displacement emitted by mrsd)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x33)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PMOVZXWD")
    }
    return p
}
 23049  
 23050  // PMOVZXWQ performs "Move Packed Word Integers to Quadword Integers with Zero Extension".
 23051  //
 23052  // Mnemonic        : PMOVZXWQ
 23053  // Supported forms : (2 forms)
 23054  //
 23055  //    * PMOVZXWQ xmm, xmm    [SSE4.1]
 23056  //    * PMOVZXWQ m32, xmm    [SSE4.1]
 23057  //
func (self *Program) PMOVZXWQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMOVZXWQ", 2, Operands { v0, v1 })
    // PMOVZXWQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 0F 38 34 /r, register-direct ModRM (reg = destination, rm = source)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x34)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMOVZXWQ m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 0F 38 34 /r with a memory source (ModRM/SIB/displacement emitted by mrsd)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x34)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PMOVZXWQ")
    }
    return p
}
 23091  
 23092  // PMULDQ performs "Multiply Packed Signed Doubleword Integers and Store Quadword Result".
 23093  //
 23094  // Mnemonic        : PMULDQ
 23095  // Supported forms : (2 forms)
 23096  //
 23097  //    * PMULDQ xmm, xmm     [SSE4.1]
 23098  //    * PMULDQ m128, xmm    [SSE4.1]
 23099  //
func (self *Program) PMULDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMULDQ", 2, Operands { v0, v1 })
    // PMULDQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 0F 38 28 /r, register-direct ModRM (reg = destination, rm = source)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x28)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMULDQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 0F 38 28 /r with a memory source (ModRM/SIB/displacement emitted by mrsd)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x28)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PMULDQ")
    }
    return p
}
 23133  
 23134  // PMULHRSW performs "Packed Multiply Signed Word Integers and Store High Result with Round and Scale".
 23135  //
 23136  // Mnemonic        : PMULHRSW
 23137  // Supported forms : (4 forms)
 23138  //
 23139  //    * PMULHRSW mm, mm       [SSSE3]
 23140  //    * PMULHRSW m64, mm      [SSSE3]
 23141  //    * PMULHRSW xmm, xmm     [SSSE3]
 23142  //    * PMULHRSW m128, xmm    [SSSE3]
 23143  //
func (self *Program) PMULHRSW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMULHRSW", 2, Operands { v0, v1 })
    // PMULHRSW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        // MMX form: 0F 38 0B /r (no 66 prefix), register-direct ModRM
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x0b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMULHRSW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        // MMX form with memory source: 0F 38 0B /r (ModRM/SIB via mrsd)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x0b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PMULHRSW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        // XMM form: 66 0F 38 0B /r, register-direct ModRM
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x0b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMULHRSW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        // XMM form with memory source: 66 0F 38 0B /r (ModRM/SIB via mrsd)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x0b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PMULHRSW")
    }
    return p
}
 23201  
 23202  // PMULHRW performs "Packed Multiply High Rounded Word".
 23203  //
 23204  // Mnemonic        : PMULHRW
 23205  // Supported forms : (2 forms)
 23206  //
 23207  //    * PMULHRW mm, mm     [3dnow!]
 23208  //    * PMULHRW m64, mm    [3dnow!]
 23209  //
func (self *Program) PMULHRW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMULHRW", 2, Operands { v0, v1 })
    // PMULHRW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        // 3DNow! encoding: 0F 0F /r with the opcode suffix byte (0xB7)
        // emitted AFTER the ModRM byte, unlike ordinary two-byte opcodes.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
            m.emit(0xb7)
        })
    }
    // PMULHRW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW)
        p.domain = DomainAMDSpecific
        // 3DNow! memory form: 0F 0F /r, suffix byte 0xB7 follows the ModRM/SIB/displacement.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
            m.emit(0xb7)
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PMULHRW")
    }
    return p
}
 23241  
 23242  // PMULHUW performs "Multiply Packed Unsigned Word Integers and Store High Result".
 23243  //
 23244  // Mnemonic        : PMULHUW
 23245  // Supported forms : (4 forms)
 23246  //
 23247  //    * PMULHUW mm, mm       [MMX+]
 23248  //    * PMULHUW m64, mm      [MMX+]
 23249  //    * PMULHUW xmm, xmm     [SSE2]
 23250  //    * PMULHUW m128, xmm    [SSE2]
 23251  //
func (self *Program) PMULHUW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMULHUW", 2, Operands { v0, v1 })
    // PMULHUW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        // MMX form: 0F E4 /r (no 66 prefix), register-direct ModRM
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xe4)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMULHUW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        // MMX form with memory source: 0F E4 /r (ModRM/SIB via mrsd)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xe4)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PMULHUW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // XMM form: 66 0F E4 /r, register-direct ModRM
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xe4)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMULHUW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // XMM form with memory source: 66 0F E4 /r (ModRM/SIB via mrsd)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xe4)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PMULHUW")
    }
    return p
}
 23305  
 23306  // PMULHW performs "Multiply Packed Signed Word Integers and Store High Result".
 23307  //
 23308  // Mnemonic        : PMULHW
 23309  // Supported forms : (4 forms)
 23310  //
 23311  //    * PMULHW mm, mm       [MMX]
 23312  //    * PMULHW m64, mm      [MMX]
 23313  //    * PMULHW xmm, xmm     [SSE2]
 23314  //    * PMULHW m128, xmm    [SSE2]
 23315  //
func (self *Program) PMULHW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMULHW", 2, Operands { v0, v1 })
    // PMULHW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        // MMX form: 0F E5 /r (no 66 prefix), register-direct ModRM
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xe5)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMULHW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        // MMX form with memory source: 0F E5 /r (ModRM/SIB via mrsd)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xe5)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PMULHW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // XMM form: 66 0F E5 /r, register-direct ModRM
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xe5)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMULHW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // XMM form with memory source: 66 0F E5 /r (ModRM/SIB via mrsd)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xe5)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PMULHW")
    }
    return p
}
 23369  
 23370  // PMULLD performs "Multiply Packed Signed Doubleword Integers and Store Low Result".
 23371  //
 23372  // Mnemonic        : PMULLD
 23373  // Supported forms : (2 forms)
 23374  //
 23375  //    * PMULLD xmm, xmm     [SSE4.1]
 23376  //    * PMULLD m128, xmm    [SSE4.1]
 23377  //
func (self *Program) PMULLD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMULLD", 2, Operands { v0, v1 })
    // PMULLD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 0F 38 40 /r, register-direct ModRM (reg = destination, rm = source)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x40)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMULLD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        // encoding: 66 0F 38 40 /r with a memory source (ModRM/SIB/displacement emitted by mrsd)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x40)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PMULLD")
    }
    return p
}
 23411  
 23412  // PMULLW performs "Multiply Packed Signed Word Integers and Store Low Result".
 23413  //
 23414  // Mnemonic        : PMULLW
 23415  // Supported forms : (4 forms)
 23416  //
 23417  //    * PMULLW mm, mm       [MMX]
 23418  //    * PMULLW m64, mm      [MMX]
 23419  //    * PMULLW xmm, xmm     [SSE2]
 23420  //    * PMULLW m128, xmm    [SSE2]
 23421  //
func (self *Program) PMULLW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMULLW", 2, Operands { v0, v1 })
    // PMULLW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        // MMX form: 0F D5 /r (no 66 prefix), register-direct ModRM
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xd5)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMULLW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        // MMX form with memory source: 0F D5 /r (ModRM/SIB via mrsd)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xd5)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PMULLW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // XMM form: 66 0F D5 /r, register-direct ModRM
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xd5)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMULLW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // XMM form with memory source: 66 0F D5 /r (ModRM/SIB via mrsd)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xd5)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PMULLW")
    }
    return p
}
 23475  
 23476  // PMULUDQ performs "Multiply Packed Unsigned Doubleword Integers".
 23477  //
 23478  // Mnemonic        : PMULUDQ
 23479  // Supported forms : (4 forms)
 23480  //
 23481  //    * PMULUDQ mm, mm       [SSE2]
 23482  //    * PMULUDQ m64, mm      [SSE2]
 23483  //    * PMULUDQ xmm, xmm     [SSE2]
 23484  //    * PMULUDQ m128, xmm    [SSE2]
 23485  //
func (self *Program) PMULUDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PMULUDQ", 2, Operands { v0, v1 })
    // PMULUDQ mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // MMX form: 0F F4 /r (no 66 prefix), register-direct ModRM
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xf4)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMULUDQ m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // MMX form with memory source: 0F F4 /r (ModRM/SIB via mrsd)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xf4)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PMULUDQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // XMM form: 66 0F F4 /r, register-direct ModRM
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xf4)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PMULUDQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // XMM form with memory source: 66 0F F4 /r (ModRM/SIB via mrsd)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xf4)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for PMULUDQ")
    }
    return p
}
 23539  
// POPCNTL performs "Count of Number of Bits Set to 1".
//
// Mnemonic        : POPCNT
// Supported forms : (2 forms)
//
//    * POPCNTL r32, r32    [POPCNT]
//    * POPCNTL m32, r32    [POPCNT]
//
func (self *Program) POPCNTL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("POPCNTL", 2, Operands { v0, v1 })
    // POPCNTL r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_POPCNT)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: F3 [REX] 0F B8 /r — F3 is a mandatory prefix for POPCNT.
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xb8)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // POPCNTL m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_POPCNT)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory-source form; mrsd emits ModRM/SIB/displacement.
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xb8)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No operand form matched: the arguments are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for POPCNTL")
    }
    return p
}
 23579  
// POPCNTQ performs "Count of Number of Bits Set to 1".
//
// Mnemonic        : POPCNT
// Supported forms : (2 forms)
//
//    * POPCNTQ r64, r64    [POPCNT]
//    * POPCNTQ m64, r64    [POPCNT]
//
func (self *Program) POPCNTQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("POPCNTQ", 2, Operands { v0, v1 })
    // POPCNTQ r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_POPCNT)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: F3 REX.W 0F B8 /r.
            // 0x48 sets REX.W (64-bit operand size); hcode(v[1])<<2 is REX.R
            // (ModRM.reg extension), hcode(v[0]) is REX.B (ModRM.rm extension).
            m.emit(0xf3)
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
            m.emit(0x0f)
            m.emit(0xb8)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // POPCNTQ m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_POPCNT)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory-source form; rexm(1, ...) builds the REX.W prefix for a memory operand.
            m.emit(0xf3)
            m.rexm(1, hcode(v[1]), addr(v[0]))
            m.emit(0x0f)
            m.emit(0xb8)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No operand form matched: the arguments are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for POPCNTQ")
    }
    return p
}
 23619  
// POPCNTW performs "Count of Number of Bits Set to 1".
//
// Mnemonic        : POPCNT
// Supported forms : (2 forms)
//
//    * POPCNTW r16, r16    [POPCNT]
//    * POPCNTW m16, r16    [POPCNT]
//
func (self *Program) POPCNTW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("POPCNTW", 2, Operands { v0, v1 })
    // POPCNTW r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_POPCNT)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: 66 F3 [REX] 0F B8 /r — 66 selects 16-bit operand size,
            // F3 is the mandatory POPCNT prefix.
            m.emit(0x66)
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xb8)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // POPCNTW m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_POPCNT)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory-source form; mrsd emits ModRM/SIB/displacement.
            m.emit(0x66)
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xb8)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No operand form matched: the arguments are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for POPCNTW")
    }
    return p
}
 23661  
// POPQ performs "Pop a Value from the Stack".
//
// Mnemonic        : POP
// Supported forms : (2 forms)
//
//    * POPQ r64
//    * POPQ m64
//
func (self *Program) POPQ(v0 interface{}) *Instruction {
    p := self.alloc("POPQ", 1, Operands { v0 })
    // POPQ r64
    if isReg64(v0) {
        p.domain = DomainGeneric
        // Two alternative encodings are registered for the register form;
        // no REX.W is emitted because POP defaults to 64-bit operand size.
        // Alternative 1: short form 58+rd (register encoded in the opcode byte).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], false)
            m.emit(0x58 | lcode(v[0]))
        })
        // Alternative 2: 8F /0 with register-direct ModRM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], false)
            m.emit(0x8f)
            m.emit(0xc0 | lcode(v[0]))
        })
    }
    // POPQ m64
    if isM64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: 8F /0 with ModRM/SIB/displacement for the memory operand.
            m.rexo(0, addr(v[0]), false)
            m.emit(0x8f)
            m.mrsd(0, addr(v[0]), 1)
        })
    }
    // No operand form matched: the argument is invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for POPQ")
    }
    return p
}
 23699  
// POPW performs "Pop a Value from the Stack".
//
// Mnemonic        : POP
// Supported forms : (2 forms)
//
//    * POPW r16
//    * POPW m16
//
func (self *Program) POPW(v0 interface{}) *Instruction {
    p := self.alloc("POPW", 1, Operands { v0 })
    // POPW r16
    if isReg16(v0) {
        p.domain = DomainGeneric
        // Two alternative encodings, both with the 66 prefix for 16-bit operand size.
        // Alternative 1: short form 66 58+rd.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[0], false)
            m.emit(0x58 | lcode(v[0]))
        })
        // Alternative 2: 66 8F /0 with register-direct ModRM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[0], false)
            m.emit(0x8f)
            m.emit(0xc0 | lcode(v[0]))
        })
    }
    // POPW m16
    if isM16(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: 66 8F /0 with ModRM/SIB/displacement for the memory operand.
            m.emit(0x66)
            m.rexo(0, addr(v[0]), false)
            m.emit(0x8f)
            m.mrsd(0, addr(v[0]), 1)
        })
    }
    // No operand form matched: the argument is invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for POPW")
    }
    return p
}
 23740  
// POR performs "Packed Bitwise Logical OR".
//
// Mnemonic        : POR
// Supported forms : (4 forms)
//
//    * POR mm, mm       [MMX]
//    * POR m64, mm      [MMX]
//    * POR xmm, xmm     [SSE2]
//    * POR m128, xmm    [SSE2]
//
func (self *Program) POR(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("POR", 2, Operands { v0, v1 })
    // POR mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: [REX] 0F EB /r with register-direct ModRM.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xeb)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // POR m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory-source MMX form; mrsd emits ModRM/SIB/displacement.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xeb)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // POR xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: 66 [REX] 0F EB /r — the 66 prefix selects the XMM form.
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xeb)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // POR m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xeb)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No operand form matched: the arguments are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for POR")
    }
    return p
}
 23804  
// PREFETCH performs "Prefetch Data into Caches".
//
// Mnemonic        : PREFETCH
// Supported forms : (1 form)
//
//    * PREFETCH m8    [PREFETCH]
//
func (self *Program) PREFETCH(v0 interface{}) *Instruction {
    p := self.alloc("PREFETCH", 1, Operands { v0 })
    // PREFETCH m8
    if isM8(v0) {
        self.require(ISA_PREFETCH)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: 0F 0D /0 — reg field 0 selects the plain PREFETCH variant.
            m.rexo(0, addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x0d)
            m.mrsd(0, addr(v[0]), 1)
        })
    }
    // No operand form matched: the argument is invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for PREFETCH")
    }
    return p
}
 23830  
// PREFETCHNTA performs "Prefetch Data Into Caches using NTA Hint".
//
// Mnemonic        : PREFETCHNTA
// Supported forms : (1 form)
//
//    * PREFETCHNTA m8    [MMX+]
//
func (self *Program) PREFETCHNTA(v0 interface{}) *Instruction {
    p := self.alloc("PREFETCHNTA", 1, Operands { v0 })
    // PREFETCHNTA m8
    if isM8(v0) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: 0F 18 /0 — reg field 0 selects the NTA locality hint.
            m.rexo(0, addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x18)
            m.mrsd(0, addr(v[0]), 1)
        })
    }
    // No operand form matched: the argument is invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for PREFETCHNTA")
    }
    return p
}
 23856  
// PREFETCHT0 performs "Prefetch Data Into Caches using T0 Hint".
//
// Mnemonic        : PREFETCHT0
// Supported forms : (1 form)
//
//    * PREFETCHT0 m8    [MMX+]
//
func (self *Program) PREFETCHT0(v0 interface{}) *Instruction {
    p := self.alloc("PREFETCHT0", 1, Operands { v0 })
    // PREFETCHT0 m8
    if isM8(v0) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: 0F 18 /1 — reg field 1 selects the T0 locality hint.
            m.rexo(0, addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x18)
            m.mrsd(1, addr(v[0]), 1)
        })
    }
    // No operand form matched: the argument is invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for PREFETCHT0")
    }
    return p
}
 23882  
// PREFETCHT1 performs "Prefetch Data Into Caches using T1 Hint".
//
// Mnemonic        : PREFETCHT1
// Supported forms : (1 form)
//
//    * PREFETCHT1 m8    [MMX+]
//
func (self *Program) PREFETCHT1(v0 interface{}) *Instruction {
    p := self.alloc("PREFETCHT1", 1, Operands { v0 })
    // PREFETCHT1 m8
    if isM8(v0) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: 0F 18 /2 — reg field 2 selects the T1 locality hint.
            m.rexo(0, addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x18)
            m.mrsd(2, addr(v[0]), 1)
        })
    }
    // No operand form matched: the argument is invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for PREFETCHT1")
    }
    return p
}
 23908  
// PREFETCHT2 performs "Prefetch Data Into Caches using T2 Hint".
//
// Mnemonic        : PREFETCHT2
// Supported forms : (1 form)
//
//    * PREFETCHT2 m8    [MMX+]
//
func (self *Program) PREFETCHT2(v0 interface{}) *Instruction {
    p := self.alloc("PREFETCHT2", 1, Operands { v0 })
    // PREFETCHT2 m8
    if isM8(v0) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: 0F 18 /3 — reg field 3 selects the T2 locality hint.
            m.rexo(0, addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x18)
            m.mrsd(3, addr(v[0]), 1)
        })
    }
    // No operand form matched: the argument is invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for PREFETCHT2")
    }
    return p
}
 23934  
// PREFETCHW performs "Prefetch Data into Caches in Anticipation of a Write".
//
// Mnemonic        : PREFETCHW
// Supported forms : (1 form)
//
//    * PREFETCHW m8    [PREFETCHW]
//
func (self *Program) PREFETCHW(v0 interface{}) *Instruction {
    p := self.alloc("PREFETCHW", 1, Operands { v0 })
    // PREFETCHW m8
    if isM8(v0) {
        self.require(ISA_PREFETCHW)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: 0F 0D /1 — reg field 1 selects the write-intent variant.
            m.rexo(0, addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x0d)
            m.mrsd(1, addr(v[0]), 1)
        })
    }
    // No operand form matched: the argument is invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for PREFETCHW")
    }
    return p
}
 23960  
// PREFETCHWT1 performs "Prefetch Vector Data Into Caches with Intent to Write and T1 Hint".
//
// Mnemonic        : PREFETCHWT1
// Supported forms : (1 form)
//
//    * PREFETCHWT1 m8    [PREFETCHWT1]
//
func (self *Program) PREFETCHWT1(v0 interface{}) *Instruction {
    p := self.alloc("PREFETCHWT1", 1, Operands { v0 })
    // PREFETCHWT1 m8
    if isM8(v0) {
        self.require(ISA_PREFETCHWT1)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: 0F 0D /2 — reg field 2 selects the WT1 variant.
            m.rexo(0, addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x0d)
            m.mrsd(2, addr(v[0]), 1)
        })
    }
    // No operand form matched: the argument is invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for PREFETCHWT1")
    }
    return p
}
 23986  
// PSADBW performs "Compute Sum of Absolute Differences".
//
// Mnemonic        : PSADBW
// Supported forms : (4 forms)
//
//    * PSADBW mm, mm       [MMX+]
//    * PSADBW m64, mm      [MMX+]
//    * PSADBW xmm, xmm     [SSE2]
//    * PSADBW m128, xmm    [SSE2]
//
func (self *Program) PSADBW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSADBW", 2, Operands { v0, v1 })
    // PSADBW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: [REX] 0F F6 /r with register-direct ModRM.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xf6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSADBW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory-source MMX form; mrsd emits ModRM/SIB/displacement.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xf6)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PSADBW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: 66 [REX] 0F F6 /r — the 66 prefix selects the XMM form.
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xf6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSADBW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xf6)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No operand form matched: the arguments are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for PSADBW")
    }
    return p
}
 24050  
// PSHUFB performs "Packed Shuffle Bytes".
//
// Mnemonic        : PSHUFB
// Supported forms : (4 forms)
//
//    * PSHUFB mm, mm       [SSSE3]
//    * PSHUFB m64, mm      [SSSE3]
//    * PSHUFB xmm, xmm     [SSSE3]
//    * PSHUFB m128, xmm    [SSSE3]
//
func (self *Program) PSHUFB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSHUFB", 2, Operands { v0, v1 })
    // PSHUFB mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: [REX] 0F 38 00 /r (three-byte opcode) with register-direct ModRM.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x00)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSHUFB m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory-source MMX form; mrsd emits ModRM/SIB/displacement.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x00)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PSHUFB xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: 66 [REX] 0F 38 00 /r — the 66 prefix selects the XMM form.
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x00)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSHUFB m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x00)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No operand form matched: the arguments are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for PSHUFB")
    }
    return p
}
 24118  
// PSHUFD performs "Shuffle Packed Doublewords".
//
// Mnemonic        : PSHUFD
// Supported forms : (2 forms)
//
//    * PSHUFD imm8, xmm, xmm     [SSE2]
//    * PSHUFD imm8, m128, xmm    [SSE2]
//
func (self *Program) PSHUFD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("PSHUFD", 3, Operands { v0, v1, v2 })
    // PSHUFD imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: 66 [REX] 0F 70 /r ib — the shuffle-order immediate is the trailing byte.
            m.emit(0x66)
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // PSHUFD imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory-source form; ModRM/SIB/displacement precede the immediate byte.
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x70)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No operand form matched: the arguments are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for PSHUFD")
    }
    return p
}
 24160  
// PSHUFHW performs "Shuffle Packed High Words".
//
// Mnemonic        : PSHUFHW
// Supported forms : (2 forms)
//
//    * PSHUFHW imm8, xmm, xmm     [SSE2]
//    * PSHUFHW imm8, m128, xmm    [SSE2]
//
func (self *Program) PSHUFHW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("PSHUFHW", 3, Operands { v0, v1, v2 })
    // PSHUFHW imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: F3 [REX] 0F 70 /r ib — F3 distinguishes the high-word variant.
            m.emit(0xf3)
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // PSHUFHW imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory-source form; ModRM/SIB/displacement precede the immediate byte.
            m.emit(0xf3)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x70)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No operand form matched: the arguments are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for PSHUFHW")
    }
    return p
}
 24202  
// PSHUFLW performs "Shuffle Packed Low Words".
//
// Mnemonic        : PSHUFLW
// Supported forms : (2 forms)
//
//    * PSHUFLW imm8, xmm, xmm     [SSE2]
//    * PSHUFLW imm8, m128, xmm    [SSE2]
//
func (self *Program) PSHUFLW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("PSHUFLW", 3, Operands { v0, v1, v2 })
    // PSHUFLW imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: F2 [REX] 0F 70 /r ib — F2 distinguishes the low-word variant.
            m.emit(0xf2)
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // PSHUFLW imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory-source form; ModRM/SIB/displacement precede the immediate byte.
            m.emit(0xf2)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x70)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No operand form matched: the arguments are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for PSHUFLW")
    }
    return p
}
 24244  
// PSHUFW performs "Shuffle Packed Words".
//
// Mnemonic        : PSHUFW
// Supported forms : (2 forms)
//
//    * PSHUFW imm8, mm, mm     [MMX+]
//    * PSHUFW imm8, m64, mm    [MMX+]
//
func (self *Program) PSHUFW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("PSHUFW", 3, Operands { v0, v1, v2 })
    // PSHUFW imm8, mm, mm
    if isImm8(v0) && isMM(v1) && isMM(v2) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: [REX] 0F 70 /r ib — unprefixed 0F 70 is the MMX variant.
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // PSHUFW imm8, m64, mm
    if isImm8(v0) && isM64(v1) && isMM(v2) {
        self.require(ISA_MMX_PLUS)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory-source form; ModRM/SIB/displacement precede the immediate byte.
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0x70)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No operand form matched: the arguments are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for PSHUFW")
    }
    return p
}
 24284  
// PSIGNB performs "Packed Sign of Byte Integers".
//
// Mnemonic        : PSIGNB
// Supported forms : (4 forms)
//
//    * PSIGNB mm, mm       [SSSE3]
//    * PSIGNB m64, mm      [SSSE3]
//    * PSIGNB xmm, xmm     [SSSE3]
//    * PSIGNB m128, xmm    [SSSE3]
//
func (self *Program) PSIGNB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSIGNB", 2, Operands { v0, v1 })
    // PSIGNB mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: [REX] 0F 38 08 /r (three-byte opcode) with register-direct ModRM.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x08)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSIGNB m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory-source MMX form; mrsd emits ModRM/SIB/displacement.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x08)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PSIGNB xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: 66 [REX] 0F 38 08 /r — the 66 prefix selects the XMM form.
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x08)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSIGNB m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x08)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No operand form matched: the arguments are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for PSIGNB")
    }
    return p
}
 24352  
// PSIGND performs "Packed Sign of Doubleword Integers".
//
// Mnemonic        : PSIGND
// Supported forms : (4 forms)
//
//    * PSIGND mm, mm       [SSSE3]
//    * PSIGND m64, mm      [SSSE3]
//    * PSIGND xmm, xmm     [SSSE3]
//    * PSIGND m128, xmm    [SSSE3]
//
func (self *Program) PSIGND(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSIGND", 2, Operands { v0, v1 })
    // PSIGND mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: [REX] 0F 38 0A /r (three-byte opcode) with register-direct ModRM.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x0a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSIGND m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory-source MMX form; mrsd emits ModRM/SIB/displacement.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x0a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PSIGND xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: 66 [REX] 0F 38 0A /r — the 66 prefix selects the XMM form.
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x0a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSIGND m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSSE3)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x0a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No operand form matched: the arguments are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for PSIGND")
    }
    return p
}
 24420  
 24421  // PSIGNW performs "Packed Sign of Word Integers".
 24422  //
 24423  // Mnemonic        : PSIGNW
 24424  // Supported forms : (4 forms)
 24425  //
 24426  //    * PSIGNW mm, mm       [SSSE3]
 24427  //    * PSIGNW m64, mm      [SSSE3]
 24428  //    * PSIGNW xmm, xmm     [SSSE3]
 24429  //    * PSIGNW m128, xmm    [SSSE3]
 24430  //
 24431  func (self *Program) PSIGNW(v0 interface{}, v1 interface{}) *Instruction {
 24432      p := self.alloc("PSIGNW", 2, Operands { v0, v1 })
 24433      // PSIGNW mm, mm
 24434      if isMM(v0) && isMM(v1) {
 24435          self.require(ISA_SSSE3)
 24436          p.domain = DomainMMXSSE
 24437          p.add(0, func(m *_Encoding, v []interface{}) {
 24438              m.rexo(hcode(v[1]), v[0], false)
 24439              m.emit(0x0f)
 24440              m.emit(0x38)
 24441              m.emit(0x09)
 24442              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 24443          })
 24444      }
 24445      // PSIGNW m64, mm
 24446      if isM64(v0) && isMM(v1) {
 24447          self.require(ISA_SSSE3)
 24448          p.domain = DomainMMXSSE
 24449          p.add(0, func(m *_Encoding, v []interface{}) {
 24450              m.rexo(hcode(v[1]), addr(v[0]), false)
 24451              m.emit(0x0f)
 24452              m.emit(0x38)
 24453              m.emit(0x09)
 24454              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 24455          })
 24456      }
 24457      // PSIGNW xmm, xmm
 24458      if isXMM(v0) && isXMM(v1) {
 24459          self.require(ISA_SSSE3)
 24460          p.domain = DomainMMXSSE
 24461          p.add(0, func(m *_Encoding, v []interface{}) {
 24462              m.emit(0x66)
 24463              m.rexo(hcode(v[1]), v[0], false)
 24464              m.emit(0x0f)
 24465              m.emit(0x38)
 24466              m.emit(0x09)
 24467              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 24468          })
 24469      }
 24470      // PSIGNW m128, xmm
 24471      if isM128(v0) && isXMM(v1) {
 24472          self.require(ISA_SSSE3)
 24473          p.domain = DomainMMXSSE
 24474          p.add(0, func(m *_Encoding, v []interface{}) {
 24475              m.emit(0x66)
 24476              m.rexo(hcode(v[1]), addr(v[0]), false)
 24477              m.emit(0x0f)
 24478              m.emit(0x38)
 24479              m.emit(0x09)
 24480              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 24481          })
 24482      }
 24483      if p.len == 0 {
 24484          panic("invalid operands for PSIGNW")
 24485      }
 24486      return p
 24487  }
 24488  
 24489  // PSLLD performs "Shift Packed Doubleword Data Left Logical".
 24490  //
 24491  // Mnemonic        : PSLLD
 24492  // Supported forms : (6 forms)
 24493  //
 24494  //    * PSLLD imm8, mm     [MMX]
 24495  //    * PSLLD mm, mm       [MMX]
 24496  //    * PSLLD m64, mm      [MMX]
 24497  //    * PSLLD imm8, xmm    [SSE2]
 24498  //    * PSLLD xmm, xmm     [SSE2]
 24499  //    * PSLLD m128, xmm    [SSE2]
 24500  //
 24501  func (self *Program) PSLLD(v0 interface{}, v1 interface{}) *Instruction {
 24502      p := self.alloc("PSLLD", 2, Operands { v0, v1 })
 24503      // PSLLD imm8, mm
 24504      if isImm8(v0) && isMM(v1) {
 24505          self.require(ISA_MMX)
 24506          p.domain = DomainMMXSSE
 24507          p.add(0, func(m *_Encoding, v []interface{}) {
 24508              m.rexo(0, v[1], false)
 24509              m.emit(0x0f)
 24510              m.emit(0x72)
 24511              m.emit(0xf0 | lcode(v[1]))
 24512              m.imm1(toImmAny(v[0]))
 24513          })
 24514      }
 24515      // PSLLD mm, mm
 24516      if isMM(v0) && isMM(v1) {
 24517          self.require(ISA_MMX)
 24518          p.domain = DomainMMXSSE
 24519          p.add(0, func(m *_Encoding, v []interface{}) {
 24520              m.rexo(hcode(v[1]), v[0], false)
 24521              m.emit(0x0f)
 24522              m.emit(0xf2)
 24523              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 24524          })
 24525      }
 24526      // PSLLD m64, mm
 24527      if isM64(v0) && isMM(v1) {
 24528          self.require(ISA_MMX)
 24529          p.domain = DomainMMXSSE
 24530          p.add(0, func(m *_Encoding, v []interface{}) {
 24531              m.rexo(hcode(v[1]), addr(v[0]), false)
 24532              m.emit(0x0f)
 24533              m.emit(0xf2)
 24534              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 24535          })
 24536      }
 24537      // PSLLD imm8, xmm
 24538      if isImm8(v0) && isXMM(v1) {
 24539          self.require(ISA_SSE2)
 24540          p.domain = DomainMMXSSE
 24541          p.add(0, func(m *_Encoding, v []interface{}) {
 24542              m.emit(0x66)
 24543              m.rexo(0, v[1], false)
 24544              m.emit(0x0f)
 24545              m.emit(0x72)
 24546              m.emit(0xf0 | lcode(v[1]))
 24547              m.imm1(toImmAny(v[0]))
 24548          })
 24549      }
 24550      // PSLLD xmm, xmm
 24551      if isXMM(v0) && isXMM(v1) {
 24552          self.require(ISA_SSE2)
 24553          p.domain = DomainMMXSSE
 24554          p.add(0, func(m *_Encoding, v []interface{}) {
 24555              m.emit(0x66)
 24556              m.rexo(hcode(v[1]), v[0], false)
 24557              m.emit(0x0f)
 24558              m.emit(0xf2)
 24559              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 24560          })
 24561      }
 24562      // PSLLD m128, xmm
 24563      if isM128(v0) && isXMM(v1) {
 24564          self.require(ISA_SSE2)
 24565          p.domain = DomainMMXSSE
 24566          p.add(0, func(m *_Encoding, v []interface{}) {
 24567              m.emit(0x66)
 24568              m.rexo(hcode(v[1]), addr(v[0]), false)
 24569              m.emit(0x0f)
 24570              m.emit(0xf2)
 24571              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 24572          })
 24573      }
 24574      if p.len == 0 {
 24575          panic("invalid operands for PSLLD")
 24576      }
 24577      return p
 24578  }
 24579  
 24580  // PSLLDQ performs "Shift Packed Double Quadword Left Logical".
 24581  //
 24582  // Mnemonic        : PSLLDQ
 24583  // Supported forms : (1 form)
 24584  //
 24585  //    * PSLLDQ imm8, xmm    [SSE2]
 24586  //
 24587  func (self *Program) PSLLDQ(v0 interface{}, v1 interface{}) *Instruction {
 24588      p := self.alloc("PSLLDQ", 2, Operands { v0, v1 })
 24589      // PSLLDQ imm8, xmm
 24590      if isImm8(v0) && isXMM(v1) {
 24591          self.require(ISA_SSE2)
 24592          p.domain = DomainMMXSSE
 24593          p.add(0, func(m *_Encoding, v []interface{}) {
 24594              m.emit(0x66)
 24595              m.rexo(0, v[1], false)
 24596              m.emit(0x0f)
 24597              m.emit(0x73)
 24598              m.emit(0xf8 | lcode(v[1]))
 24599              m.imm1(toImmAny(v[0]))
 24600          })
 24601      }
 24602      if p.len == 0 {
 24603          panic("invalid operands for PSLLDQ")
 24604      }
 24605      return p
 24606  }
 24607  
 24608  // PSLLQ performs "Shift Packed Quadword Data Left Logical".
 24609  //
 24610  // Mnemonic        : PSLLQ
 24611  // Supported forms : (6 forms)
 24612  //
 24613  //    * PSLLQ imm8, mm     [MMX]
 24614  //    * PSLLQ mm, mm       [MMX]
 24615  //    * PSLLQ m64, mm      [MMX]
 24616  //    * PSLLQ imm8, xmm    [SSE2]
 24617  //    * PSLLQ xmm, xmm     [SSE2]
 24618  //    * PSLLQ m128, xmm    [SSE2]
 24619  //
 24620  func (self *Program) PSLLQ(v0 interface{}, v1 interface{}) *Instruction {
 24621      p := self.alloc("PSLLQ", 2, Operands { v0, v1 })
 24622      // PSLLQ imm8, mm
 24623      if isImm8(v0) && isMM(v1) {
 24624          self.require(ISA_MMX)
 24625          p.domain = DomainMMXSSE
 24626          p.add(0, func(m *_Encoding, v []interface{}) {
 24627              m.rexo(0, v[1], false)
 24628              m.emit(0x0f)
 24629              m.emit(0x73)
 24630              m.emit(0xf0 | lcode(v[1]))
 24631              m.imm1(toImmAny(v[0]))
 24632          })
 24633      }
 24634      // PSLLQ mm, mm
 24635      if isMM(v0) && isMM(v1) {
 24636          self.require(ISA_MMX)
 24637          p.domain = DomainMMXSSE
 24638          p.add(0, func(m *_Encoding, v []interface{}) {
 24639              m.rexo(hcode(v[1]), v[0], false)
 24640              m.emit(0x0f)
 24641              m.emit(0xf3)
 24642              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 24643          })
 24644      }
 24645      // PSLLQ m64, mm
 24646      if isM64(v0) && isMM(v1) {
 24647          self.require(ISA_MMX)
 24648          p.domain = DomainMMXSSE
 24649          p.add(0, func(m *_Encoding, v []interface{}) {
 24650              m.rexo(hcode(v[1]), addr(v[0]), false)
 24651              m.emit(0x0f)
 24652              m.emit(0xf3)
 24653              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 24654          })
 24655      }
 24656      // PSLLQ imm8, xmm
 24657      if isImm8(v0) && isXMM(v1) {
 24658          self.require(ISA_SSE2)
 24659          p.domain = DomainMMXSSE
 24660          p.add(0, func(m *_Encoding, v []interface{}) {
 24661              m.emit(0x66)
 24662              m.rexo(0, v[1], false)
 24663              m.emit(0x0f)
 24664              m.emit(0x73)
 24665              m.emit(0xf0 | lcode(v[1]))
 24666              m.imm1(toImmAny(v[0]))
 24667          })
 24668      }
 24669      // PSLLQ xmm, xmm
 24670      if isXMM(v0) && isXMM(v1) {
 24671          self.require(ISA_SSE2)
 24672          p.domain = DomainMMXSSE
 24673          p.add(0, func(m *_Encoding, v []interface{}) {
 24674              m.emit(0x66)
 24675              m.rexo(hcode(v[1]), v[0], false)
 24676              m.emit(0x0f)
 24677              m.emit(0xf3)
 24678              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 24679          })
 24680      }
 24681      // PSLLQ m128, xmm
 24682      if isM128(v0) && isXMM(v1) {
 24683          self.require(ISA_SSE2)
 24684          p.domain = DomainMMXSSE
 24685          p.add(0, func(m *_Encoding, v []interface{}) {
 24686              m.emit(0x66)
 24687              m.rexo(hcode(v[1]), addr(v[0]), false)
 24688              m.emit(0x0f)
 24689              m.emit(0xf3)
 24690              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 24691          })
 24692      }
 24693      if p.len == 0 {
 24694          panic("invalid operands for PSLLQ")
 24695      }
 24696      return p
 24697  }
 24698  
 24699  // PSLLW performs "Shift Packed Word Data Left Logical".
 24700  //
 24701  // Mnemonic        : PSLLW
 24702  // Supported forms : (6 forms)
 24703  //
 24704  //    * PSLLW imm8, mm     [MMX]
 24705  //    * PSLLW mm, mm       [MMX]
 24706  //    * PSLLW m64, mm      [MMX]
 24707  //    * PSLLW imm8, xmm    [SSE2]
 24708  //    * PSLLW xmm, xmm     [SSE2]
 24709  //    * PSLLW m128, xmm    [SSE2]
 24710  //
 24711  func (self *Program) PSLLW(v0 interface{}, v1 interface{}) *Instruction {
 24712      p := self.alloc("PSLLW", 2, Operands { v0, v1 })
 24713      // PSLLW imm8, mm
 24714      if isImm8(v0) && isMM(v1) {
 24715          self.require(ISA_MMX)
 24716          p.domain = DomainMMXSSE
 24717          p.add(0, func(m *_Encoding, v []interface{}) {
 24718              m.rexo(0, v[1], false)
 24719              m.emit(0x0f)
 24720              m.emit(0x71)
 24721              m.emit(0xf0 | lcode(v[1]))
 24722              m.imm1(toImmAny(v[0]))
 24723          })
 24724      }
 24725      // PSLLW mm, mm
 24726      if isMM(v0) && isMM(v1) {
 24727          self.require(ISA_MMX)
 24728          p.domain = DomainMMXSSE
 24729          p.add(0, func(m *_Encoding, v []interface{}) {
 24730              m.rexo(hcode(v[1]), v[0], false)
 24731              m.emit(0x0f)
 24732              m.emit(0xf1)
 24733              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 24734          })
 24735      }
 24736      // PSLLW m64, mm
 24737      if isM64(v0) && isMM(v1) {
 24738          self.require(ISA_MMX)
 24739          p.domain = DomainMMXSSE
 24740          p.add(0, func(m *_Encoding, v []interface{}) {
 24741              m.rexo(hcode(v[1]), addr(v[0]), false)
 24742              m.emit(0x0f)
 24743              m.emit(0xf1)
 24744              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 24745          })
 24746      }
 24747      // PSLLW imm8, xmm
 24748      if isImm8(v0) && isXMM(v1) {
 24749          self.require(ISA_SSE2)
 24750          p.domain = DomainMMXSSE
 24751          p.add(0, func(m *_Encoding, v []interface{}) {
 24752              m.emit(0x66)
 24753              m.rexo(0, v[1], false)
 24754              m.emit(0x0f)
 24755              m.emit(0x71)
 24756              m.emit(0xf0 | lcode(v[1]))
 24757              m.imm1(toImmAny(v[0]))
 24758          })
 24759      }
 24760      // PSLLW xmm, xmm
 24761      if isXMM(v0) && isXMM(v1) {
 24762          self.require(ISA_SSE2)
 24763          p.domain = DomainMMXSSE
 24764          p.add(0, func(m *_Encoding, v []interface{}) {
 24765              m.emit(0x66)
 24766              m.rexo(hcode(v[1]), v[0], false)
 24767              m.emit(0x0f)
 24768              m.emit(0xf1)
 24769              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 24770          })
 24771      }
 24772      // PSLLW m128, xmm
 24773      if isM128(v0) && isXMM(v1) {
 24774          self.require(ISA_SSE2)
 24775          p.domain = DomainMMXSSE
 24776          p.add(0, func(m *_Encoding, v []interface{}) {
 24777              m.emit(0x66)
 24778              m.rexo(hcode(v[1]), addr(v[0]), false)
 24779              m.emit(0x0f)
 24780              m.emit(0xf1)
 24781              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 24782          })
 24783      }
 24784      if p.len == 0 {
 24785          panic("invalid operands for PSLLW")
 24786      }
 24787      return p
 24788  }
 24789  
 24790  // PSRAD performs "Shift Packed Doubleword Data Right Arithmetic".
 24791  //
 24792  // Mnemonic        : PSRAD
 24793  // Supported forms : (6 forms)
 24794  //
 24795  //    * PSRAD imm8, mm     [MMX]
 24796  //    * PSRAD mm, mm       [MMX]
 24797  //    * PSRAD m64, mm      [MMX]
 24798  //    * PSRAD imm8, xmm    [SSE2]
 24799  //    * PSRAD xmm, xmm     [SSE2]
 24800  //    * PSRAD m128, xmm    [SSE2]
 24801  //
 24802  func (self *Program) PSRAD(v0 interface{}, v1 interface{}) *Instruction {
 24803      p := self.alloc("PSRAD", 2, Operands { v0, v1 })
 24804      // PSRAD imm8, mm
 24805      if isImm8(v0) && isMM(v1) {
 24806          self.require(ISA_MMX)
 24807          p.domain = DomainMMXSSE
 24808          p.add(0, func(m *_Encoding, v []interface{}) {
 24809              m.rexo(0, v[1], false)
 24810              m.emit(0x0f)
 24811              m.emit(0x72)
 24812              m.emit(0xe0 | lcode(v[1]))
 24813              m.imm1(toImmAny(v[0]))
 24814          })
 24815      }
 24816      // PSRAD mm, mm
 24817      if isMM(v0) && isMM(v1) {
 24818          self.require(ISA_MMX)
 24819          p.domain = DomainMMXSSE
 24820          p.add(0, func(m *_Encoding, v []interface{}) {
 24821              m.rexo(hcode(v[1]), v[0], false)
 24822              m.emit(0x0f)
 24823              m.emit(0xe2)
 24824              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 24825          })
 24826      }
 24827      // PSRAD m64, mm
 24828      if isM64(v0) && isMM(v1) {
 24829          self.require(ISA_MMX)
 24830          p.domain = DomainMMXSSE
 24831          p.add(0, func(m *_Encoding, v []interface{}) {
 24832              m.rexo(hcode(v[1]), addr(v[0]), false)
 24833              m.emit(0x0f)
 24834              m.emit(0xe2)
 24835              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 24836          })
 24837      }
 24838      // PSRAD imm8, xmm
 24839      if isImm8(v0) && isXMM(v1) {
 24840          self.require(ISA_SSE2)
 24841          p.domain = DomainMMXSSE
 24842          p.add(0, func(m *_Encoding, v []interface{}) {
 24843              m.emit(0x66)
 24844              m.rexo(0, v[1], false)
 24845              m.emit(0x0f)
 24846              m.emit(0x72)
 24847              m.emit(0xe0 | lcode(v[1]))
 24848              m.imm1(toImmAny(v[0]))
 24849          })
 24850      }
 24851      // PSRAD xmm, xmm
 24852      if isXMM(v0) && isXMM(v1) {
 24853          self.require(ISA_SSE2)
 24854          p.domain = DomainMMXSSE
 24855          p.add(0, func(m *_Encoding, v []interface{}) {
 24856              m.emit(0x66)
 24857              m.rexo(hcode(v[1]), v[0], false)
 24858              m.emit(0x0f)
 24859              m.emit(0xe2)
 24860              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 24861          })
 24862      }
 24863      // PSRAD m128, xmm
 24864      if isM128(v0) && isXMM(v1) {
 24865          self.require(ISA_SSE2)
 24866          p.domain = DomainMMXSSE
 24867          p.add(0, func(m *_Encoding, v []interface{}) {
 24868              m.emit(0x66)
 24869              m.rexo(hcode(v[1]), addr(v[0]), false)
 24870              m.emit(0x0f)
 24871              m.emit(0xe2)
 24872              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 24873          })
 24874      }
 24875      if p.len == 0 {
 24876          panic("invalid operands for PSRAD")
 24877      }
 24878      return p
 24879  }
 24880  
 24881  // PSRAW performs "Shift Packed Word Data Right Arithmetic".
 24882  //
 24883  // Mnemonic        : PSRAW
 24884  // Supported forms : (6 forms)
 24885  //
 24886  //    * PSRAW imm8, mm     [MMX]
 24887  //    * PSRAW mm, mm       [MMX]
 24888  //    * PSRAW m64, mm      [MMX]
 24889  //    * PSRAW imm8, xmm    [SSE2]
 24890  //    * PSRAW xmm, xmm     [SSE2]
 24891  //    * PSRAW m128, xmm    [SSE2]
 24892  //
 24893  func (self *Program) PSRAW(v0 interface{}, v1 interface{}) *Instruction {
 24894      p := self.alloc("PSRAW", 2, Operands { v0, v1 })
 24895      // PSRAW imm8, mm
 24896      if isImm8(v0) && isMM(v1) {
 24897          self.require(ISA_MMX)
 24898          p.domain = DomainMMXSSE
 24899          p.add(0, func(m *_Encoding, v []interface{}) {
 24900              m.rexo(0, v[1], false)
 24901              m.emit(0x0f)
 24902              m.emit(0x71)
 24903              m.emit(0xe0 | lcode(v[1]))
 24904              m.imm1(toImmAny(v[0]))
 24905          })
 24906      }
 24907      // PSRAW mm, mm
 24908      if isMM(v0) && isMM(v1) {
 24909          self.require(ISA_MMX)
 24910          p.domain = DomainMMXSSE
 24911          p.add(0, func(m *_Encoding, v []interface{}) {
 24912              m.rexo(hcode(v[1]), v[0], false)
 24913              m.emit(0x0f)
 24914              m.emit(0xe1)
 24915              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 24916          })
 24917      }
 24918      // PSRAW m64, mm
 24919      if isM64(v0) && isMM(v1) {
 24920          self.require(ISA_MMX)
 24921          p.domain = DomainMMXSSE
 24922          p.add(0, func(m *_Encoding, v []interface{}) {
 24923              m.rexo(hcode(v[1]), addr(v[0]), false)
 24924              m.emit(0x0f)
 24925              m.emit(0xe1)
 24926              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 24927          })
 24928      }
 24929      // PSRAW imm8, xmm
 24930      if isImm8(v0) && isXMM(v1) {
 24931          self.require(ISA_SSE2)
 24932          p.domain = DomainMMXSSE
 24933          p.add(0, func(m *_Encoding, v []interface{}) {
 24934              m.emit(0x66)
 24935              m.rexo(0, v[1], false)
 24936              m.emit(0x0f)
 24937              m.emit(0x71)
 24938              m.emit(0xe0 | lcode(v[1]))
 24939              m.imm1(toImmAny(v[0]))
 24940          })
 24941      }
 24942      // PSRAW xmm, xmm
 24943      if isXMM(v0) && isXMM(v1) {
 24944          self.require(ISA_SSE2)
 24945          p.domain = DomainMMXSSE
 24946          p.add(0, func(m *_Encoding, v []interface{}) {
 24947              m.emit(0x66)
 24948              m.rexo(hcode(v[1]), v[0], false)
 24949              m.emit(0x0f)
 24950              m.emit(0xe1)
 24951              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 24952          })
 24953      }
 24954      // PSRAW m128, xmm
 24955      if isM128(v0) && isXMM(v1) {
 24956          self.require(ISA_SSE2)
 24957          p.domain = DomainMMXSSE
 24958          p.add(0, func(m *_Encoding, v []interface{}) {
 24959              m.emit(0x66)
 24960              m.rexo(hcode(v[1]), addr(v[0]), false)
 24961              m.emit(0x0f)
 24962              m.emit(0xe1)
 24963              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 24964          })
 24965      }
 24966      if p.len == 0 {
 24967          panic("invalid operands for PSRAW")
 24968      }
 24969      return p
 24970  }
 24971  
 24972  // PSRLD performs "Shift Packed Doubleword Data Right Logical".
 24973  //
 24974  // Mnemonic        : PSRLD
 24975  // Supported forms : (6 forms)
 24976  //
 24977  //    * PSRLD imm8, mm     [MMX]
 24978  //    * PSRLD mm, mm       [MMX]
 24979  //    * PSRLD m64, mm      [MMX]
 24980  //    * PSRLD imm8, xmm    [SSE2]
 24981  //    * PSRLD xmm, xmm     [SSE2]
 24982  //    * PSRLD m128, xmm    [SSE2]
 24983  //
 24984  func (self *Program) PSRLD(v0 interface{}, v1 interface{}) *Instruction {
 24985      p := self.alloc("PSRLD", 2, Operands { v0, v1 })
 24986      // PSRLD imm8, mm
 24987      if isImm8(v0) && isMM(v1) {
 24988          self.require(ISA_MMX)
 24989          p.domain = DomainMMXSSE
 24990          p.add(0, func(m *_Encoding, v []interface{}) {
 24991              m.rexo(0, v[1], false)
 24992              m.emit(0x0f)
 24993              m.emit(0x72)
 24994              m.emit(0xd0 | lcode(v[1]))
 24995              m.imm1(toImmAny(v[0]))
 24996          })
 24997      }
 24998      // PSRLD mm, mm
 24999      if isMM(v0) && isMM(v1) {
 25000          self.require(ISA_MMX)
 25001          p.domain = DomainMMXSSE
 25002          p.add(0, func(m *_Encoding, v []interface{}) {
 25003              m.rexo(hcode(v[1]), v[0], false)
 25004              m.emit(0x0f)
 25005              m.emit(0xd2)
 25006              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 25007          })
 25008      }
 25009      // PSRLD m64, mm
 25010      if isM64(v0) && isMM(v1) {
 25011          self.require(ISA_MMX)
 25012          p.domain = DomainMMXSSE
 25013          p.add(0, func(m *_Encoding, v []interface{}) {
 25014              m.rexo(hcode(v[1]), addr(v[0]), false)
 25015              m.emit(0x0f)
 25016              m.emit(0xd2)
 25017              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 25018          })
 25019      }
 25020      // PSRLD imm8, xmm
 25021      if isImm8(v0) && isXMM(v1) {
 25022          self.require(ISA_SSE2)
 25023          p.domain = DomainMMXSSE
 25024          p.add(0, func(m *_Encoding, v []interface{}) {
 25025              m.emit(0x66)
 25026              m.rexo(0, v[1], false)
 25027              m.emit(0x0f)
 25028              m.emit(0x72)
 25029              m.emit(0xd0 | lcode(v[1]))
 25030              m.imm1(toImmAny(v[0]))
 25031          })
 25032      }
 25033      // PSRLD xmm, xmm
 25034      if isXMM(v0) && isXMM(v1) {
 25035          self.require(ISA_SSE2)
 25036          p.domain = DomainMMXSSE
 25037          p.add(0, func(m *_Encoding, v []interface{}) {
 25038              m.emit(0x66)
 25039              m.rexo(hcode(v[1]), v[0], false)
 25040              m.emit(0x0f)
 25041              m.emit(0xd2)
 25042              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 25043          })
 25044      }
 25045      // PSRLD m128, xmm
 25046      if isM128(v0) && isXMM(v1) {
 25047          self.require(ISA_SSE2)
 25048          p.domain = DomainMMXSSE
 25049          p.add(0, func(m *_Encoding, v []interface{}) {
 25050              m.emit(0x66)
 25051              m.rexo(hcode(v[1]), addr(v[0]), false)
 25052              m.emit(0x0f)
 25053              m.emit(0xd2)
 25054              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 25055          })
 25056      }
 25057      if p.len == 0 {
 25058          panic("invalid operands for PSRLD")
 25059      }
 25060      return p
 25061  }
 25062  
 25063  // PSRLDQ performs "Shift Packed Double Quadword Right Logical".
 25064  //
 25065  // Mnemonic        : PSRLDQ
 25066  // Supported forms : (1 form)
 25067  //
 25068  //    * PSRLDQ imm8, xmm    [SSE2]
 25069  //
 25070  func (self *Program) PSRLDQ(v0 interface{}, v1 interface{}) *Instruction {
 25071      p := self.alloc("PSRLDQ", 2, Operands { v0, v1 })
 25072      // PSRLDQ imm8, xmm
 25073      if isImm8(v0) && isXMM(v1) {
 25074          self.require(ISA_SSE2)
 25075          p.domain = DomainMMXSSE
 25076          p.add(0, func(m *_Encoding, v []interface{}) {
 25077              m.emit(0x66)
 25078              m.rexo(0, v[1], false)
 25079              m.emit(0x0f)
 25080              m.emit(0x73)
 25081              m.emit(0xd8 | lcode(v[1]))
 25082              m.imm1(toImmAny(v[0]))
 25083          })
 25084      }
 25085      if p.len == 0 {
 25086          panic("invalid operands for PSRLDQ")
 25087      }
 25088      return p
 25089  }
 25090  
 25091  // PSRLQ performs "Shift Packed Quadword Data Right Logical".
 25092  //
 25093  // Mnemonic        : PSRLQ
 25094  // Supported forms : (6 forms)
 25095  //
 25096  //    * PSRLQ imm8, mm     [MMX]
 25097  //    * PSRLQ mm, mm       [MMX]
 25098  //    * PSRLQ m64, mm      [MMX]
 25099  //    * PSRLQ imm8, xmm    [SSE2]
 25100  //    * PSRLQ xmm, xmm     [SSE2]
 25101  //    * PSRLQ m128, xmm    [SSE2]
 25102  //
 25103  func (self *Program) PSRLQ(v0 interface{}, v1 interface{}) *Instruction {
 25104      p := self.alloc("PSRLQ", 2, Operands { v0, v1 })
 25105      // PSRLQ imm8, mm
 25106      if isImm8(v0) && isMM(v1) {
 25107          self.require(ISA_MMX)
 25108          p.domain = DomainMMXSSE
 25109          p.add(0, func(m *_Encoding, v []interface{}) {
 25110              m.rexo(0, v[1], false)
 25111              m.emit(0x0f)
 25112              m.emit(0x73)
 25113              m.emit(0xd0 | lcode(v[1]))
 25114              m.imm1(toImmAny(v[0]))
 25115          })
 25116      }
 25117      // PSRLQ mm, mm
 25118      if isMM(v0) && isMM(v1) {
 25119          self.require(ISA_MMX)
 25120          p.domain = DomainMMXSSE
 25121          p.add(0, func(m *_Encoding, v []interface{}) {
 25122              m.rexo(hcode(v[1]), v[0], false)
 25123              m.emit(0x0f)
 25124              m.emit(0xd3)
 25125              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 25126          })
 25127      }
 25128      // PSRLQ m64, mm
 25129      if isM64(v0) && isMM(v1) {
 25130          self.require(ISA_MMX)
 25131          p.domain = DomainMMXSSE
 25132          p.add(0, func(m *_Encoding, v []interface{}) {
 25133              m.rexo(hcode(v[1]), addr(v[0]), false)
 25134              m.emit(0x0f)
 25135              m.emit(0xd3)
 25136              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 25137          })
 25138      }
 25139      // PSRLQ imm8, xmm
 25140      if isImm8(v0) && isXMM(v1) {
 25141          self.require(ISA_SSE2)
 25142          p.domain = DomainMMXSSE
 25143          p.add(0, func(m *_Encoding, v []interface{}) {
 25144              m.emit(0x66)
 25145              m.rexo(0, v[1], false)
 25146              m.emit(0x0f)
 25147              m.emit(0x73)
 25148              m.emit(0xd0 | lcode(v[1]))
 25149              m.imm1(toImmAny(v[0]))
 25150          })
 25151      }
 25152      // PSRLQ xmm, xmm
 25153      if isXMM(v0) && isXMM(v1) {
 25154          self.require(ISA_SSE2)
 25155          p.domain = DomainMMXSSE
 25156          p.add(0, func(m *_Encoding, v []interface{}) {
 25157              m.emit(0x66)
 25158              m.rexo(hcode(v[1]), v[0], false)
 25159              m.emit(0x0f)
 25160              m.emit(0xd3)
 25161              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 25162          })
 25163      }
 25164      // PSRLQ m128, xmm
 25165      if isM128(v0) && isXMM(v1) {
 25166          self.require(ISA_SSE2)
 25167          p.domain = DomainMMXSSE
 25168          p.add(0, func(m *_Encoding, v []interface{}) {
 25169              m.emit(0x66)
 25170              m.rexo(hcode(v[1]), addr(v[0]), false)
 25171              m.emit(0x0f)
 25172              m.emit(0xd3)
 25173              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 25174          })
 25175      }
 25176      if p.len == 0 {
 25177          panic("invalid operands for PSRLQ")
 25178      }
 25179      return p
 25180  }
 25181  
 25182  // PSRLW performs "Shift Packed Word Data Right Logical".
 25183  //
 25184  // Mnemonic        : PSRLW
 25185  // Supported forms : (6 forms)
 25186  //
 25187  //    * PSRLW imm8, mm     [MMX]
 25188  //    * PSRLW mm, mm       [MMX]
 25189  //    * PSRLW m64, mm      [MMX]
 25190  //    * PSRLW imm8, xmm    [SSE2]
 25191  //    * PSRLW xmm, xmm     [SSE2]
 25192  //    * PSRLW m128, xmm    [SSE2]
 25193  //
 25194  func (self *Program) PSRLW(v0 interface{}, v1 interface{}) *Instruction {
 25195      p := self.alloc("PSRLW", 2, Operands { v0, v1 })
 25196      // PSRLW imm8, mm
 25197      if isImm8(v0) && isMM(v1) {
 25198          self.require(ISA_MMX)
 25199          p.domain = DomainMMXSSE
 25200          p.add(0, func(m *_Encoding, v []interface{}) {
 25201              m.rexo(0, v[1], false)
 25202              m.emit(0x0f)
 25203              m.emit(0x71)
 25204              m.emit(0xd0 | lcode(v[1]))
 25205              m.imm1(toImmAny(v[0]))
 25206          })
 25207      }
 25208      // PSRLW mm, mm
 25209      if isMM(v0) && isMM(v1) {
 25210          self.require(ISA_MMX)
 25211          p.domain = DomainMMXSSE
 25212          p.add(0, func(m *_Encoding, v []interface{}) {
 25213              m.rexo(hcode(v[1]), v[0], false)
 25214              m.emit(0x0f)
 25215              m.emit(0xd1)
 25216              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 25217          })
 25218      }
 25219      // PSRLW m64, mm
 25220      if isM64(v0) && isMM(v1) {
 25221          self.require(ISA_MMX)
 25222          p.domain = DomainMMXSSE
 25223          p.add(0, func(m *_Encoding, v []interface{}) {
 25224              m.rexo(hcode(v[1]), addr(v[0]), false)
 25225              m.emit(0x0f)
 25226              m.emit(0xd1)
 25227              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 25228          })
 25229      }
 25230      // PSRLW imm8, xmm
 25231      if isImm8(v0) && isXMM(v1) {
 25232          self.require(ISA_SSE2)
 25233          p.domain = DomainMMXSSE
 25234          p.add(0, func(m *_Encoding, v []interface{}) {
 25235              m.emit(0x66)
 25236              m.rexo(0, v[1], false)
 25237              m.emit(0x0f)
 25238              m.emit(0x71)
 25239              m.emit(0xd0 | lcode(v[1]))
 25240              m.imm1(toImmAny(v[0]))
 25241          })
 25242      }
 25243      // PSRLW xmm, xmm
 25244      if isXMM(v0) && isXMM(v1) {
 25245          self.require(ISA_SSE2)
 25246          p.domain = DomainMMXSSE
 25247          p.add(0, func(m *_Encoding, v []interface{}) {
 25248              m.emit(0x66)
 25249              m.rexo(hcode(v[1]), v[0], false)
 25250              m.emit(0x0f)
 25251              m.emit(0xd1)
 25252              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 25253          })
 25254      }
 25255      // PSRLW m128, xmm
 25256      if isM128(v0) && isXMM(v1) {
 25257          self.require(ISA_SSE2)
 25258          p.domain = DomainMMXSSE
 25259          p.add(0, func(m *_Encoding, v []interface{}) {
 25260              m.emit(0x66)
 25261              m.rexo(hcode(v[1]), addr(v[0]), false)
 25262              m.emit(0x0f)
 25263              m.emit(0xd1)
 25264              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 25265          })
 25266      }
 25267      if p.len == 0 {
 25268          panic("invalid operands for PSRLW")
 25269      }
 25270      return p
 25271  }
 25272  
// PSUBB performs "Subtract Packed Byte Integers".
//
// Mnemonic        : PSUBB
// Supported forms : (4 forms)
//
//    * PSUBB mm, mm       [MMX]
//    * PSUBB m64, mm      [MMX]
//    * PSUBB xmm, xmm     [SSE2]
//    * PSUBB m128, xmm    [SSE2]
//
func (self *Program) PSUBB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSUBB", 2, Operands { v0, v1 })
    // Each matching operand form registers one encoder; if no form
    // matches, p.len stays 0 and we panic at the end.
    // PSUBB mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F F8 /r — register-direct ModRM (mod=11, reg=dst, r/m=src).
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xf8)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSUBB m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F F8 /r with a memory source; mrsd emits ModRM/SIB/disp.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xf8)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PSUBB xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F F8 /r — the 66h prefix selects the 128-bit XMM form.
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xf8)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSUBB m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F F8 /r with a memory source.
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xf8)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PSUBB")
    }
    return p
}
 25336  
// PSUBD performs "Subtract Packed Doubleword Integers".
//
// Mnemonic        : PSUBD
// Supported forms : (4 forms)
//
//    * PSUBD mm, mm       [MMX]
//    * PSUBD m64, mm      [MMX]
//    * PSUBD xmm, xmm     [SSE2]
//    * PSUBD m128, xmm    [SSE2]
//
func (self *Program) PSUBD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSUBD", 2, Operands { v0, v1 })
    // Each matching operand form registers one encoder; if no form
    // matches, p.len stays 0 and we panic at the end.
    // PSUBD mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F FA /r — register-direct ModRM (mod=11, reg=dst, r/m=src).
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xfa)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSUBD m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F FA /r with a memory source; mrsd emits ModRM/SIB/disp.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xfa)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PSUBD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F FA /r — the 66h prefix selects the 128-bit XMM form.
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xfa)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSUBD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F FA /r with a memory source.
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xfa)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PSUBD")
    }
    return p
}
 25400  
// PSUBQ performs "Subtract Packed Quadword Integers".
//
// Mnemonic        : PSUBQ
// Supported forms : (4 forms)
//
//    * PSUBQ mm, mm       [SSE2]
//    * PSUBQ m64, mm      [SSE2]
//    * PSUBQ xmm, xmm     [SSE2]
//    * PSUBQ m128, xmm    [SSE2]
//
func (self *Program) PSUBQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSUBQ", 2, Operands { v0, v1 })
    // Note: unlike the other PSUB* instructions, even the MMX-register
    // forms of PSUBQ require SSE2 (see the ISA_SSE2 requirement below).
    // PSUBQ mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F FB /r — register-direct ModRM (mod=11, reg=dst, r/m=src).
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xfb)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSUBQ m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F FB /r with a memory source; mrsd emits ModRM/SIB/disp.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xfb)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PSUBQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F FB /r — the 66h prefix selects the 128-bit XMM form.
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xfb)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSUBQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F FB /r with a memory source.
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xfb)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PSUBQ")
    }
    return p
}
 25464  
// PSUBSB performs "Subtract Packed Signed Byte Integers with Signed Saturation".
//
// Mnemonic        : PSUBSB
// Supported forms : (4 forms)
//
//    * PSUBSB mm, mm       [MMX]
//    * PSUBSB m64, mm      [MMX]
//    * PSUBSB xmm, xmm     [SSE2]
//    * PSUBSB m128, xmm    [SSE2]
//
func (self *Program) PSUBSB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSUBSB", 2, Operands { v0, v1 })
    // Each matching operand form registers one encoder; if no form
    // matches, p.len stays 0 and we panic at the end.
    // PSUBSB mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F E8 /r — register-direct ModRM (mod=11, reg=dst, r/m=src).
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xe8)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSUBSB m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F E8 /r with a memory source; mrsd emits ModRM/SIB/disp.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xe8)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PSUBSB xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F E8 /r — the 66h prefix selects the 128-bit XMM form.
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xe8)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSUBSB m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F E8 /r with a memory source.
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xe8)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PSUBSB")
    }
    return p
}
 25528  
// PSUBSW performs "Subtract Packed Signed Word Integers with Signed Saturation".
//
// Mnemonic        : PSUBSW
// Supported forms : (4 forms)
//
//    * PSUBSW mm, mm       [MMX]
//    * PSUBSW m64, mm      [MMX]
//    * PSUBSW xmm, xmm     [SSE2]
//    * PSUBSW m128, xmm    [SSE2]
//
func (self *Program) PSUBSW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSUBSW", 2, Operands { v0, v1 })
    // Each matching operand form registers one encoder; if no form
    // matches, p.len stays 0 and we panic at the end.
    // PSUBSW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F E9 /r — register-direct ModRM (mod=11, reg=dst, r/m=src).
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xe9)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSUBSW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F E9 /r with a memory source; mrsd emits ModRM/SIB/disp.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xe9)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PSUBSW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F E9 /r — the 66h prefix selects the 128-bit XMM form.
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xe9)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSUBSW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F E9 /r with a memory source.
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xe9)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PSUBSW")
    }
    return p
}
 25592  
// PSUBUSB performs "Subtract Packed Unsigned Byte Integers with Unsigned Saturation".
//
// Mnemonic        : PSUBUSB
// Supported forms : (4 forms)
//
//    * PSUBUSB mm, mm       [MMX]
//    * PSUBUSB m64, mm      [MMX]
//    * PSUBUSB xmm, xmm     [SSE2]
//    * PSUBUSB m128, xmm    [SSE2]
//
func (self *Program) PSUBUSB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSUBUSB", 2, Operands { v0, v1 })
    // Each matching operand form registers one encoder; if no form
    // matches, p.len stays 0 and we panic at the end.
    // PSUBUSB mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F D8 /r — register-direct ModRM (mod=11, reg=dst, r/m=src).
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xd8)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSUBUSB m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F D8 /r with a memory source; mrsd emits ModRM/SIB/disp.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xd8)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PSUBUSB xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F D8 /r — the 66h prefix selects the 128-bit XMM form.
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xd8)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSUBUSB m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F D8 /r with a memory source.
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xd8)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PSUBUSB")
    }
    return p
}
 25656  
// PSUBUSW performs "Subtract Packed Unsigned Word Integers with Unsigned Saturation".
//
// Mnemonic        : PSUBUSW
// Supported forms : (4 forms)
//
//    * PSUBUSW mm, mm       [MMX]
//    * PSUBUSW m64, mm      [MMX]
//    * PSUBUSW xmm, xmm     [SSE2]
//    * PSUBUSW m128, xmm    [SSE2]
//
func (self *Program) PSUBUSW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSUBUSW", 2, Operands { v0, v1 })
    // Each matching operand form registers one encoder; if no form
    // matches, p.len stays 0 and we panic at the end.
    // PSUBUSW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F D9 /r — register-direct ModRM (mod=11, reg=dst, r/m=src).
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xd9)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSUBUSW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F D9 /r with a memory source; mrsd emits ModRM/SIB/disp.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xd9)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PSUBUSW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F D9 /r — the 66h prefix selects the 128-bit XMM form.
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xd9)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSUBUSW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F D9 /r with a memory source.
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xd9)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PSUBUSW")
    }
    return p
}
 25720  
// PSUBW performs "Subtract Packed Word Integers".
//
// Mnemonic        : PSUBW
// Supported forms : (4 forms)
//
//    * PSUBW mm, mm       [MMX]
//    * PSUBW m64, mm      [MMX]
//    * PSUBW xmm, xmm     [SSE2]
//    * PSUBW m128, xmm    [SSE2]
//
func (self *Program) PSUBW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSUBW", 2, Operands { v0, v1 })
    // Each matching operand form registers one encoder; if no form
    // matches, p.len stays 0 and we panic at the end.
    // PSUBW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F F9 /r — register-direct ModRM (mod=11, reg=dst, r/m=src).
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xf9)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSUBW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F F9 /r with a memory source; mrsd emits ModRM/SIB/disp.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xf9)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PSUBW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F F9 /r — the 66h prefix selects the 128-bit XMM form.
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xf9)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PSUBW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F F9 /r with a memory source.
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xf9)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PSUBW")
    }
    return p
}
 25784  
// PSWAPD performs "Packed Swap Doubleword".
//
// Mnemonic        : PSWAPD
// Supported forms : (2 forms)
//
//    * PSWAPD mm, mm     [3dnow!+]
//    * PSWAPD m64, mm    [3dnow!+]
//
func (self *Program) PSWAPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PSWAPD", 2, Operands { v0, v1 })
    // 3DNow! instructions use the 0F 0F escape with the per-instruction
    // opcode byte (0xBB for PSWAPD) emitted AFTER the ModRM/addressing
    // bytes, unlike regular two-byte opcodes.
    // PSWAPD mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_3DNOW_PLUS)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F 0F /r BB — register-direct form.
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
            m.emit(0xbb)
        })
    }
    // PSWAPD m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_3DNOW_PLUS)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F 0F /m BB — memory-source form; mrsd emits ModRM/SIB/disp.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
            m.emit(0xbb)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PSWAPD")
    }
    return p
}
 25824  
// PTEST performs "Packed Logical Compare".
//
// Mnemonic        : PTEST
// Supported forms : (2 forms)
//
//    * PTEST xmm, xmm     [SSE4.1]
//    * PTEST m128, xmm    [SSE4.1]
//
func (self *Program) PTEST(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PTEST", 2, Operands { v0, v1 })
    // PTEST xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 38 17 /r — three-byte opcode from the 0F 38 map,
            // register-direct ModRM (mod=11, reg=dst, r/m=src).
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x17)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PTEST m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 38 17 /r with a memory source; mrsd emits ModRM/SIB/disp.
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x38)
            m.emit(0x17)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PTEST")
    }
    return p
}
 25866  
// PUNPCKHBW performs "Unpack and Interleave High-Order Bytes into Words".
//
// Mnemonic        : PUNPCKHBW
// Supported forms : (4 forms)
//
//    * PUNPCKHBW mm, mm       [MMX]
//    * PUNPCKHBW m64, mm      [MMX]
//    * PUNPCKHBW xmm, xmm     [SSE2]
//    * PUNPCKHBW m128, xmm    [SSE2]
//
func (self *Program) PUNPCKHBW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PUNPCKHBW", 2, Operands { v0, v1 })
    // Each matching operand form registers one encoder; if no form
    // matches, p.len stays 0 and we panic at the end.
    // PUNPCKHBW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F 68 /r — register-direct ModRM (mod=11, reg=dst, r/m=src).
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x68)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PUNPCKHBW m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F 68 /r with a memory source; mrsd emits ModRM/SIB/disp.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x68)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PUNPCKHBW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 68 /r — the 66h prefix selects the 128-bit XMM form.
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x68)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PUNPCKHBW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 68 /r with a memory source.
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x68)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PUNPCKHBW")
    }
    return p
}
 25930  
// PUNPCKHDQ performs "Unpack and Interleave High-Order Doublewords into Quadwords".
//
// Mnemonic        : PUNPCKHDQ
// Supported forms : (4 forms)
//
//    * PUNPCKHDQ mm, mm       [MMX]
//    * PUNPCKHDQ m64, mm      [MMX]
//    * PUNPCKHDQ xmm, xmm     [SSE2]
//    * PUNPCKHDQ m128, xmm    [SSE2]
//
func (self *Program) PUNPCKHDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PUNPCKHDQ", 2, Operands { v0, v1 })
    // Each matching operand form registers one encoder; if no form
    // matches, p.len stays 0 and we panic at the end.
    // PUNPCKHDQ mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F 6A /r — register-direct ModRM (mod=11, reg=dst, r/m=src).
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x6a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PUNPCKHDQ m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F 6A /r with a memory source; mrsd emits ModRM/SIB/disp.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x6a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PUNPCKHDQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 6A /r — the 66h prefix selects the 128-bit XMM form.
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x6a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PUNPCKHDQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 6A /r with a memory source.
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x6a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PUNPCKHDQ")
    }
    return p
}
 25994  
// PUNPCKHQDQ performs "Unpack and Interleave High-Order Quadwords into Double Quadwords".
//
// Mnemonic        : PUNPCKHQDQ
// Supported forms : (2 forms)
//
//    * PUNPCKHQDQ xmm, xmm     [SSE2]
//    * PUNPCKHQDQ m128, xmm    [SSE2]
//
func (self *Program) PUNPCKHQDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PUNPCKHQDQ", 2, Operands { v0, v1 })
    // SSE2-only instruction: there is no MMX form, so only XMM operand
    // shapes are accepted.
    // PUNPCKHQDQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 6D /r — register-direct ModRM (mod=11, reg=dst, r/m=src).
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x6d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PUNPCKHQDQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 6D /r with a memory source; mrsd emits ModRM/SIB/disp.
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x6d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PUNPCKHQDQ")
    }
    return p
}
 26034  
// PUNPCKHWD performs "Unpack and Interleave High-Order Words into Doublewords".
//
// Mnemonic        : PUNPCKHWD
// Supported forms : (4 forms)
//
//    * PUNPCKHWD mm, mm       [MMX]
//    * PUNPCKHWD m64, mm      [MMX]
//    * PUNPCKHWD xmm, xmm     [SSE2]
//    * PUNPCKHWD m128, xmm    [SSE2]
//
func (self *Program) PUNPCKHWD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PUNPCKHWD", 2, Operands { v0, v1 })
    // Each matching operand form registers one encoder; if no form
    // matches, p.len stays 0 and we panic at the end.
    // PUNPCKHWD mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F 69 /r — register-direct ModRM (mod=11, reg=dst, r/m=src).
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x69)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PUNPCKHWD m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F 69 /r with a memory source; mrsd emits ModRM/SIB/disp.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x69)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PUNPCKHWD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 69 /r — the 66h prefix selects the 128-bit XMM form.
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x69)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PUNPCKHWD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 69 /r with a memory source.
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x69)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PUNPCKHWD")
    }
    return p
}
 26098  
// PUNPCKLBW performs "Unpack and Interleave Low-Order Bytes into Words".
//
// Mnemonic        : PUNPCKLBW
// Supported forms : (4 forms)
//
//    * PUNPCKLBW mm, mm       [MMX]
//    * PUNPCKLBW m32, mm      [MMX]
//    * PUNPCKLBW xmm, xmm     [SSE2]
//    * PUNPCKLBW m128, xmm    [SSE2]
//
func (self *Program) PUNPCKLBW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PUNPCKLBW", 2, Operands { v0, v1 })
    // Each matching operand form registers one encoder; if no form
    // matches, p.len stays 0 and we panic at the end.
    // PUNPCKLBW mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F 60 /r — register-direct ModRM (mod=11, reg=dst, r/m=src).
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x60)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PUNPCKLBW m32, mm
    // Note: unlike the PUNPCKH* family, the MMX memory form here is
    // m32 (isM32), matching the "m32" operand in the doc comment above.
    if isM32(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0F 60 /r with a memory source; mrsd emits ModRM/SIB/disp.
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x60)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // PUNPCKLBW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 60 /r — the 66h prefix selects the 128-bit XMM form.
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x60)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PUNPCKLBW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 66 0F 60 /r with a memory source.
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x60)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for PUNPCKLBW")
    }
    return p
}
 26162  
// PUNPCKLDQ performs "Unpack and Interleave Low-Order Doublewords into Quadwords".
//
// Mnemonic        : PUNPCKLDQ
// Supported forms : (4 forms)
//
//    * PUNPCKLDQ mm, mm       [MMX]
//    * PUNPCKLDQ m32, mm      [MMX]
//    * PUNPCKLDQ xmm, xmm     [SSE2]
//    * PUNPCKLDQ m128, xmm    [SSE2]
//
// Encoded as 0F 62 /r; the 0x66 prefix distinguishes the XMM forms.
func (self *Program) PUNPCKLDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PUNPCKLDQ", 2, Operands { v0, v1 })
    // PUNPCKLDQ mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                // optional REX prefix (high register bits)
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0x62)                                    // PUNPCKLDQ opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11 reg-reg, reg=dst, rm=src
        })
    }
    // PUNPCKLDQ m32, mm
    if isM32(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x62)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/displacement for m32 source
        })
    }
    // PUNPCKLDQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // 0x66 prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x62)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PUNPCKLDQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x62)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types: reject at build time.
    if p.len == 0 {
        panic("invalid operands for PUNPCKLDQ")
    }
    return p
}
 26226  
// PUNPCKLQDQ performs "Unpack and Interleave Low-Order Quadwords into Double Quadwords".
//
// Mnemonic        : PUNPCKLQDQ
// Supported forms : (2 forms)
//
//    * PUNPCKLQDQ xmm, xmm     [SSE2]
//    * PUNPCKLQDQ m128, xmm    [SSE2]
//
// SSE2-only instruction (no MMX form); encoded as 66 0F 6C /r.
func (self *Program) PUNPCKLQDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PUNPCKLQDQ", 2, Operands { v0, v1 })
    // PUNPCKLQDQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // mandatory 0x66 prefix for the SSE2 opcode
            m.rexo(hcode(v[1]), v[0], false)                // optional REX prefix (high register bits)
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0x6c)                                    // PUNPCKLQDQ opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11 reg-reg, reg=dst, rm=src
        })
    }
    // PUNPCKLQDQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x6c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/displacement for m128 source
        })
    }
    // No form matched the supplied operand types: reject at build time.
    if p.len == 0 {
        panic("invalid operands for PUNPCKLQDQ")
    }
    return p
}
 26266  
// PUNPCKLWD performs "Unpack and Interleave Low-Order Words into Doublewords".
//
// Mnemonic        : PUNPCKLWD
// Supported forms : (4 forms)
//
//    * PUNPCKLWD mm, mm       [MMX]
//    * PUNPCKLWD m32, mm      [MMX]
//    * PUNPCKLWD xmm, xmm     [SSE2]
//    * PUNPCKLWD m128, xmm    [SSE2]
//
// Encoded as 0F 61 /r; the 0x66 prefix distinguishes the XMM forms.
func (self *Program) PUNPCKLWD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PUNPCKLWD", 2, Operands { v0, v1 })
    // PUNPCKLWD mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                // optional REX prefix (high register bits)
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0x61)                                    // PUNPCKLWD opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11 reg-reg, reg=dst, rm=src
        })
    }
    // PUNPCKLWD m32, mm
    if isM32(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x61)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/displacement for m32 source
        })
    }
    // PUNPCKLWD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // 0x66 prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x61)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PUNPCKLWD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x61)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types: reject at build time.
    if p.len == 0 {
        panic("invalid operands for PUNPCKLWD")
    }
    return p
}
 26330  
// PUSHQ performs "Push Value Onto the Stack".
//
// Mnemonic        : PUSH
// Supported forms : (4 forms)
//
//    * PUSHQ imm8
//    * PUSHQ imm32
//    * PUSHQ r64
//    * PUSHQ m64
//
// In 64-bit mode PUSH of a register/memory operand defaults to 64-bit
// width, so no REX.W prefix is required for the r64/m64 forms.
func (self *Program) PUSHQ(v0 interface{}) *Instruction {
    p := self.alloc("PUSHQ", 1, Operands { v0 })
    // PUSHQ imm8
    if isImm8Ext(v0, 8) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x6a)                    // PUSH imm8 (sign-extended)
            m.imm1(toImmAny(v[0]))          // 1-byte immediate
        })
    }
    // PUSHQ imm32
    if isImm32Ext(v0, 8) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x68)                    // PUSH imm32 (sign-extended)
            m.imm4(toImmAny(v[0]))          // 4-byte immediate
        })
    }
    // PUSHQ r64
    if isReg64(v0) {
        p.domain = DomainGeneric
        // Two alternative encodings are registered; the encoder picks one.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], false)          // optional REX prefix (high register bits)
            m.emit(0x50 | lcode(v[0]))      // short form: PUSH r64 (0x50 + register)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], false)
            m.emit(0xff)                    // long form: PUSH r/m64
            m.emit(0xf0 | lcode(v[0]))      // ModRM: mod=11, reg=/6, rm=register
        })
    }
    // PUSHQ m64
    if isM64(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)
            m.emit(0xff)                    // PUSH r/m64
            m.mrsd(6, addr(v[0]), 1)        // ModRM reg field = /6, plus SIB/displacement
        })
    }
    // No form matched the supplied operand type: reject at build time.
    if p.len == 0 {
        panic("invalid operands for PUSHQ")
    }
    return p
}
 26386  
// PUSHW performs "Push Value Onto the Stack".
//
// Mnemonic        : PUSH
// Supported forms : (2 forms)
//
//    * PUSHW r16
//    * PUSHW m16
//
// The 0x66 operand-size prefix narrows the push to 16 bits.
func (self *Program) PUSHW(v0 interface{}) *Instruction {
    p := self.alloc("PUSHW", 1, Operands { v0 })
    // PUSHW r16
    if isReg16(v0) {
        p.domain = DomainGeneric
        // Two alternative encodings are registered; the encoder picks one.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                    // operand-size override prefix (16-bit)
            m.rexo(0, v[0], false)          // optional REX prefix (high register bits)
            m.emit(0x50 | lcode(v[0]))      // short form: PUSH r16 (0x50 + register)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[0], false)
            m.emit(0xff)                    // long form: PUSH r/m16
            m.emit(0xf0 | lcode(v[0]))      // ModRM: mod=11, reg=/6, rm=register
        })
    }
    // PUSHW m16
    if isM16(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[0]), false)
            m.emit(0xff)                    // PUSH r/m16
            m.mrsd(6, addr(v[0]), 1)        // ModRM reg field = /6, plus SIB/displacement
        })
    }
    // No form matched the supplied operand type: reject at build time.
    if p.len == 0 {
        panic("invalid operands for PUSHW")
    }
    return p
}
 26427  
// PXOR performs "Packed Bitwise Logical Exclusive OR".
//
// Mnemonic        : PXOR
// Supported forms : (4 forms)
//
//    * PXOR mm, mm       [MMX]
//    * PXOR m64, mm      [MMX]
//    * PXOR xmm, xmm     [SSE2]
//    * PXOR m128, xmm    [SSE2]
//
// Encoded as 0F EF /r; the 0x66 prefix distinguishes the XMM forms.
func (self *Program) PXOR(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("PXOR", 2, Operands { v0, v1 })
    // PXOR mm, mm
    if isMM(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                // optional REX prefix (high register bits)
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0xef)                                    // PXOR opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11 reg-reg, reg=dst, rm=src
        })
    }
    // PXOR m64, mm
    if isM64(v0) && isMM(v1) {
        self.require(ISA_MMX)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xef)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/displacement for m64 source
        })
    }
    // PXOR xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                    // 0x66 prefix selects the XMM (SSE2) form
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xef)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // PXOR m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xef)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types: reject at build time.
    if p.len == 0 {
        panic("invalid operands for PXOR")
    }
    return p
}
 26491  
// RCLB performs "Rotate Left through Carry Flag".
//
// Mnemonic        : RCL
// Supported forms : (6 forms)
//
//    * RCLB 1, r8
//    * RCLB imm8, r8
//    * RCLB cl, r8
//    * RCLB 1, m8
//    * RCLB imm8, m8
//    * RCLB cl, m8
//
// 8-bit rotate group: opcode 0xD0 (by 1), 0xC0 (by imm8), 0xD2 (by CL),
// all with ModRM reg field /2 selecting RCL.
func (self *Program) RCLB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("RCLB", 2, Operands { v0, v1 })
    // RCLB 1, r8
    if isConst1(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))    // REX may be forced for SPL/BPL/SIL/DIL-style registers
            m.emit(0xd0)                        // rotate r/m8 by 1
            m.emit(0xd0 | lcode(v[1]))          // ModRM: mod=11, reg=/2 (RCL), rm=register
        })
    }
    // RCLB imm8, r8
    if isImm8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xc0)                        // rotate r/m8 by imm8
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))              // rotate count immediate
        })
    }
    // RCLB cl, r8
    if v0 == CL && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xd2)                        // rotate r/m8 by CL
            m.emit(0xd0 | lcode(v[1]))
        })
    }
    // RCLB 1, m8
    if isConst1(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd0)
            m.mrsd(2, addr(v[1]), 1)            // ModRM reg field = /2 (RCL), plus SIB/displacement
        })
    }
    // RCLB imm8, m8
    if isImm8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc0)
            m.mrsd(2, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // RCLB cl, m8
    if v0 == CL && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd2)
            m.mrsd(2, addr(v[1]), 1)
        })
    }
    // No form matched the supplied operand types: reject at build time.
    if p.len == 0 {
        panic("invalid operands for RCLB")
    }
    return p
}
 26567  
// RCLL performs "Rotate Left through Carry Flag".
//
// Mnemonic        : RCL
// Supported forms : (6 forms)
//
//    * RCLL 1, r32
//    * RCLL imm8, r32
//    * RCLL cl, r32
//    * RCLL 1, m32
//    * RCLL imm8, m32
//    * RCLL cl, m32
//
// 32-bit rotate group: opcode 0xD1 (by 1), 0xC1 (by imm8), 0xD3 (by CL),
// all with ModRM reg field /2 selecting RCL.
func (self *Program) RCLL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("RCLL", 2, Operands { v0, v1 })
    // RCLL 1, r32
    if isConst1(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)          // optional REX prefix (high register bits)
            m.emit(0xd1)                    // rotate r/m32 by 1
            m.emit(0xd0 | lcode(v[1]))      // ModRM: mod=11, reg=/2 (RCL), rm=register
        })
    }
    // RCLL imm8, r32
    if isImm8(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xc1)                    // rotate r/m32 by imm8
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))          // rotate count immediate
        })
    }
    // RCLL cl, r32
    if v0 == CL && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xd3)                    // rotate r/m32 by CL
            m.emit(0xd0 | lcode(v[1]))
        })
    }
    // RCLL 1, m32
    if isConst1(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd1)
            m.mrsd(2, addr(v[1]), 1)        // ModRM reg field = /2 (RCL), plus SIB/displacement
        })
    }
    // RCLL imm8, m32
    if isImm8(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc1)
            m.mrsd(2, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // RCLL cl, m32
    if v0 == CL && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd3)
            m.mrsd(2, addr(v[1]), 1)
        })
    }
    // No form matched the supplied operand types: reject at build time.
    if p.len == 0 {
        panic("invalid operands for RCLL")
    }
    return p
}
 26643  
// RCLQ performs "Rotate Left through Carry Flag".
//
// Mnemonic        : RCL
// Supported forms : (6 forms)
//
//    * RCLQ 1, r64
//    * RCLQ imm8, r64
//    * RCLQ cl, r64
//    * RCLQ 1, m64
//    * RCLQ imm8, m64
//    * RCLQ cl, m64
//
// 64-bit rotate group: a mandatory REX.W prefix widens the operand to
// 64 bits; ModRM reg field /2 selects RCL.
func (self *Program) RCLQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("RCLQ", 2, Operands { v0, v1 })
    // RCLQ 1, r64
    if isConst1(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))      // REX.W prefix (plus high register bit if needed)
            m.emit(0xd1)                    // rotate r/m64 by 1
            m.emit(0xd0 | lcode(v[1]))      // ModRM: mod=11, reg=/2 (RCL), rm=register
        })
    }
    // RCLQ imm8, r64
    if isImm8(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0xc1)                    // rotate r/m64 by imm8
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))          // rotate count immediate
        })
    }
    // RCLQ cl, r64
    if v0 == CL && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0xd3)                    // rotate r/m64 by CL
            m.emit(0xd0 | lcode(v[1]))
        })
    }
    // RCLQ 1, m64
    if isConst1(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))        // REX.W prefix for the memory form
            m.emit(0xd1)
            m.mrsd(2, addr(v[1]), 1)        // ModRM reg field = /2 (RCL), plus SIB/displacement
        })
    }
    // RCLQ imm8, m64
    if isImm8(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0xc1)
            m.mrsd(2, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // RCLQ cl, m64
    if v0 == CL && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0xd3)
            m.mrsd(2, addr(v[1]), 1)
        })
    }
    // No form matched the supplied operand types: reject at build time.
    if p.len == 0 {
        panic("invalid operands for RCLQ")
    }
    return p
}
 26719  
// RCLW performs "Rotate Left through Carry Flag".
//
// Mnemonic        : RCL
// Supported forms : (6 forms)
//
//    * RCLW 1, r16
//    * RCLW imm8, r16
//    * RCLW cl, r16
//    * RCLW 1, m16
//    * RCLW imm8, m16
//    * RCLW cl, m16
//
// 16-bit rotate group: the 0x66 operand-size prefix narrows the operand
// to 16 bits; ModRM reg field /2 selects RCL.
func (self *Program) RCLW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("RCLW", 2, Operands { v0, v1 })
    // RCLW 1, r16
    if isConst1(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                    // operand-size override prefix (16-bit)
            m.rexo(0, v[1], false)          // optional REX prefix (high register bits)
            m.emit(0xd1)                    // rotate r/m16 by 1
            m.emit(0xd0 | lcode(v[1]))      // ModRM: mod=11, reg=/2 (RCL), rm=register
        })
    }
    // RCLW imm8, r16
    if isImm8(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0xc1)                    // rotate r/m16 by imm8
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))          // rotate count immediate
        })
    }
    // RCLW cl, r16
    if v0 == CL && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0xd3)                    // rotate r/m16 by CL
            m.emit(0xd0 | lcode(v[1]))
        })
    }
    // RCLW 1, m16
    if isConst1(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd1)
            m.mrsd(2, addr(v[1]), 1)        // ModRM reg field = /2 (RCL), plus SIB/displacement
        })
    }
    // RCLW imm8, m16
    if isImm8(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc1)
            m.mrsd(2, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // RCLW cl, m16
    if v0 == CL && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd3)
            m.mrsd(2, addr(v[1]), 1)
        })
    }
    // No form matched the supplied operand types: reject at build time.
    if p.len == 0 {
        panic("invalid operands for RCLW")
    }
    return p
}
 26801  
// RCPPS performs "Compute Approximate Reciprocals of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : RCPPS
// Supported forms : (2 forms)
//
//    * RCPPS xmm, xmm     [SSE]
//    * RCPPS m128, xmm    [SSE]
//
// Encoded as 0F 53 /r (no mandatory prefix).
func (self *Program) RCPPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("RCPPS", 2, Operands { v0, v1 })
    // RCPPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                // optional REX prefix (high register bits)
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0x53)                                    // RCPPS opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11 reg-reg, reg=dst, rm=src
        })
    }
    // RCPPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x53)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/displacement for m128 source
        })
    }
    // No form matched the supplied operand types: reject at build time.
    if p.len == 0 {
        panic("invalid operands for RCPPS")
    }
    return p
}
 26839  
// RCPSS performs "Compute Approximate Reciprocal of Scalar Single-Precision Floating-Point Values".
//
// Mnemonic        : RCPSS
// Supported forms : (2 forms)
//
//    * RCPSS xmm, xmm    [SSE]
//    * RCPSS m32, xmm    [SSE]
//
// Encoded as F3 0F 53 /r; the 0xF3 prefix selects the scalar form.
func (self *Program) RCPSS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("RCPSS", 2, Operands { v0, v1 })
    // RCPSS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)                                    // mandatory 0xF3 prefix (scalar single)
            m.rexo(hcode(v[1]), v[0], false)                // optional REX prefix (high register bits)
            m.emit(0x0f)                                    // two-byte opcode escape
            m.emit(0x53)                                    // RCPSS opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11 reg-reg, reg=dst, rm=src
        })
    }
    // RCPSS m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x53)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/displacement for m32 source
        })
    }
    // No form matched the supplied operand types: reject at build time.
    if p.len == 0 {
        panic("invalid operands for RCPSS")
    }
    return p
}
 26879  
// RCRB performs "Rotate Right through Carry Flag".
//
// Mnemonic        : RCR
// Supported forms : (6 forms)
//
//    * RCRB 1, r8
//    * RCRB imm8, r8
//    * RCRB cl, r8
//    * RCRB 1, m8
//    * RCRB imm8, m8
//    * RCRB cl, m8
//
// 8-bit rotate group: opcode 0xD0 (by 1), 0xC0 (by imm8), 0xD2 (by CL),
// all with ModRM reg field /3 selecting RCR.
func (self *Program) RCRB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("RCRB", 2, Operands { v0, v1 })
    // RCRB 1, r8
    if isConst1(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))    // REX may be forced for SPL/BPL/SIL/DIL-style registers
            m.emit(0xd0)                        // rotate r/m8 by 1
            m.emit(0xd8 | lcode(v[1]))          // ModRM: mod=11, reg=/3 (RCR), rm=register
        })
    }
    // RCRB imm8, r8
    if isImm8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xc0)                        // rotate r/m8 by imm8
            m.emit(0xd8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))              // rotate count immediate
        })
    }
    // RCRB cl, r8
    if v0 == CL && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xd2)                        // rotate r/m8 by CL
            m.emit(0xd8 | lcode(v[1]))
        })
    }
    // RCRB 1, m8
    if isConst1(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd0)
            m.mrsd(3, addr(v[1]), 1)            // ModRM reg field = /3 (RCR), plus SIB/displacement
        })
    }
    // RCRB imm8, m8
    if isImm8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc0)
            m.mrsd(3, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // RCRB cl, m8
    if v0 == CL && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd2)
            m.mrsd(3, addr(v[1]), 1)
        })
    }
    // No form matched the supplied operand types: reject at build time.
    if p.len == 0 {
        panic("invalid operands for RCRB")
    }
    return p
}
 26955  
// RCRL performs "Rotate Right through Carry Flag".
//
// Mnemonic        : RCR
// Supported forms : (6 forms)
//
//    * RCRL 1, r32
//    * RCRL imm8, r32
//    * RCRL cl, r32
//    * RCRL 1, m32
//    * RCRL imm8, m32
//    * RCRL cl, m32
//
// 32-bit rotate group: opcode 0xD1 (by 1), 0xC1 (by imm8), 0xD3 (by CL),
// all with ModRM reg field /3 selecting RCR.
func (self *Program) RCRL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("RCRL", 2, Operands { v0, v1 })
    // RCRL 1, r32
    if isConst1(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)          // optional REX prefix (high register bits)
            m.emit(0xd1)                    // rotate r/m32 by 1
            m.emit(0xd8 | lcode(v[1]))      // ModRM: mod=11, reg=/3 (RCR), rm=register
        })
    }
    // RCRL imm8, r32
    if isImm8(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xc1)                    // rotate r/m32 by imm8
            m.emit(0xd8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))          // rotate count immediate
        })
    }
    // RCRL cl, r32
    if v0 == CL && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xd3)                    // rotate r/m32 by CL
            m.emit(0xd8 | lcode(v[1]))
        })
    }
    // RCRL 1, m32
    if isConst1(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd1)
            m.mrsd(3, addr(v[1]), 1)        // ModRM reg field = /3 (RCR), plus SIB/displacement
        })
    }
    // RCRL imm8, m32
    if isImm8(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc1)
            m.mrsd(3, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // RCRL cl, m32
    if v0 == CL && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd3)
            m.mrsd(3, addr(v[1]), 1)
        })
    }
    // No form matched the supplied operand types: reject at build time.
    if p.len == 0 {
        panic("invalid operands for RCRL")
    }
    return p
}
 27031  
 27032  // RCRQ performs "Rotate Right through Carry Flag".
 27033  //
 27034  // Mnemonic        : RCR
 27035  // Supported forms : (6 forms)
 27036  //
 27037  //    * RCRQ 1, r64
 27038  //    * RCRQ imm8, r64
 27039  //    * RCRQ cl, r64
 27040  //    * RCRQ 1, m64
 27041  //    * RCRQ imm8, m64
 27042  //    * RCRQ cl, m64
 27043  //
func (self *Program) RCRQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("RCRQ", 2, Operands { v0, v1 })
    // 64-bit RCR: same shift/rotate group as RCRL (/3 = RCR; 0xd1 by 1,
    // 0xc1 by imm8, 0xd3 by CL) but with a mandatory REX.W prefix.
    // Register forms emit it directly as 0x48|hcode(reg), where hcode
    // contributes the REX.B bit for r8-r15; memory forms obtain REX.W
    // via rexm(1, ...).
    // RCRQ 1, r64
    if isConst1(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0xd1)
            m.emit(0xd8 | lcode(v[1]))
        })
    }
    // RCRQ imm8, r64
    if isImm8(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0xc1)
            m.emit(0xd8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // RCRQ cl, r64
    if v0 == CL && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0xd3)
            m.emit(0xd8 | lcode(v[1]))
        })
    }
    // RCRQ 1, m64
    if isConst1(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0xd1)
            m.mrsd(3, addr(v[1]), 1)
        })
    }
    // RCRQ imm8, m64
    if isImm8(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0xc1)
            m.mrsd(3, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // RCRQ cl, m64
    if v0 == CL && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0xd3)
            m.mrsd(3, addr(v[1]), 1)
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for RCRQ")
    }
    return p
}
 27107  
 27108  // RCRW performs "Rotate Right through Carry Flag".
 27109  //
 27110  // Mnemonic        : RCR
 27111  // Supported forms : (6 forms)
 27112  //
 27113  //    * RCRW 1, r16
 27114  //    * RCRW imm8, r16
 27115  //    * RCRW cl, r16
 27116  //    * RCRW 1, m16
 27117  //    * RCRW imm8, m16
 27118  //    * RCRW cl, m16
 27119  //
func (self *Program) RCRW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("RCRW", 2, Operands { v0, v1 })
    // 16-bit RCR: identical structure to the 32-bit forms (/3 = RCR;
    // 0xd1 by 1, 0xc1 by imm8, 0xd3 by CL) with a leading 0x66
    // operand-size override prefix selecting 16-bit operands.
    // RCRW 1, r16
    if isConst1(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0xd1)
            m.emit(0xd8 | lcode(v[1]))
        })
    }
    // RCRW imm8, r16
    if isImm8(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0xc1)
            m.emit(0xd8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // RCRW cl, r16
    if v0 == CL && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0xd3)
            m.emit(0xd8 | lcode(v[1]))
        })
    }
    // RCRW 1, m16
    if isConst1(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd1)
            m.mrsd(3, addr(v[1]), 1)
        })
    }
    // RCRW imm8, m16
    if isImm8(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc1)
            m.mrsd(3, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // RCRW cl, m16
    if v0 == CL && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd3)
            m.mrsd(3, addr(v[1]), 1)
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for RCRW")
    }
    return p
}
 27189  
 27190  // RDRAND performs "Read Random Number".
 27191  //
 27192  // Mnemonic        : RDRAND
 27193  // Supported forms : (3 forms)
 27194  //
 27195  //    * RDRAND r16    [RDRAND]
 27196  //    * RDRAND r32    [RDRAND]
 27197  //    * RDRAND r64    [RDRAND]
 27198  //
func (self *Program) RDRAND(v0 interface{}) *Instruction {
    p := self.alloc("RDRAND", 1, Operands { v0 })
    // All three forms encode as 0F C7 /6 with a register-direct ModRM
    // byte (0xf0 = mod=11 | reg=6). Operand size is selected by prefix:
    // 0x66 for 16-bit, none for 32-bit, REX.W (0x48|hcode) for 64-bit.
    // Each form requires the RDRAND ISA extension.
    // RDRAND r16
    if isReg16(v0) {
        self.require(ISA_RDRAND)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[0], false)
            m.emit(0x0f)
            m.emit(0xc7)
            m.emit(0xf0 | lcode(v[0]))
        })
    }
    // RDRAND r32
    if isReg32(v0) {
        self.require(ISA_RDRAND)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], false)
            m.emit(0x0f)
            m.emit(0xc7)
            m.emit(0xf0 | lcode(v[0]))
        })
    }
    // RDRAND r64
    if isReg64(v0) {
        self.require(ISA_RDRAND)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0]))
            m.emit(0x0f)
            m.emit(0xc7)
            m.emit(0xf0 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand.
    if p.len == 0 {
        panic("invalid operands for RDRAND")
    }
    return p
}
 27240  
 27241  // RDSEED performs "Read Random SEED".
 27242  //
 27243  // Mnemonic        : RDSEED
 27244  // Supported forms : (3 forms)
 27245  //
 27246  //    * RDSEED r16    [RDSEED]
 27247  //    * RDSEED r32    [RDSEED]
 27248  //    * RDSEED r64    [RDSEED]
 27249  //
func (self *Program) RDSEED(v0 interface{}) *Instruction {
    p := self.alloc("RDSEED", 1, Operands { v0 })
    // Mirrors RDRAND but with ModRM reg field /7: all forms encode as
    // 0F C7 /7 with a register-direct ModRM byte (0xf8 = mod=11 |
    // reg=7). Operand size via prefix: 0x66 for 16-bit, none for
    // 32-bit, REX.W for 64-bit. Requires the RDSEED ISA extension.
    // RDSEED r16
    if isReg16(v0) {
        self.require(ISA_RDSEED)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[0], false)
            m.emit(0x0f)
            m.emit(0xc7)
            m.emit(0xf8 | lcode(v[0]))
        })
    }
    // RDSEED r32
    if isReg32(v0) {
        self.require(ISA_RDSEED)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], false)
            m.emit(0x0f)
            m.emit(0xc7)
            m.emit(0xf8 | lcode(v[0]))
        })
    }
    // RDSEED r64
    if isReg64(v0) {
        self.require(ISA_RDSEED)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0]))
            m.emit(0x0f)
            m.emit(0xc7)
            m.emit(0xf8 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand.
    if p.len == 0 {
        panic("invalid operands for RDSEED")
    }
    return p
}
 27291  
 27292  // RDTSC performs "Read Time-Stamp Counter".
 27293  //
 27294  // Mnemonic        : RDTSC
 27295  // Supported forms : (1 form)
 27296  //
 27297  //    * RDTSC    [RDTSC]
 27298  //
 27299  func (self *Program) RDTSC() *Instruction {
 27300      p := self.alloc("RDTSC", 0, Operands {  })
 27301      // RDTSC
 27302      self.require(ISA_RDTSC)
 27303      p.domain = DomainGeneric
 27304      p.add(0, func(m *_Encoding, v []interface{}) {
 27305          m.emit(0x0f)
 27306          m.emit(0x31)
 27307      })
 27308      return p
 27309  }
 27310  
 27311  // RDTSCP performs "Read Time-Stamp Counter and Processor ID".
 27312  //
 27313  // Mnemonic        : RDTSCP
 27314  // Supported forms : (1 form)
 27315  //
 27316  //    * RDTSCP    [RDTSCP]
 27317  //
 27318  func (self *Program) RDTSCP() *Instruction {
 27319      p := self.alloc("RDTSCP", 0, Operands {  })
 27320      // RDTSCP
 27321      self.require(ISA_RDTSCP)
 27322      p.domain = DomainGeneric
 27323      p.add(0, func(m *_Encoding, v []interface{}) {
 27324          m.emit(0x0f)
 27325          m.emit(0x01)
 27326          m.emit(0xf9)
 27327      })
 27328      return p
 27329  }
 27330  
 27331  // RET performs "Return from Procedure".
 27332  //
 27333  // Mnemonic        : RET
 27334  // Supported forms : (2 forms)
 27335  //
 27336  //    * RET
 27337  //    * RET imm16
 27338  //
 27339  func (self *Program) RET(vv ...interface{}) *Instruction {
 27340      var p *Instruction
 27341      switch len(vv) {
 27342          case 0  : p = self.alloc("RET", 0, Operands {  })
 27343          case 1  : p = self.alloc("RET", 1, Operands { vv[0] })
 27344          default : panic("instruction RET takes 0 or 1 operands")
 27345      }
 27346      // RET
 27347      if len(vv) == 0 {
 27348          p.domain = DomainGeneric
 27349          p.add(0, func(m *_Encoding, v []interface{}) {
 27350              m.emit(0xc3)
 27351          })
 27352      }
 27353      // RET imm16
 27354      if len(vv) == 1 && isImm16(vv[0]) {
 27355          p.domain = DomainGeneric
 27356          p.add(0, func(m *_Encoding, v []interface{}) {
 27357              m.emit(0xc2)
 27358              m.imm2(toImmAny(v[0]))
 27359          })
 27360      }
 27361      if p.len == 0 {
 27362          panic("invalid operands for RET")
 27363      }
 27364      return p
 27365  }
 27366  
 27367  // ROLB performs "Rotate Left".
 27368  //
 27369  // Mnemonic        : ROL
 27370  // Supported forms : (6 forms)
 27371  //
 27372  //    * ROLB 1, r8
 27373  //    * ROLB imm8, r8
 27374  //    * ROLB cl, r8
 27375  //    * ROLB 1, m8
 27376  //    * ROLB imm8, m8
 27377  //    * ROLB cl, m8
 27378  //
func (self *Program) ROLB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ROLB", 2, Operands { v0, v1 })
    // Byte-sized ROL: shift/rotate group with ModRM reg field /0
    // selecting ROL (register ModRM base 0xc0 = mod=11 | reg=0; memory
    // forms pass 0 to mrsd). Byte opcodes: 0xd0 rotates by 1, 0xc0 by
    // imm8, 0xd2 by CL. isReg8REX forces a REX prefix for byte
    // registers only reachable with one (SPL/BPL/SIL/DIL).
    // ROLB 1, r8
    if isConst1(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xd0)
            m.emit(0xc0 | lcode(v[1]))
        })
    }
    // ROLB imm8, r8
    if isImm8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xc0)
            m.emit(0xc0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // ROLB cl, r8
    if v0 == CL && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xd2)
            m.emit(0xc0 | lcode(v[1]))
        })
    }
    // ROLB 1, m8
    if isConst1(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd0)
            m.mrsd(0, addr(v[1]), 1)
        })
    }
    // ROLB imm8, m8
    if isImm8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc0)
            m.mrsd(0, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // ROLB cl, m8
    if v0 == CL && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd2)
            m.mrsd(0, addr(v[1]), 1)
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for ROLB")
    }
    return p
}
 27442  
 27443  // ROLL performs "Rotate Left".
 27444  //
 27445  // Mnemonic        : ROL
 27446  // Supported forms : (6 forms)
 27447  //
 27448  //    * ROLL 1, r32
 27449  //    * ROLL imm8, r32
 27450  //    * ROLL cl, r32
 27451  //    * ROLL 1, m32
 27452  //    * ROLL imm8, m32
 27453  //    * ROLL cl, m32
 27454  //
func (self *Program) ROLL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ROLL", 2, Operands { v0, v1 })
    // 32-bit ROL: shift/rotate group with ModRM reg field /0 selecting
    // ROL (register ModRM base 0xc0 = mod=11 | reg=0; memory forms pass
    // 0 to mrsd). Opcodes: 0xd1 rotates by 1, 0xc1 by imm8, 0xd3 by CL.
    // ROLL 1, r32
    if isConst1(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xd1)
            m.emit(0xc0 | lcode(v[1]))
        })
    }
    // ROLL imm8, r32
    if isImm8(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xc1)
            m.emit(0xc0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // ROLL cl, r32
    if v0 == CL && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xd3)
            m.emit(0xc0 | lcode(v[1]))
        })
    }
    // ROLL 1, m32
    if isConst1(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd1)
            m.mrsd(0, addr(v[1]), 1)
        })
    }
    // ROLL imm8, m32
    if isImm8(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc1)
            m.mrsd(0, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // ROLL cl, m32
    if v0 == CL && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd3)
            m.mrsd(0, addr(v[1]), 1)
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for ROLL")
    }
    return p
}
 27518  
 27519  // ROLQ performs "Rotate Left".
 27520  //
 27521  // Mnemonic        : ROL
 27522  // Supported forms : (6 forms)
 27523  //
 27524  //    * ROLQ 1, r64
 27525  //    * ROLQ imm8, r64
 27526  //    * ROLQ cl, r64
 27527  //    * ROLQ 1, m64
 27528  //    * ROLQ imm8, m64
 27529  //    * ROLQ cl, m64
 27530  //
func (self *Program) ROLQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ROLQ", 2, Operands { v0, v1 })
    // 64-bit ROL: same /0 group encoding as ROLL (0xd1 by 1, 0xc1 by
    // imm8, 0xd3 by CL) with a mandatory REX.W prefix — emitted directly
    // as 0x48|hcode(reg) for register forms (hcode supplies REX.B for
    // r8-r15) and via rexm(1, ...) for memory forms.
    // ROLQ 1, r64
    if isConst1(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0xd1)
            m.emit(0xc0 | lcode(v[1]))
        })
    }
    // ROLQ imm8, r64
    if isImm8(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0xc1)
            m.emit(0xc0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // ROLQ cl, r64
    if v0 == CL && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0xd3)
            m.emit(0xc0 | lcode(v[1]))
        })
    }
    // ROLQ 1, m64
    if isConst1(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0xd1)
            m.mrsd(0, addr(v[1]), 1)
        })
    }
    // ROLQ imm8, m64
    if isImm8(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0xc1)
            m.mrsd(0, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // ROLQ cl, m64
    if v0 == CL && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0xd3)
            m.mrsd(0, addr(v[1]), 1)
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for ROLQ")
    }
    return p
}
 27594  
 27595  // ROLW performs "Rotate Left".
 27596  //
 27597  // Mnemonic        : ROL
 27598  // Supported forms : (6 forms)
 27599  //
 27600  //    * ROLW 1, r16
 27601  //    * ROLW imm8, r16
 27602  //    * ROLW cl, r16
 27603  //    * ROLW 1, m16
 27604  //    * ROLW imm8, m16
 27605  //    * ROLW cl, m16
 27606  //
func (self *Program) ROLW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("ROLW", 2, Operands { v0, v1 })
    // 16-bit ROL: identical structure to ROLL (/0 = ROL; 0xd1 by 1,
    // 0xc1 by imm8, 0xd3 by CL) with a leading 0x66 operand-size
    // override prefix selecting 16-bit operands.
    // ROLW 1, r16
    if isConst1(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0xd1)
            m.emit(0xc0 | lcode(v[1]))
        })
    }
    // ROLW imm8, r16
    if isImm8(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0xc1)
            m.emit(0xc0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // ROLW cl, r16
    if v0 == CL && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0xd3)
            m.emit(0xc0 | lcode(v[1]))
        })
    }
    // ROLW 1, m16
    if isConst1(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd1)
            m.mrsd(0, addr(v[1]), 1)
        })
    }
    // ROLW imm8, m16
    if isImm8(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc1)
            m.mrsd(0, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // ROLW cl, m16
    if v0 == CL && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd3)
            m.mrsd(0, addr(v[1]), 1)
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for ROLW")
    }
    return p
}
 27676  
 27677  // RORB performs "Rotate Right".
 27678  //
 27679  // Mnemonic        : ROR
 27680  // Supported forms : (6 forms)
 27681  //
 27682  //    * RORB 1, r8
 27683  //    * RORB imm8, r8
 27684  //    * RORB cl, r8
 27685  //    * RORB 1, m8
 27686  //    * RORB imm8, m8
 27687  //    * RORB cl, m8
 27688  //
func (self *Program) RORB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("RORB", 2, Operands { v0, v1 })
    // Byte-sized ROR: shift/rotate group with ModRM reg field /1
    // selecting ROR (register ModRM base 0xc8 = mod=11 | reg=1; memory
    // forms pass 1 to mrsd). Byte opcodes: 0xd0 rotates by 1, 0xc0 by
    // imm8, 0xd2 by CL. isReg8REX forces a REX prefix for byte
    // registers only reachable with one (SPL/BPL/SIL/DIL).
    // RORB 1, r8
    if isConst1(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xd0)
            m.emit(0xc8 | lcode(v[1]))
        })
    }
    // RORB imm8, r8
    if isImm8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xc0)
            m.emit(0xc8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // RORB cl, r8
    if v0 == CL && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xd2)
            m.emit(0xc8 | lcode(v[1]))
        })
    }
    // RORB 1, m8
    if isConst1(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd0)
            m.mrsd(1, addr(v[1]), 1)
        })
    }
    // RORB imm8, m8
    if isImm8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc0)
            m.mrsd(1, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // RORB cl, m8
    if v0 == CL && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd2)
            m.mrsd(1, addr(v[1]), 1)
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for RORB")
    }
    return p
}
 27752  
 27753  // RORL performs "Rotate Right".
 27754  //
 27755  // Mnemonic        : ROR
 27756  // Supported forms : (6 forms)
 27757  //
 27758  //    * RORL 1, r32
 27759  //    * RORL imm8, r32
 27760  //    * RORL cl, r32
 27761  //    * RORL 1, m32
 27762  //    * RORL imm8, m32
 27763  //    * RORL cl, m32
 27764  //
func (self *Program) RORL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("RORL", 2, Operands { v0, v1 })
    // 32-bit ROR: shift/rotate group with ModRM reg field /1 selecting
    // ROR (register ModRM base 0xc8 = mod=11 | reg=1; memory forms pass
    // 1 to mrsd). Opcodes: 0xd1 rotates by 1, 0xc1 by imm8, 0xd3 by CL.
    // RORL 1, r32
    if isConst1(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xd1)
            m.emit(0xc8 | lcode(v[1]))
        })
    }
    // RORL imm8, r32
    if isImm8(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xc1)
            m.emit(0xc8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // RORL cl, r32
    if v0 == CL && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xd3)
            m.emit(0xc8 | lcode(v[1]))
        })
    }
    // RORL 1, m32
    if isConst1(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd1)
            m.mrsd(1, addr(v[1]), 1)
        })
    }
    // RORL imm8, m32
    if isImm8(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc1)
            m.mrsd(1, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // RORL cl, m32
    if v0 == CL && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd3)
            m.mrsd(1, addr(v[1]), 1)
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for RORL")
    }
    return p
}
 27828  
 27829  // RORQ performs "Rotate Right".
 27830  //
 27831  // Mnemonic        : ROR
 27832  // Supported forms : (6 forms)
 27833  //
 27834  //    * RORQ 1, r64
 27835  //    * RORQ imm8, r64
 27836  //    * RORQ cl, r64
 27837  //    * RORQ 1, m64
 27838  //    * RORQ imm8, m64
 27839  //    * RORQ cl, m64
 27840  //
func (self *Program) RORQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("RORQ", 2, Operands { v0, v1 })
    // 64-bit ROR: same /1 group encoding as RORL (0xd1 by 1, 0xc1 by
    // imm8, 0xd3 by CL) with a mandatory REX.W prefix — emitted directly
    // as 0x48|hcode(reg) for register forms (hcode supplies REX.B for
    // r8-r15) and via rexm(1, ...) for memory forms.
    // RORQ 1, r64
    if isConst1(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0xd1)
            m.emit(0xc8 | lcode(v[1]))
        })
    }
    // RORQ imm8, r64
    if isImm8(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0xc1)
            m.emit(0xc8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // RORQ cl, r64
    if v0 == CL && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0xd3)
            m.emit(0xc8 | lcode(v[1]))
        })
    }
    // RORQ 1, m64
    if isConst1(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0xd1)
            m.mrsd(1, addr(v[1]), 1)
        })
    }
    // RORQ imm8, m64
    if isImm8(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0xc1)
            m.mrsd(1, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // RORQ cl, m64
    if v0 == CL && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0xd3)
            m.mrsd(1, addr(v[1]), 1)
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for RORQ")
    }
    return p
}
 27904  
 27905  // RORW performs "Rotate Right".
 27906  //
 27907  // Mnemonic        : ROR
 27908  // Supported forms : (6 forms)
 27909  //
 27910  //    * RORW 1, r16
 27911  //    * RORW imm8, r16
 27912  //    * RORW cl, r16
 27913  //    * RORW 1, m16
 27914  //    * RORW imm8, m16
 27915  //    * RORW cl, m16
 27916  //
func (self *Program) RORW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("RORW", 2, Operands { v0, v1 })
    // Each matching form below registers one candidate encoding; note that a
    // literal 1 can satisfy both the const-1 and the imm8 forms.
    // RORW 1, r16
    if isConst1(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)               // operand-size override prefix: 16-bit operand
            m.rexo(0, v[1], false)     // optional REX for extended registers
            m.emit(0xd1)               // D1 /1: rotate r/m16 right by 1
            m.emit(0xc8 | lcode(v[1])) // ModRM: mod=11, reg=/1 (ROR), rm=register
        })
    }
    // RORW imm8, r16
    if isImm8(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)               // operand-size override prefix
            m.rexo(0, v[1], false)
            m.emit(0xc1)               // C1 /1 ib: rotate r/m16 right by imm8
            m.emit(0xc8 | lcode(v[1])) // ModRM: reg=/1 (ROR)
            m.imm1(toImmAny(v[0]))     // 8-bit rotate count
        })
    }
    // RORW cl, r16
    if v0 == CL && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)               // operand-size override prefix
            m.rexo(0, v[1], false)
            m.emit(0xd3)               // D3 /1: rotate r/m16 right by CL
            m.emit(0xc8 | lcode(v[1])) // ModRM: reg=/1 (ROR)
        })
    }
    // RORW 1, m16
    if isConst1(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                  // operand-size override prefix
            m.rexo(0, addr(v[1]), false)  // optional REX derived from the memory operand
            m.emit(0xd1)                  // D1 /1: rotate m16 right by 1
            m.mrsd(1, addr(v[1]), 1)      // ModRM/SIB/disp with reg field /1 (ROR)
        })
    }
    // RORW imm8, m16
    if isImm8(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                  // operand-size override prefix
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc1)                  // C1 /1 ib: rotate m16 right by imm8
            m.mrsd(1, addr(v[1]), 1)      // reg field /1 (ROR)
            m.imm1(toImmAny(v[0]))        // 8-bit rotate count
        })
    }
    // RORW cl, m16
    if v0 == CL && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                  // operand-size override prefix
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd3)                  // D3 /1: rotate m16 right by CL
            m.mrsd(1, addr(v[1]), 1)      // reg field /1 (ROR)
        })
    }
    // No form matched the given operand types: this is a caller error.
    if p.len == 0 {
        panic("invalid operands for RORW")
    }
    return p
}
 27986  
 27987  // RORXL performs "Rotate Right Logical Without Affecting Flags".
 27988  //
 27989  // Mnemonic        : RORX
 27990  // Supported forms : (2 forms)
 27991  //
 27992  //    * RORXL imm8, r32, r32    [BMI2]
 27993  //    * RORXL imm8, m32, r32    [BMI2]
 27994  //
func (self *Program) RORXL(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("RORXL", 3, Operands { v0, v1, v2 })
    // RORXL imm8, r32, r32
    // Encoding: VEX.LZ.F2.0F3A.W0 F0 /r ib (requires BMI2).
    if isImm8(v0) && isReg32(v1) && isReg32(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                           // 3-byte VEX prefix
            m.emit(0xe3 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5)) // VEX byte 1: map 0F3A, inverted R/B bits
            m.emit(0x7b)                                           // VEX byte 2: W=0, pp=F2
            m.emit(0xf0)                                           // opcode F0: RORX
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))          // ModRM: reg=dest, rm=src
            m.imm1(toImmAny(v[0]))                                 // 8-bit rotate count
        })
    }
    // RORXL imm8, m32, r32
    if isImm8(v0) && isM32(v1) && isReg32(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x03, hcode(v[2]), addr(v[1]), 0)   // VEX3: map 0F3A (0b11), W=0, pp=F2 (0x03)
            m.emit(0xf0)                                           // opcode F0: RORX
            m.mrsd(lcode(v[2]), addr(v[1]), 1)                     // ModRM/SIB/disp, reg=dest
            m.imm1(toImmAny(v[0]))                                 // 8-bit rotate count
        })
    }
    // No form matched the given operand types: this is a caller error.
    if p.len == 0 {
        panic("invalid operands for RORXL")
    }
    return p
}
 28026  
 28027  // RORXQ performs "Rotate Right Logical Without Affecting Flags".
 28028  //
 28029  // Mnemonic        : RORX
 28030  // Supported forms : (2 forms)
 28031  //
 28032  //    * RORXQ imm8, r64, r64    [BMI2]
 28033  //    * RORXQ imm8, m64, r64    [BMI2]
 28034  //
func (self *Program) RORXQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("RORXQ", 3, Operands { v0, v1, v2 })
    // RORXQ imm8, r64, r64
    // Encoding: VEX.LZ.F2.0F3A.W1 F0 /r ib (requires BMI2); W=1 selects 64-bit operands.
    if isImm8(v0) && isReg64(v1) && isReg64(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                           // 3-byte VEX prefix
            m.emit(0xe3 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5)) // VEX byte 1: map 0F3A, inverted R/B bits
            m.emit(0xfb)                                           // VEX byte 2: W=1, pp=F2
            m.emit(0xf0)                                           // opcode F0: RORX
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))          // ModRM: reg=dest, rm=src
            m.imm1(toImmAny(v[0]))                                 // 8-bit rotate count
        })
    }
    // RORXQ imm8, m64, r64
    if isImm8(v0) && isM64(v1) && isReg64(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x83, hcode(v[2]), addr(v[1]), 0)   // VEX3: map 0F3A, W=1 + pp=F2 (0x83)
            m.emit(0xf0)                                           // opcode F0: RORX
            m.mrsd(lcode(v[2]), addr(v[1]), 1)                     // ModRM/SIB/disp, reg=dest
            m.imm1(toImmAny(v[0]))                                 // 8-bit rotate count
        })
    }
    // No form matched the given operand types: this is a caller error.
    if p.len == 0 {
        panic("invalid operands for RORXQ")
    }
    return p
}
 28066  
 28067  // ROUNDPD performs "Round Packed Double Precision Floating-Point Values".
 28068  //
 28069  // Mnemonic        : ROUNDPD
 28070  // Supported forms : (2 forms)
 28071  //
 28072  //    * ROUNDPD imm8, xmm, xmm     [SSE4.1]
 28073  //    * ROUNDPD imm8, m128, xmm    [SSE4.1]
 28074  //
func (self *Program) ROUNDPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("ROUNDPD", 3, Operands { v0, v1, v2 })
    // ROUNDPD imm8, xmm, xmm
    // Encoding: 66 0F 3A 09 /r ib (SSE4.1); imm8 selects the rounding mode.
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // mandatory 66 prefix
            m.rexo(hcode(v[2]), v[1], false)              // optional REX for extended XMM registers
            m.emit(0x0f)                                  // two-byte escape
            m.emit(0x3a)                                  // 0F 3A opcode map
            m.emit(0x09)                                  // opcode 09: ROUNDPD
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1])) // ModRM: reg=dest, rm=src
            m.imm1(toImmAny(v[0]))                        // rounding-control immediate
        })
    }
    // ROUNDPD imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // mandatory 66 prefix
            m.rexo(hcode(v[2]), addr(v[1]), false)        // optional REX from memory operand
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x09)                                  // opcode 09: ROUNDPD
            m.mrsd(lcode(v[2]), addr(v[1]), 1)            // ModRM/SIB/disp, reg=dest
            m.imm1(toImmAny(v[0]))                        // rounding-control immediate
        })
    }
    // No form matched the given operand types: this is a caller error.
    if p.len == 0 {
        panic("invalid operands for ROUNDPD")
    }
    return p
}
 28110  
 28111  // ROUNDPS performs "Round Packed Single Precision Floating-Point Values".
 28112  //
 28113  // Mnemonic        : ROUNDPS
 28114  // Supported forms : (2 forms)
 28115  //
 28116  //    * ROUNDPS imm8, xmm, xmm     [SSE4.1]
 28117  //    * ROUNDPS imm8, m128, xmm    [SSE4.1]
 28118  //
func (self *Program) ROUNDPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("ROUNDPS", 3, Operands { v0, v1, v2 })
    // ROUNDPS imm8, xmm, xmm
    // Encoding: 66 0F 3A 08 /r ib (SSE4.1); imm8 selects the rounding mode.
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // mandatory 66 prefix
            m.rexo(hcode(v[2]), v[1], false)              // optional REX for extended XMM registers
            m.emit(0x0f)                                  // two-byte escape
            m.emit(0x3a)                                  // 0F 3A opcode map
            m.emit(0x08)                                  // opcode 08: ROUNDPS
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1])) // ModRM: reg=dest, rm=src
            m.imm1(toImmAny(v[0]))                        // rounding-control immediate
        })
    }
    // ROUNDPS imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // mandatory 66 prefix
            m.rexo(hcode(v[2]), addr(v[1]), false)        // optional REX from memory operand
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x08)                                  // opcode 08: ROUNDPS
            m.mrsd(lcode(v[2]), addr(v[1]), 1)            // ModRM/SIB/disp, reg=dest
            m.imm1(toImmAny(v[0]))                        // rounding-control immediate
        })
    }
    // No form matched the given operand types: this is a caller error.
    if p.len == 0 {
        panic("invalid operands for ROUNDPS")
    }
    return p
}
 28154  
 28155  // ROUNDSD performs "Round Scalar Double Precision Floating-Point Values".
 28156  //
 28157  // Mnemonic        : ROUNDSD
 28158  // Supported forms : (2 forms)
 28159  //
 28160  //    * ROUNDSD imm8, xmm, xmm    [SSE4.1]
 28161  //    * ROUNDSD imm8, m64, xmm    [SSE4.1]
 28162  //
func (self *Program) ROUNDSD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("ROUNDSD", 3, Operands { v0, v1, v2 })
    // ROUNDSD imm8, xmm, xmm
    // Encoding: 66 0F 3A 0B /r ib (SSE4.1); imm8 selects the rounding mode.
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // mandatory 66 prefix
            m.rexo(hcode(v[2]), v[1], false)              // optional REX for extended XMM registers
            m.emit(0x0f)                                  // two-byte escape
            m.emit(0x3a)                                  // 0F 3A opcode map
            m.emit(0x0b)                                  // opcode 0B: ROUNDSD
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1])) // ModRM: reg=dest, rm=src
            m.imm1(toImmAny(v[0]))                        // rounding-control immediate
        })
    }
    // ROUNDSD imm8, m64, xmm
    if isImm8(v0) && isM64(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // mandatory 66 prefix
            m.rexo(hcode(v[2]), addr(v[1]), false)        // optional REX from memory operand
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x0b)                                  // opcode 0B: ROUNDSD
            m.mrsd(lcode(v[2]), addr(v[1]), 1)            // ModRM/SIB/disp, reg=dest
            m.imm1(toImmAny(v[0]))                        // rounding-control immediate
        })
    }
    // No form matched the given operand types: this is a caller error.
    if p.len == 0 {
        panic("invalid operands for ROUNDSD")
    }
    return p
}
 28198  
 28199  // ROUNDSS performs "Round Scalar Single Precision Floating-Point Values".
 28200  //
 28201  // Mnemonic        : ROUNDSS
 28202  // Supported forms : (2 forms)
 28203  //
 28204  //    * ROUNDSS imm8, xmm, xmm    [SSE4.1]
 28205  //    * ROUNDSS imm8, m32, xmm    [SSE4.1]
 28206  //
func (self *Program) ROUNDSS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("ROUNDSS", 3, Operands { v0, v1, v2 })
    // ROUNDSS imm8, xmm, xmm
    // Encoding: 66 0F 3A 0A /r ib (SSE4.1); imm8 selects the rounding mode.
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // mandatory 66 prefix
            m.rexo(hcode(v[2]), v[1], false)              // optional REX for extended XMM registers
            m.emit(0x0f)                                  // two-byte escape
            m.emit(0x3a)                                  // 0F 3A opcode map
            m.emit(0x0a)                                  // opcode 0A: ROUNDSS
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1])) // ModRM: reg=dest, rm=src
            m.imm1(toImmAny(v[0]))                        // rounding-control immediate
        })
    }
    // ROUNDSS imm8, m32, xmm
    if isImm8(v0) && isM32(v1) && isXMM(v2) {
        self.require(ISA_SSE4_1)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                  // mandatory 66 prefix
            m.rexo(hcode(v[2]), addr(v[1]), false)        // optional REX from memory operand
            m.emit(0x0f)
            m.emit(0x3a)
            m.emit(0x0a)                                  // opcode 0A: ROUNDSS
            m.mrsd(lcode(v[2]), addr(v[1]), 1)            // ModRM/SIB/disp, reg=dest
            m.imm1(toImmAny(v[0]))                        // rounding-control immediate
        })
    }
    // No form matched the given operand types: this is a caller error.
    if p.len == 0 {
        panic("invalid operands for ROUNDSS")
    }
    return p
}
 28242  
 28243  // RSQRTPS performs "Compute Reciprocals of Square Roots of Packed Single-Precision Floating-Point Values".
 28244  //
 28245  // Mnemonic        : RSQRTPS
 28246  // Supported forms : (2 forms)
 28247  //
 28248  //    * RSQRTPS xmm, xmm     [SSE]
 28249  //    * RSQRTPS m128, xmm    [SSE]
 28250  //
func (self *Program) RSQRTPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("RSQRTPS", 2, Operands { v0, v1 })
    // RSQRTPS xmm, xmm
    // Encoding: 0F 52 /r (SSE), no mandatory prefix.
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)              // optional REX for extended XMM registers
            m.emit(0x0f)                                  // two-byte escape
            m.emit(0x52)                                  // opcode 52: RSQRTPS
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: reg=dest, rm=src
        })
    }
    // RSQRTPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)        // optional REX from memory operand
            m.emit(0x0f)
            m.emit(0x52)                                  // opcode 52: RSQRTPS
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/disp, reg=dest
        })
    }
    // No form matched the given operand types: this is a caller error.
    if p.len == 0 {
        panic("invalid operands for RSQRTPS")
    }
    return p
}
 28280  
 28281  // RSQRTSS performs "Compute Reciprocal of Square Root of Scalar Single-Precision Floating-Point Value".
 28282  //
 28283  // Mnemonic        : RSQRTSS
 28284  // Supported forms : (2 forms)
 28285  //
 28286  //    * RSQRTSS xmm, xmm    [SSE]
 28287  //    * RSQRTSS m32, xmm    [SSE]
 28288  //
func (self *Program) RSQRTSS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("RSQRTSS", 2, Operands { v0, v1 })
    // RSQRTSS xmm, xmm
    // Encoding: F3 0F 52 /r (SSE); the F3 prefix selects the scalar form.
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)                                  // mandatory F3 prefix (scalar single)
            m.rexo(hcode(v[1]), v[0], false)              // optional REX for extended XMM registers
            m.emit(0x0f)                                  // two-byte escape
            m.emit(0x52)                                  // opcode 52: RSQRTSS
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: reg=dest, rm=src
        })
    }
    // RSQRTSS m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)                                  // mandatory F3 prefix (scalar single)
            m.rexo(hcode(v[1]), addr(v[0]), false)        // optional REX from memory operand
            m.emit(0x0f)
            m.emit(0x52)                                  // opcode 52: RSQRTSS
            m.mrsd(lcode(v[1]), addr(v[0]), 1)            // ModRM/SIB/disp, reg=dest
        })
    }
    // No form matched the given operand types: this is a caller error.
    if p.len == 0 {
        panic("invalid operands for RSQRTSS")
    }
    return p
}
 28320  
 28321  // SALB performs "Arithmetic Shift Left".
 28322  //
 28323  // Mnemonic        : SAL
 28324  // Supported forms : (6 forms)
 28325  //
 28326  //    * SALB 1, r8
 28327  //    * SALB imm8, r8
 28328  //    * SALB cl, r8
 28329  //    * SALB 1, m8
 28330  //    * SALB imm8, m8
 28331  //    * SALB cl, m8
 28332  //
func (self *Program) SALB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SALB", 2, Operands { v0, v1 })
    // SAL shares opcode extension /4 with SHL; a literal 1 can satisfy both
    // the const-1 and the imm8 forms, registering two candidate encodings.
    // SALB 1, r8
    if isConst1(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1])) // REX needed for SPL/BPL/SIL/DIL-style byte registers
            m.emit(0xd0)                     // D0 /4: shift r/m8 left by 1
            m.emit(0xe0 | lcode(v[1]))       // ModRM: mod=11, reg=/4 (SAL/SHL), rm=register
        })
    }
    // SALB imm8, r8
    if isImm8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xc0)                     // C0 /4 ib: shift r/m8 left by imm8
            m.emit(0xe0 | lcode(v[1]))       // ModRM: reg=/4 (SAL/SHL)
            m.imm1(toImmAny(v[0]))           // 8-bit shift count
        })
    }
    // SALB cl, r8
    if v0 == CL && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xd2)                     // D2 /4: shift r/m8 left by CL
            m.emit(0xe0 | lcode(v[1]))       // ModRM: reg=/4 (SAL/SHL)
        })
    }
    // SALB 1, m8
    if isConst1(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)     // optional REX from memory operand
            m.emit(0xd0)                     // D0 /4: shift m8 left by 1
            m.mrsd(4, addr(v[1]), 1)         // ModRM/SIB/disp with reg field /4 (SAL/SHL)
        })
    }
    // SALB imm8, m8
    if isImm8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc0)                     // C0 /4 ib: shift m8 left by imm8
            m.mrsd(4, addr(v[1]), 1)         // reg field /4 (SAL/SHL)
            m.imm1(toImmAny(v[0]))           // 8-bit shift count
        })
    }
    // SALB cl, m8
    if v0 == CL && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd2)                     // D2 /4: shift m8 left by CL
            m.mrsd(4, addr(v[1]), 1)         // reg field /4 (SAL/SHL)
        })
    }
    // No form matched the given operand types: this is a caller error.
    if p.len == 0 {
        panic("invalid operands for SALB")
    }
    return p
}
 28396  
 28397  // SALL performs "Arithmetic Shift Left".
 28398  //
 28399  // Mnemonic        : SAL
 28400  // Supported forms : (6 forms)
 28401  //
 28402  //    * SALL 1, r32
 28403  //    * SALL imm8, r32
 28404  //    * SALL cl, r32
 28405  //    * SALL 1, m32
 28406  //    * SALL imm8, m32
 28407  //    * SALL cl, m32
 28408  //
func (self *Program) SALL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SALL", 2, Operands { v0, v1 })
    // SAL shares opcode extension /4 with SHL; a literal 1 can satisfy both
    // the const-1 and the imm8 forms, registering two candidate encodings.
    // SALL 1, r32
    if isConst1(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)     // optional REX for extended registers
            m.emit(0xd1)               // D1 /4: shift r/m32 left by 1
            m.emit(0xe0 | lcode(v[1])) // ModRM: mod=11, reg=/4 (SAL/SHL), rm=register
        })
    }
    // SALL imm8, r32
    if isImm8(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xc1)               // C1 /4 ib: shift r/m32 left by imm8
            m.emit(0xe0 | lcode(v[1])) // ModRM: reg=/4 (SAL/SHL)
            m.imm1(toImmAny(v[0]))     // 8-bit shift count
        })
    }
    // SALL cl, r32
    if v0 == CL && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xd3)               // D3 /4: shift r/m32 left by CL
            m.emit(0xe0 | lcode(v[1])) // ModRM: reg=/4 (SAL/SHL)
        })
    }
    // SALL 1, m32
    if isConst1(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false) // optional REX from memory operand
            m.emit(0xd1)                 // D1 /4: shift m32 left by 1
            m.mrsd(4, addr(v[1]), 1)     // ModRM/SIB/disp with reg field /4 (SAL/SHL)
        })
    }
    // SALL imm8, m32
    if isImm8(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc1)                 // C1 /4 ib: shift m32 left by imm8
            m.mrsd(4, addr(v[1]), 1)     // reg field /4 (SAL/SHL)
            m.imm1(toImmAny(v[0]))       // 8-bit shift count
        })
    }
    // SALL cl, m32
    if v0 == CL && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd3)                 // D3 /4: shift m32 left by CL
            m.mrsd(4, addr(v[1]), 1)     // reg field /4 (SAL/SHL)
        })
    }
    // No form matched the given operand types: this is a caller error.
    if p.len == 0 {
        panic("invalid operands for SALL")
    }
    return p
}
 28472  
 28473  // SALQ performs "Arithmetic Shift Left".
 28474  //
 28475  // Mnemonic        : SAL
 28476  // Supported forms : (6 forms)
 28477  //
 28478  //    * SALQ 1, r64
 28479  //    * SALQ imm8, r64
 28480  //    * SALQ cl, r64
 28481  //    * SALQ 1, m64
 28482  //    * SALQ imm8, m64
 28483  //    * SALQ cl, m64
 28484  //
func (self *Program) SALQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SALQ", 2, Operands { v0, v1 })
    // 64-bit forms always carry REX.W (0x48); SAL shares opcode extension /4
    // with SHL. A literal 1 can satisfy both the const-1 and imm8 forms.
    // SALQ 1, r64
    if isConst1(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1])) // REX.W plus B bit for extended registers
            m.emit(0xd1)               // D1 /4: shift r/m64 left by 1
            m.emit(0xe0 | lcode(v[1])) // ModRM: mod=11, reg=/4 (SAL/SHL), rm=register
        })
    }
    // SALQ imm8, r64
    if isImm8(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1])) // REX.W
            m.emit(0xc1)               // C1 /4 ib: shift r/m64 left by imm8
            m.emit(0xe0 | lcode(v[1])) // ModRM: reg=/4 (SAL/SHL)
            m.imm1(toImmAny(v[0]))     // 8-bit shift count
        })
    }
    // SALQ cl, r64
    if v0 == CL && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1])) // REX.W
            m.emit(0xd3)               // D3 /4: shift r/m64 left by CL
            m.emit(0xe0 | lcode(v[1])) // ModRM: reg=/4 (SAL/SHL)
        })
    }
    // SALQ 1, m64
    if isConst1(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))   // REX.W derived from the memory operand
            m.emit(0xd1)               // D1 /4: shift m64 left by 1
            m.mrsd(4, addr(v[1]), 1)   // ModRM/SIB/disp with reg field /4 (SAL/SHL)
        })
    }
    // SALQ imm8, m64
    if isImm8(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))   // REX.W
            m.emit(0xc1)               // C1 /4 ib: shift m64 left by imm8
            m.mrsd(4, addr(v[1]), 1)   // reg field /4 (SAL/SHL)
            m.imm1(toImmAny(v[0]))     // 8-bit shift count
        })
    }
    // SALQ cl, m64
    if v0 == CL && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))   // REX.W
            m.emit(0xd3)               // D3 /4: shift m64 left by CL
            m.mrsd(4, addr(v[1]), 1)   // reg field /4 (SAL/SHL)
        })
    }
    // No form matched the given operand types: this is a caller error.
    if p.len == 0 {
        panic("invalid operands for SALQ")
    }
    return p
}
 28548  
 28549  // SALW performs "Arithmetic Shift Left".
 28550  //
 28551  // Mnemonic        : SAL
 28552  // Supported forms : (6 forms)
 28553  //
 28554  //    * SALW 1, r16
 28555  //    * SALW imm8, r16
 28556  //    * SALW cl, r16
 28557  //    * SALW 1, m16
 28558  //    * SALW imm8, m16
 28559  //    * SALW cl, m16
 28560  //
func (self *Program) SALW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SALW", 2, Operands { v0, v1 })
    // 16-bit forms carry the 0x66 operand-size override; SAL shares opcode
    // extension /4 with SHL. A literal 1 can satisfy both the const-1 and
    // imm8 forms, registering two candidate encodings.
    // SALW 1, r16
    if isConst1(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)               // operand-size override prefix: 16-bit operand
            m.rexo(0, v[1], false)     // optional REX for extended registers
            m.emit(0xd1)               // D1 /4: shift r/m16 left by 1
            m.emit(0xe0 | lcode(v[1])) // ModRM: mod=11, reg=/4 (SAL/SHL), rm=register
        })
    }
    // SALW imm8, r16
    if isImm8(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)               // operand-size override prefix
            m.rexo(0, v[1], false)
            m.emit(0xc1)               // C1 /4 ib: shift r/m16 left by imm8
            m.emit(0xe0 | lcode(v[1])) // ModRM: reg=/4 (SAL/SHL)
            m.imm1(toImmAny(v[0]))     // 8-bit shift count
        })
    }
    // SALW cl, r16
    if v0 == CL && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)               // operand-size override prefix
            m.rexo(0, v[1], false)
            m.emit(0xd3)               // D3 /4: shift r/m16 left by CL
            m.emit(0xe0 | lcode(v[1])) // ModRM: reg=/4 (SAL/SHL)
        })
    }
    // SALW 1, m16
    if isConst1(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                 // operand-size override prefix
            m.rexo(0, addr(v[1]), false) // optional REX from memory operand
            m.emit(0xd1)                 // D1 /4: shift m16 left by 1
            m.mrsd(4, addr(v[1]), 1)     // ModRM/SIB/disp with reg field /4 (SAL/SHL)
        })
    }
    // SALW imm8, m16
    if isImm8(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                 // operand-size override prefix
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc1)                 // C1 /4 ib: shift m16 left by imm8
            m.mrsd(4, addr(v[1]), 1)     // reg field /4 (SAL/SHL)
            m.imm1(toImmAny(v[0]))       // 8-bit shift count
        })
    }
    // SALW cl, m16
    if v0 == CL && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                 // operand-size override prefix
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd3)                 // D3 /4: shift m16 left by CL
            m.mrsd(4, addr(v[1]), 1)     // reg field /4 (SAL/SHL)
        })
    }
    // No form matched the given operand types: this is a caller error.
    if p.len == 0 {
        panic("invalid operands for SALW")
    }
    return p
}
 28630  
 28631  // SARB performs "Arithmetic Shift Right".
 28632  //
 28633  // Mnemonic        : SAR
 28634  // Supported forms : (6 forms)
 28635  //
 28636  //    * SARB 1, r8
 28637  //    * SARB imm8, r8
 28638  //    * SARB cl, r8
 28639  //    * SARB 1, m8
 28640  //    * SARB imm8, m8
 28641  //    * SARB cl, m8
 28642  //
func (self *Program) SARB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SARB", 2, Operands { v0, v1 })
    // SAR uses opcode extension /7; a literal 1 can satisfy both the const-1
    // and the imm8 forms, registering two candidate encodings.
    // SARB 1, r8
    if isConst1(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1])) // REX needed for SPL/BPL/SIL/DIL-style byte registers
            m.emit(0xd0)                     // D0 /7: arithmetic shift r/m8 right by 1
            m.emit(0xf8 | lcode(v[1]))       // ModRM: mod=11, reg=/7 (SAR), rm=register
        })
    }
    // SARB imm8, r8
    if isImm8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xc0)                     // C0 /7 ib: arithmetic shift r/m8 right by imm8
            m.emit(0xf8 | lcode(v[1]))       // ModRM: reg=/7 (SAR)
            m.imm1(toImmAny(v[0]))           // 8-bit shift count
        })
    }
    // SARB cl, r8
    if v0 == CL && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xd2)                     // D2 /7: arithmetic shift r/m8 right by CL
            m.emit(0xf8 | lcode(v[1]))       // ModRM: reg=/7 (SAR)
        })
    }
    // SARB 1, m8
    if isConst1(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)     // optional REX from memory operand
            m.emit(0xd0)                     // D0 /7: arithmetic shift m8 right by 1
            m.mrsd(7, addr(v[1]), 1)         // ModRM/SIB/disp with reg field /7 (SAR)
        })
    }
    // SARB imm8, m8
    if isImm8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc0)                     // C0 /7 ib: arithmetic shift m8 right by imm8
            m.mrsd(7, addr(v[1]), 1)         // reg field /7 (SAR)
            m.imm1(toImmAny(v[0]))           // 8-bit shift count
        })
    }
    // SARB cl, m8
    if v0 == CL && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd2)                     // D2 /7: arithmetic shift m8 right by CL
            m.mrsd(7, addr(v[1]), 1)         // reg field /7 (SAR)
        })
    }
    // No form matched the given operand types: this is a caller error.
    if p.len == 0 {
        panic("invalid operands for SARB")
    }
    return p
}
 28706  
 28707  // SARL performs "Arithmetic Shift Right".
 28708  //
 28709  // Mnemonic        : SAR
 28710  // Supported forms : (6 forms)
 28711  //
 28712  //    * SARL 1, r32
 28713  //    * SARL imm8, r32
 28714  //    * SARL cl, r32
 28715  //    * SARL 1, m32
 28716  //    * SARL imm8, m32
 28717  //    * SARL cl, m32
 28718  //
func (self *Program) SARL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SARL", 2, Operands { v0, v1 })
    // SAR uses opcode extension /7; a literal 1 can satisfy both the const-1
    // and the imm8 forms, registering two candidate encodings.
    // SARL 1, r32
    if isConst1(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)     // optional REX for extended registers
            m.emit(0xd1)               // D1 /7: arithmetic shift r/m32 right by 1
            m.emit(0xf8 | lcode(v[1])) // ModRM: mod=11, reg=/7 (SAR), rm=register
        })
    }
    // SARL imm8, r32
    if isImm8(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xc1)               // C1 /7 ib: arithmetic shift r/m32 right by imm8
            m.emit(0xf8 | lcode(v[1])) // ModRM: reg=/7 (SAR)
            m.imm1(toImmAny(v[0]))     // 8-bit shift count
        })
    }
    // SARL cl, r32
    if v0 == CL && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xd3)               // D3 /7: arithmetic shift r/m32 right by CL
            m.emit(0xf8 | lcode(v[1])) // ModRM: reg=/7 (SAR)
        })
    }
    // SARL 1, m32
    if isConst1(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false) // optional REX from memory operand
            m.emit(0xd1)                 // D1 /7: arithmetic shift m32 right by 1
            m.mrsd(7, addr(v[1]), 1)     // ModRM/SIB/disp with reg field /7 (SAR)
        })
    }
    // SARL imm8, m32
    if isImm8(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc1)                 // C1 /7 ib: arithmetic shift m32 right by imm8
            m.mrsd(7, addr(v[1]), 1)     // reg field /7 (SAR)
            m.imm1(toImmAny(v[0]))       // 8-bit shift count
        })
    }
    // SARL cl, m32
    if v0 == CL && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd3)                 // D3 /7: arithmetic shift m32 right by CL
            m.mrsd(7, addr(v[1]), 1)     // reg field /7 (SAR)
        })
    }
    // No form matched the given operand types: this is a caller error.
    if p.len == 0 {
        panic("invalid operands for SARL")
    }
    return p
}
 28782  
 28783  // SARQ performs "Arithmetic Shift Right".
 28784  //
 28785  // Mnemonic        : SAR
 28786  // Supported forms : (6 forms)
 28787  //
 28788  //    * SARQ 1, r64
 28789  //    * SARQ imm8, r64
 28790  //    * SARQ cl, r64
 28791  //    * SARQ 1, m64
 28792  //    * SARQ imm8, m64
 28793  //    * SARQ cl, m64
 28794  //
func (self *Program) SARQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SARQ", 2, Operands { v0, v1 })
    // SARQ 1, r64
    if isConst1(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1])) // REX.W prefix; low bit is REX.B for r8-r15
            m.emit(0xd1) // opcode D1 /7: shift r/m64 by 1
            m.emit(0xf8 | lcode(v[1])) // ModRM: mod=11, reg=7 (SAR), rm=v1
        })
    }
    // SARQ imm8, r64
    if isImm8(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1])) // REX.W prefix; low bit is REX.B for r8-r15
            m.emit(0xc1) // opcode C1 /7: shift r/m64 by imm8
            m.emit(0xf8 | lcode(v[1])) // ModRM: mod=11, reg=7 (SAR), rm=v1
            m.imm1(toImmAny(v[0])) // 1-byte shift count
        })
    }
    // SARQ cl, r64
    if v0 == CL && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1])) // REX.W prefix; low bit is REX.B for r8-r15
            m.emit(0xd3) // opcode D3 /7: shift r/m64 by CL
            m.emit(0xf8 | lcode(v[1])) // ModRM: mod=11, reg=7 (SAR), rm=v1
        })
    }
    // SARQ 1, m64
    if isConst1(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1])) // REX prefix with W=1 for the memory operand
            m.emit(0xd1) // opcode D1 /7: shift r/m64 by 1
            m.mrsd(7, addr(v[1]), 1) // ModRM.reg=7 (SAR) + memory operand (ModRM/SIB/disp)
        })
    }
    // SARQ imm8, m64
    if isImm8(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1])) // REX prefix with W=1 for the memory operand
            m.emit(0xc1) // opcode C1 /7: shift r/m64 by imm8
            m.mrsd(7, addr(v[1]), 1) // ModRM.reg=7 (SAR) + memory operand (ModRM/SIB/disp)
            m.imm1(toImmAny(v[0])) // 1-byte shift count
        })
    }
    // SARQ cl, m64
    if v0 == CL && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1])) // REX prefix with W=1 for the memory operand
            m.emit(0xd3) // opcode D3 /7: shift r/m64 by CL
            m.mrsd(7, addr(v[1]), 1) // ModRM.reg=7 (SAR) + memory operand (ModRM/SIB/disp)
        })
    }
    if p.len == 0 {
        panic("invalid operands for SARQ")
    }
    return p
}
 28858  
 28859  // SARW performs "Arithmetic Shift Right".
 28860  //
 28861  // Mnemonic        : SAR
 28862  // Supported forms : (6 forms)
 28863  //
 28864  //    * SARW 1, r16
 28865  //    * SARW imm8, r16
 28866  //    * SARW cl, r16
 28867  //    * SARW 1, m16
 28868  //    * SARW imm8, m16
 28869  //    * SARW cl, m16
 28870  //
func (self *Program) SARW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SARW", 2, Operands { v0, v1 })
    // SARW 1, r16
    if isConst1(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66) // operand-size override: 16-bit operation
            m.rexo(0, v[1], false) // optional REX for extended registers
            m.emit(0xd1) // opcode D1 /7: shift r/m16 by 1
            m.emit(0xf8 | lcode(v[1])) // ModRM: mod=11, reg=7 (SAR), rm=v1
        })
    }
    // SARW imm8, r16
    if isImm8(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66) // operand-size override: 16-bit operation
            m.rexo(0, v[1], false) // optional REX for extended registers
            m.emit(0xc1) // opcode C1 /7: shift r/m16 by imm8
            m.emit(0xf8 | lcode(v[1])) // ModRM: mod=11, reg=7 (SAR), rm=v1
            m.imm1(toImmAny(v[0])) // 1-byte shift count
        })
    }
    // SARW cl, r16
    if v0 == CL && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66) // operand-size override: 16-bit operation
            m.rexo(0, v[1], false) // optional REX for extended registers
            m.emit(0xd3) // opcode D3 /7: shift r/m16 by CL
            m.emit(0xf8 | lcode(v[1])) // ModRM: mod=11, reg=7 (SAR), rm=v1
        })
    }
    // SARW 1, m16
    if isConst1(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66) // operand-size override: 16-bit operation
            m.rexo(0, addr(v[1]), false) // optional REX for the memory operand
            m.emit(0xd1) // opcode D1 /7: shift r/m16 by 1
            m.mrsd(7, addr(v[1]), 1) // ModRM.reg=7 (SAR) + memory operand (ModRM/SIB/disp)
        })
    }
    // SARW imm8, m16
    if isImm8(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66) // operand-size override: 16-bit operation
            m.rexo(0, addr(v[1]), false) // optional REX for the memory operand
            m.emit(0xc1) // opcode C1 /7: shift r/m16 by imm8
            m.mrsd(7, addr(v[1]), 1) // ModRM.reg=7 (SAR) + memory operand (ModRM/SIB/disp)
            m.imm1(toImmAny(v[0])) // 1-byte shift count
        })
    }
    // SARW cl, m16
    if v0 == CL && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66) // operand-size override: 16-bit operation
            m.rexo(0, addr(v[1]), false) // optional REX for the memory operand
            m.emit(0xd3) // opcode D3 /7: shift r/m16 by CL
            m.mrsd(7, addr(v[1]), 1) // ModRM.reg=7 (SAR) + memory operand (ModRM/SIB/disp)
        })
    }
    if p.len == 0 {
        panic("invalid operands for SARW")
    }
    return p
}
 28940  
 28941  // SARXL performs "Arithmetic Shift Right Without Affecting Flags".
 28942  //
 28943  // Mnemonic        : SARX
 28944  // Supported forms : (2 forms)
 28945  //
 28946  //    * SARXL r32, r32, r32    [BMI2]
 28947  //    * SARXL r32, m32, r32    [BMI2]
 28948  //
func (self *Program) SARXL(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("SARXL", 3, Operands { v0, v1, v2 })
    // SARXL r32, r32, r32
    if isReg32(v0) && isReg32(v1) && isReg32(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX.F3.0F38.W0 F7 /r, encoded as a hand-rolled 3-byte VEX prefix
            m.emit(0xc4) // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5)) // VEX byte 1: R/B inverted high bits, map=0F38
            m.emit(0x7a ^ (hlcode(v[0]) << 3)) // VEX byte 2: W=0, vvvv=count reg (inverted), pp=F3
            m.emit(0xf7) // opcode F7
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1])) // ModRM: mod=11, reg=dest, rm=src
        })
    }
    // SARXL r32, m32, r32
    if isReg32(v0) && isM32(v1) && isReg32(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x02, hcode(v[2]), addr(v[1]), hlcode(v[0])) // 3-byte VEX: map=0F38, pp=F3, W=0
            m.emit(0xf7) // opcode F7
            m.mrsd(lcode(v[2]), addr(v[1]), 1) // ModRM.reg=dest + memory operand (ModRM/SIB/disp)
        })
    }
    if p.len == 0 {
        panic("invalid operands for SARXL")
    }
    return p
}
 28978  
 28979  // SARXQ performs "Arithmetic Shift Right Without Affecting Flags".
 28980  //
 28981  // Mnemonic        : SARX
 28982  // Supported forms : (2 forms)
 28983  //
 28984  //    * SARXQ r64, r64, r64    [BMI2]
 28985  //    * SARXQ r64, m64, r64    [BMI2]
 28986  //
func (self *Program) SARXQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("SARXQ", 3, Operands { v0, v1, v2 })
    // SARXQ r64, r64, r64
    if isReg64(v0) && isReg64(v1) && isReg64(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX.F3.0F38.W1 F7 /r, encoded as a hand-rolled 3-byte VEX prefix
            m.emit(0xc4) // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5)) // VEX byte 1: R/B inverted high bits, map=0F38
            m.emit(0xfa ^ (hlcode(v[0]) << 3)) // VEX byte 2: W=1, vvvv=count reg (inverted), pp=F3
            m.emit(0xf7) // opcode F7
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1])) // ModRM: mod=11, reg=dest, rm=src
        })
    }
    // SARXQ r64, m64, r64
    if isReg64(v0) && isM64(v1) && isReg64(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x82, hcode(v[2]), addr(v[1]), hlcode(v[0])) // 3-byte VEX: map=0F38, pp=F3, W=1
            m.emit(0xf7) // opcode F7
            m.mrsd(lcode(v[2]), addr(v[1]), 1) // ModRM.reg=dest + memory operand (ModRM/SIB/disp)
        })
    }
    if p.len == 0 {
        panic("invalid operands for SARXQ")
    }
    return p
}
 29016  
 29017  // SBBB performs "Subtract with Borrow".
 29018  //
 29019  // Mnemonic        : SBB
 29020  // Supported forms : (6 forms)
 29021  //
 29022  //    * SBBB imm8, al
 29023  //    * SBBB imm8, r8
 29024  //    * SBBB r8, r8
 29025  //    * SBBB m8, r8
 29026  //    * SBBB imm8, m8
 29027  //    * SBBB r8, m8
 29028  //
func (self *Program) SBBB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SBBB", 2, Operands { v0, v1 })
    // SBBB imm8, al
    if isImm8(v0) && v1 == AL {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x1c) // opcode 1C: SBB AL, imm8 (short accumulator form)
            m.imm1(toImmAny(v[0])) // 1-byte immediate
        })
    }
    // SBBB imm8, r8
    if isImm8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1])) // optional REX; forced for SPL/BPL/SIL/DIL
            m.emit(0x80) // opcode 80 /3: SBB r/m8, imm8
            m.emit(0xd8 | lcode(v[1])) // ModRM: mod=11, reg=3 (SBB), rm=v1
            m.imm1(toImmAny(v[0])) // 1-byte immediate
        })
    }
    // SBBB r8, r8
    if isReg8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        // two equivalent encodings: MR form (18 /r) and RM form (1A /r)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], isReg8REX(v[0]) || isReg8REX(v[1])) // optional REX; forced for REX-only byte regs
            m.emit(0x18) // opcode 18 /r: SBB r/m8, r8
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1])) // ModRM: mod=11, reg=v0, rm=v1
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], isReg8REX(v[0]) || isReg8REX(v[1])) // optional REX; forced for REX-only byte regs
            m.emit(0x1a) // opcode 1A /r: SBB r8, r/m8
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v1, rm=v0
        })
    }
    // SBBB m8, r8
    if isM8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), isReg8REX(v[1])) // optional REX; forced for SPL/BPL/SIL/DIL
            m.emit(0x1a) // opcode 1A /r: SBB r8, r/m8
            m.mrsd(lcode(v[1]), addr(v[0]), 1) // ModRM.reg=v1 + memory operand (ModRM/SIB/disp)
        })
    }
    // SBBB imm8, m8
    if isImm8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false) // optional REX for the memory operand
            m.emit(0x80) // opcode 80 /3: SBB r/m8, imm8
            m.mrsd(3, addr(v[1]), 1) // ModRM.reg=3 (SBB) + memory operand (ModRM/SIB/disp)
            m.imm1(toImmAny(v[0])) // 1-byte immediate
        })
    }
    // SBBB r8, m8
    if isReg8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), isReg8REX(v[0])) // optional REX; forced for SPL/BPL/SIL/DIL
            m.emit(0x18) // opcode 18 /r: SBB r/m8, r8
            m.mrsd(lcode(v[0]), addr(v[1]), 1) // ModRM.reg=v0 + memory operand (ModRM/SIB/disp)
        })
    }
    if p.len == 0 {
        panic("invalid operands for SBBB")
    }
    return p
}
 29096  
 29097  // SBBL performs "Subtract with Borrow".
 29098  //
 29099  // Mnemonic        : SBB
 29100  // Supported forms : (8 forms)
 29101  //
 29102  //    * SBBL imm32, eax
 29103  //    * SBBL imm8, r32
 29104  //    * SBBL imm32, r32
 29105  //    * SBBL r32, r32
 29106  //    * SBBL m32, r32
 29107  //    * SBBL imm8, m32
 29108  //    * SBBL imm32, m32
 29109  //    * SBBL r32, m32
 29110  //
func (self *Program) SBBL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SBBL", 2, Operands { v0, v1 })
    // SBBL imm32, eax
    if isImm32(v0) && v1 == EAX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x1d) // opcode 1D: SBB EAX, imm32 (short accumulator form)
            m.imm4(toImmAny(v[0])) // 4-byte immediate
        })
    }
    // SBBL imm8, r32
    // NOTE: an imm8 value also satisfies isImm32, so both this form and the
    // imm32 form below may be registered; both encodings are valid.
    if isImm8Ext(v0, 4) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false) // optional REX for extended registers
            m.emit(0x83) // opcode 83 /3: SBB r/m32, imm8 (sign-extended)
            m.emit(0xd8 | lcode(v[1])) // ModRM: mod=11, reg=3 (SBB), rm=v1
            m.imm1(toImmAny(v[0])) // 1-byte immediate
        })
    }
    // SBBL imm32, r32
    if isImm32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false) // optional REX for extended registers
            m.emit(0x81) // opcode 81 /3: SBB r/m32, imm32
            m.emit(0xd8 | lcode(v[1])) // ModRM: mod=11, reg=3 (SBB), rm=v1
            m.imm4(toImmAny(v[0])) // 4-byte immediate
        })
    }
    // SBBL r32, r32
    if isReg32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        // two equivalent encodings: MR form (19 /r) and RM form (1B /r)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], false) // optional REX for extended registers
            m.emit(0x19) // opcode 19 /r: SBB r/m32, r32
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1])) // ModRM: mod=11, reg=v0, rm=v1
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false) // optional REX for extended registers
            m.emit(0x1b) // opcode 1B /r: SBB r32, r/m32
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v1, rm=v0
        })
    }
    // SBBL m32, r32
    if isM32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false) // optional REX for extended registers
            m.emit(0x1b) // opcode 1B /r: SBB r32, r/m32
            m.mrsd(lcode(v[1]), addr(v[0]), 1) // ModRM.reg=v1 + memory operand (ModRM/SIB/disp)
        })
    }
    // SBBL imm8, m32
    if isImm8Ext(v0, 4) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false) // optional REX for the memory operand
            m.emit(0x83) // opcode 83 /3: SBB r/m32, imm8 (sign-extended)
            m.mrsd(3, addr(v[1]), 1) // ModRM.reg=3 (SBB) + memory operand (ModRM/SIB/disp)
            m.imm1(toImmAny(v[0])) // 1-byte immediate
        })
    }
    // SBBL imm32, m32
    if isImm32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false) // optional REX for the memory operand
            m.emit(0x81) // opcode 81 /3: SBB r/m32, imm32
            m.mrsd(3, addr(v[1]), 1) // ModRM.reg=3 (SBB) + memory operand (ModRM/SIB/disp)
            m.imm4(toImmAny(v[0])) // 4-byte immediate
        })
    }
    // SBBL r32, m32
    if isReg32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false) // optional REX for extended registers
            m.emit(0x19) // opcode 19 /r: SBB r/m32, r32
            m.mrsd(lcode(v[0]), addr(v[1]), 1) // ModRM.reg=v0 + memory operand (ModRM/SIB/disp)
        })
    }
    if p.len == 0 {
        panic("invalid operands for SBBL")
    }
    return p
}
 29198  
 29199  // SBBQ performs "Subtract with Borrow".
 29200  //
 29201  // Mnemonic        : SBB
 29202  // Supported forms : (8 forms)
 29203  //
 29204  //    * SBBQ imm32, rax
 29205  //    * SBBQ imm8, r64
 29206  //    * SBBQ imm32, r64
 29207  //    * SBBQ r64, r64
 29208  //    * SBBQ m64, r64
 29209  //    * SBBQ imm8, m64
 29210  //    * SBBQ imm32, m64
 29211  //    * SBBQ r64, m64
 29212  //
func (self *Program) SBBQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SBBQ", 2, Operands { v0, v1 })
    // SBBQ imm32, rax
    if isImm32(v0) && v1 == RAX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48) // REX.W prefix: 64-bit operand size
            m.emit(0x1d) // opcode 1D: SBB RAX, imm32 (sign-extended accumulator form)
            m.imm4(toImmAny(v[0])) // 4-byte immediate
        })
    }
    // SBBQ imm8, r64
    // NOTE: an imm8 value also satisfies isImm32Ext, so both this form and the
    // imm32 form below may be registered; both encodings are valid.
    if isImm8Ext(v0, 8) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1])) // REX.W prefix; low bit is REX.B for r8-r15
            m.emit(0x83) // opcode 83 /3: SBB r/m64, imm8 (sign-extended)
            m.emit(0xd8 | lcode(v[1])) // ModRM: mod=11, reg=3 (SBB), rm=v1
            m.imm1(toImmAny(v[0])) // 1-byte immediate
        })
    }
    // SBBQ imm32, r64
    if isImm32Ext(v0, 8) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1])) // REX.W prefix; low bit is REX.B for r8-r15
            m.emit(0x81) // opcode 81 /3: SBB r/m64, imm32 (sign-extended)
            m.emit(0xd8 | lcode(v[1])) // ModRM: mod=11, reg=3 (SBB), rm=v1
            m.imm4(toImmAny(v[0])) // 4-byte immediate
        })
    }
    // SBBQ r64, r64
    if isReg64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        // two equivalent encodings: MR form (19 /r) and RM form (1B /r)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1])) // REX.W with R (reg) and B (rm) extension bits
            m.emit(0x19) // opcode 19 /r: SBB r/m64, r64
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1])) // ModRM: mod=11, reg=v0, rm=v1
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0])) // REX.W with R (reg) and B (rm) extension bits
            m.emit(0x1b) // opcode 1B /r: SBB r64, r/m64
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v1, rm=v0
        })
    }
    // SBBQ m64, r64
    if isM64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0])) // REX prefix with W=1 for the memory operand
            m.emit(0x1b) // opcode 1B /r: SBB r64, r/m64
            m.mrsd(lcode(v[1]), addr(v[0]), 1) // ModRM.reg=v1 + memory operand (ModRM/SIB/disp)
        })
    }
    // SBBQ imm8, m64
    if isImm8Ext(v0, 8) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1])) // REX prefix with W=1 for the memory operand
            m.emit(0x83) // opcode 83 /3: SBB r/m64, imm8 (sign-extended)
            m.mrsd(3, addr(v[1]), 1) // ModRM.reg=3 (SBB) + memory operand (ModRM/SIB/disp)
            m.imm1(toImmAny(v[0])) // 1-byte immediate
        })
    }
    // SBBQ imm32, m64
    if isImm32Ext(v0, 8) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1])) // REX prefix with W=1 for the memory operand
            m.emit(0x81) // opcode 81 /3: SBB r/m64, imm32 (sign-extended)
            m.mrsd(3, addr(v[1]), 1) // ModRM.reg=3 (SBB) + memory operand (ModRM/SIB/disp)
            m.imm4(toImmAny(v[0])) // 4-byte immediate
        })
    }
    // SBBQ r64, m64
    if isReg64(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[0]), addr(v[1])) // REX prefix with W=1 for the memory operand
            m.emit(0x19) // opcode 19 /r: SBB r/m64, r64
            m.mrsd(lcode(v[0]), addr(v[1]), 1) // ModRM.reg=v0 + memory operand (ModRM/SIB/disp)
        })
    }
    if p.len == 0 {
        panic("invalid operands for SBBQ")
    }
    return p
}
 29301  
 29302  // SBBW performs "Subtract with Borrow".
 29303  //
 29304  // Mnemonic        : SBB
 29305  // Supported forms : (8 forms)
 29306  //
 29307  //    * SBBW imm16, ax
 29308  //    * SBBW imm8, r16
 29309  //    * SBBW imm16, r16
 29310  //    * SBBW r16, r16
 29311  //    * SBBW m16, r16
 29312  //    * SBBW imm8, m16
 29313  //    * SBBW imm16, m16
 29314  //    * SBBW r16, m16
 29315  //
func (self *Program) SBBW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SBBW", 2, Operands { v0, v1 })
    // SBBW imm16, ax
    if isImm16(v0) && v1 == AX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66) // operand-size override: 16-bit operation
            m.emit(0x1d) // opcode 1D: SBB AX, imm16 (short accumulator form)
            m.imm2(toImmAny(v[0])) // 2-byte immediate
        })
    }
    // SBBW imm8, r16
    // NOTE: an imm8 value also satisfies isImm16, so both this form and the
    // imm16 form below may be registered; both encodings are valid.
    if isImm8Ext(v0, 2) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66) // operand-size override: 16-bit operation
            m.rexo(0, v[1], false) // optional REX for extended registers
            m.emit(0x83) // opcode 83 /3: SBB r/m16, imm8 (sign-extended)
            m.emit(0xd8 | lcode(v[1])) // ModRM: mod=11, reg=3 (SBB), rm=v1
            m.imm1(toImmAny(v[0])) // 1-byte immediate
        })
    }
    // SBBW imm16, r16
    if isImm16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66) // operand-size override: 16-bit operation
            m.rexo(0, v[1], false) // optional REX for extended registers
            m.emit(0x81) // opcode 81 /3: SBB r/m16, imm16
            m.emit(0xd8 | lcode(v[1])) // ModRM: mod=11, reg=3 (SBB), rm=v1
            m.imm2(toImmAny(v[0])) // 2-byte immediate
        })
    }
    // SBBW r16, r16
    if isReg16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        // two equivalent encodings: MR form (19 /r) and RM form (1B /r)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66) // operand-size override: 16-bit operation
            m.rexo(hcode(v[0]), v[1], false) // optional REX for extended registers
            m.emit(0x19) // opcode 19 /r: SBB r/m16, r16
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1])) // ModRM: mod=11, reg=v0, rm=v1
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66) // operand-size override: 16-bit operation
            m.rexo(hcode(v[1]), v[0], false) // optional REX for extended registers
            m.emit(0x1b) // opcode 1B /r: SBB r16, r/m16
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v1, rm=v0
        })
    }
    // SBBW m16, r16
    if isM16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66) // operand-size override: 16-bit operation
            m.rexo(hcode(v[1]), addr(v[0]), false) // optional REX for extended registers
            m.emit(0x1b) // opcode 1B /r: SBB r16, r/m16
            m.mrsd(lcode(v[1]), addr(v[0]), 1) // ModRM.reg=v1 + memory operand (ModRM/SIB/disp)
        })
    }
    // SBBW imm8, m16
    if isImm8Ext(v0, 2) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66) // operand-size override: 16-bit operation
            m.rexo(0, addr(v[1]), false) // optional REX for the memory operand
            m.emit(0x83) // opcode 83 /3: SBB r/m16, imm8 (sign-extended)
            m.mrsd(3, addr(v[1]), 1) // ModRM.reg=3 (SBB) + memory operand (ModRM/SIB/disp)
            m.imm1(toImmAny(v[0])) // 1-byte immediate
        })
    }
    // SBBW imm16, m16
    if isImm16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66) // operand-size override: 16-bit operation
            m.rexo(0, addr(v[1]), false) // optional REX for the memory operand
            m.emit(0x81) // opcode 81 /3: SBB r/m16, imm16
            m.mrsd(3, addr(v[1]), 1) // ModRM.reg=3 (SBB) + memory operand (ModRM/SIB/disp)
            m.imm2(toImmAny(v[0])) // 2-byte immediate
        })
    }
    // SBBW r16, m16
    if isReg16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66) // operand-size override: 16-bit operation
            m.rexo(hcode(v[0]), addr(v[1]), false) // optional REX for extended registers
            m.emit(0x19) // opcode 19 /r: SBB r/m16, r16
            m.mrsd(lcode(v[0]), addr(v[1]), 1) // ModRM.reg=v0 + memory operand (ModRM/SIB/disp)
        })
    }
    if p.len == 0 {
        panic("invalid operands for SBBW")
    }
    return p
}
 29412  
 29413  // SETA performs "Set byte if above (CF == 0 and ZF == 0)".
 29414  //
 29415  // Mnemonic        : SETA
 29416  // Supported forms : (2 forms)
 29417  //
 29418  //    * SETA r8
 29419  //    * SETA m8
 29420  //
func (self *Program) SETA(v0 interface{}) *Instruction {
    p := self.alloc("SETA", 1, Operands { v0 })
    // SETA r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0])) // optional REX; forced for SPL/BPL/SIL/DIL
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0x97) // opcode 0F 97: SETA r/m8
            m.emit(0xc0 | lcode(v[0])) // ModRM: mod=11, rm=v0 (reg field unused)
        })
    }
    // SETA m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false) // optional REX for the memory operand
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0x97) // opcode 0F 97: SETA r/m8
            m.mrsd(0, addr(v[0]), 1) // memory operand (ModRM/SIB/disp); reg field unused
        })
    }
    if p.len == 0 {
        panic("invalid operands for SETA")
    }
    return p
}
 29448  
 29449  // SETAE performs "Set byte if above or equal (CF == 0)".
 29450  //
 29451  // Mnemonic        : SETAE
 29452  // Supported forms : (2 forms)
 29453  //
 29454  //    * SETAE r8
 29455  //    * SETAE m8
 29456  //
func (self *Program) SETAE(v0 interface{}) *Instruction {
    p := self.alloc("SETAE", 1, Operands { v0 })
    // SETAE r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0])) // optional REX; forced for SPL/BPL/SIL/DIL
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0x93) // opcode 0F 93: SETAE r/m8
            m.emit(0xc0 | lcode(v[0])) // ModRM: mod=11, rm=v0 (reg field unused)
        })
    }
    // SETAE m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false) // optional REX for the memory operand
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0x93) // opcode 0F 93: SETAE r/m8
            m.mrsd(0, addr(v[0]), 1) // memory operand (ModRM/SIB/disp); reg field unused
        })
    }
    if p.len == 0 {
        panic("invalid operands for SETAE")
    }
    return p
}
 29484  
 29485  // SETB performs "Set byte if below (CF == 1)".
 29486  //
 29487  // Mnemonic        : SETB
 29488  // Supported forms : (2 forms)
 29489  //
 29490  //    * SETB r8
 29491  //    * SETB m8
 29492  //
func (self *Program) SETB(v0 interface{}) *Instruction {
    p := self.alloc("SETB", 1, Operands { v0 })
    // SETB r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0])) // optional REX; forced for SPL/BPL/SIL/DIL
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0x92) // opcode 0F 92: SETB r/m8
            m.emit(0xc0 | lcode(v[0])) // ModRM: mod=11, rm=v0 (reg field unused)
        })
    }
    // SETB m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false) // optional REX for the memory operand
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0x92) // opcode 0F 92: SETB r/m8
            m.mrsd(0, addr(v[0]), 1) // memory operand (ModRM/SIB/disp); reg field unused
        })
    }
    if p.len == 0 {
        panic("invalid operands for SETB")
    }
    return p
}
 29520  
 29521  // SETBE performs "Set byte if below or equal (CF == 1 or ZF == 1)".
 29522  //
 29523  // Mnemonic        : SETBE
 29524  // Supported forms : (2 forms)
 29525  //
 29526  //    * SETBE r8
 29527  //    * SETBE m8
 29528  //
func (self *Program) SETBE(v0 interface{}) *Instruction {
    p := self.alloc("SETBE", 1, Operands { v0 })
    // SETBE r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0])) // optional REX; forced for SPL/BPL/SIL/DIL
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0x96) // opcode 0F 96: SETBE r/m8
            m.emit(0xc0 | lcode(v[0])) // ModRM: mod=11, rm=v0 (reg field unused)
        })
    }
    // SETBE m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false) // optional REX for the memory operand
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0x96) // opcode 0F 96: SETBE r/m8
            m.mrsd(0, addr(v[0]), 1) // memory operand (ModRM/SIB/disp); reg field unused
        })
    }
    if p.len == 0 {
        panic("invalid operands for SETBE")
    }
    return p
}
 29556  
 29557  // SETC performs "Set byte if carry (CF == 1)".
 29558  //
 29559  // Mnemonic        : SETC
 29560  // Supported forms : (2 forms)
 29561  //
 29562  //    * SETC r8
 29563  //    * SETC m8
 29564  //
func (self *Program) SETC(v0 interface{}) *Instruction {
    p := self.alloc("SETC", 1, Operands { v0 })
    // SETC r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0])) // optional REX; forced for SPL/BPL/SIL/DIL
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0x92) // opcode 0F 92: SETC (alias of SETB) r/m8
            m.emit(0xc0 | lcode(v[0])) // ModRM: mod=11, rm=v0 (reg field unused)
        })
    }
    // SETC m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false) // optional REX for the memory operand
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0x92) // opcode 0F 92: SETC (alias of SETB) r/m8
            m.mrsd(0, addr(v[0]), 1) // memory operand (ModRM/SIB/disp); reg field unused
        })
    }
    if p.len == 0 {
        panic("invalid operands for SETC")
    }
    return p
}
 29592  
 29593  // SETE performs "Set byte if equal (ZF == 1)".
 29594  //
 29595  // Mnemonic        : SETE
 29596  // Supported forms : (2 forms)
 29597  //
 29598  //    * SETE r8
 29599  //    * SETE m8
 29600  //
func (self *Program) SETE(v0 interface{}) *Instruction {
    p := self.alloc("SETE", 1, Operands { v0 })
    // SETE r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0])) // optional REX; forced for SPL/BPL/SIL/DIL
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0x94) // opcode 0F 94: SETE r/m8
            m.emit(0xc0 | lcode(v[0])) // ModRM: mod=11, rm=v0 (reg field unused)
        })
    }
    // SETE m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false) // optional REX for the memory operand
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0x94) // opcode 0F 94: SETE r/m8
            m.mrsd(0, addr(v[0]), 1) // memory operand (ModRM/SIB/disp); reg field unused
        })
    }
    if p.len == 0 {
        panic("invalid operands for SETE")
    }
    return p
}
 29628  
 29629  // SETG performs "Set byte if greater (ZF == 0 and SF == OF)".
 29630  //
 29631  // Mnemonic        : SETG
 29632  // Supported forms : (2 forms)
 29633  //
 29634  //    * SETG r8
 29635  //    * SETG m8
 29636  //
func (self *Program) SETG(v0 interface{}) *Instruction {
    p := self.alloc("SETG", 1, Operands { v0 })
    // SETG r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0])) // optional REX; forced for SPL/BPL/SIL/DIL
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0x9f) // opcode 0F 9F: SETG r/m8
            m.emit(0xc0 | lcode(v[0])) // ModRM: mod=11, rm=v0 (reg field unused)
        })
    }
    // SETG m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false) // optional REX for the memory operand
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0x9f) // opcode 0F 9F: SETG r/m8
            m.mrsd(0, addr(v[0]), 1) // memory operand (ModRM/SIB/disp); reg field unused
        })
    }
    if p.len == 0 {
        panic("invalid operands for SETG")
    }
    return p
}
 29664  
 29665  // SETGE performs "Set byte if greater or equal (SF == OF)".
 29666  //
 29667  // Mnemonic        : SETGE
 29668  // Supported forms : (2 forms)
 29669  //
 29670  //    * SETGE r8
 29671  //    * SETGE m8
 29672  //
func (self *Program) SETGE(v0 interface{}) *Instruction {
    p := self.alloc("SETGE", 1, Operands { v0 })
    // SETGE r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0])) // optional REX; forced for SPL/BPL/SIL/DIL
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0x9d) // opcode 0F 9D: SETGE r/m8
            m.emit(0xc0 | lcode(v[0])) // ModRM: mod=11, rm=v0 (reg field unused)
        })
    }
    // SETGE m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false) // optional REX for the memory operand
            m.emit(0x0f) // two-byte opcode escape
            m.emit(0x9d) // opcode 0F 9D: SETGE r/m8
            m.mrsd(0, addr(v[0]), 1) // memory operand (ModRM/SIB/disp); reg field unused
        })
    }
    if p.len == 0 {
        panic("invalid operands for SETGE")
    }
    return p
}
 29700  
// SETL performs "Set byte if less (SF != OF)".
//
// Mnemonic        : SETL
// Supported forms : (2 forms)
//
//    * SETL r8
//    * SETL m8
//
func (self *Program) SETL(v0 interface{}) *Instruction {
    p := self.alloc("SETL", 1, Operands { v0 })
    // SETL r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix (forced if isReg8REX(v[0]))
            m.emit(0x0f)                        // opcode: SETL r/m8 = 0F 9C
            m.emit(0x9c)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=0b11 (register-direct), rm=operand
        })
    }
    // SETL m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix for the memory operand
            m.emit(0x0f)                        // opcode: SETL r/m8 = 0F 9C
            m.emit(0x9c)
            m.mrsd(0, addr(v[0]), 1)            // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder matched the operand types: reject at instruction-build time
    if p.len == 0 {
        panic("invalid operands for SETL")
    }
    return p
}
 29736  
// SETLE performs "Set byte if less or equal (ZF == 1 or SF != OF)".
//
// Mnemonic        : SETLE
// Supported forms : (2 forms)
//
//    * SETLE r8
//    * SETLE m8
//
func (self *Program) SETLE(v0 interface{}) *Instruction {
    p := self.alloc("SETLE", 1, Operands { v0 })
    // SETLE r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix (forced if isReg8REX(v[0]))
            m.emit(0x0f)                        // opcode: SETLE r/m8 = 0F 9E
            m.emit(0x9e)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=0b11 (register-direct), rm=operand
        })
    }
    // SETLE m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix for the memory operand
            m.emit(0x0f)                        // opcode: SETLE r/m8 = 0F 9E
            m.emit(0x9e)
            m.mrsd(0, addr(v[0]), 1)            // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder matched the operand types: reject at instruction-build time
    if p.len == 0 {
        panic("invalid operands for SETLE")
    }
    return p
}
 29772  
// SETNA performs "Set byte if not above (CF == 1 or ZF == 1)".
//
// Mnemonic        : SETNA
// Supported forms : (2 forms)
//
//    * SETNA r8
//    * SETNA m8
//
func (self *Program) SETNA(v0 interface{}) *Instruction {
    p := self.alloc("SETNA", 1, Operands { v0 })
    // SETNA r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix (forced if isReg8REX(v[0]))
            m.emit(0x0f)                        // opcode: SETNA r/m8 = 0F 96 (alias of SETBE)
            m.emit(0x96)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=0b11 (register-direct), rm=operand
        })
    }
    // SETNA m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix for the memory operand
            m.emit(0x0f)                        // opcode: SETNA r/m8 = 0F 96 (alias of SETBE)
            m.emit(0x96)
            m.mrsd(0, addr(v[0]), 1)            // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder matched the operand types: reject at instruction-build time
    if p.len == 0 {
        panic("invalid operands for SETNA")
    }
    return p
}
 29808  
// SETNAE performs "Set byte if not above or equal (CF == 1)".
//
// Mnemonic        : SETNAE
// Supported forms : (2 forms)
//
//    * SETNAE r8
//    * SETNAE m8
//
func (self *Program) SETNAE(v0 interface{}) *Instruction {
    p := self.alloc("SETNAE", 1, Operands { v0 })
    // SETNAE r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix (forced if isReg8REX(v[0]))
            m.emit(0x0f)                        // opcode: SETNAE r/m8 = 0F 92 (alias of SETB)
            m.emit(0x92)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=0b11 (register-direct), rm=operand
        })
    }
    // SETNAE m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix for the memory operand
            m.emit(0x0f)                        // opcode: SETNAE r/m8 = 0F 92 (alias of SETB)
            m.emit(0x92)
            m.mrsd(0, addr(v[0]), 1)            // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder matched the operand types: reject at instruction-build time
    if p.len == 0 {
        panic("invalid operands for SETNAE")
    }
    return p
}
 29844  
// SETNB performs "Set byte if not below (CF == 0)".
//
// Mnemonic        : SETNB
// Supported forms : (2 forms)
//
//    * SETNB r8
//    * SETNB m8
//
func (self *Program) SETNB(v0 interface{}) *Instruction {
    p := self.alloc("SETNB", 1, Operands { v0 })
    // SETNB r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix (forced if isReg8REX(v[0]))
            m.emit(0x0f)                        // opcode: SETNB r/m8 = 0F 93 (alias of SETAE/SETNC)
            m.emit(0x93)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=0b11 (register-direct), rm=operand
        })
    }
    // SETNB m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix for the memory operand
            m.emit(0x0f)                        // opcode: SETNB r/m8 = 0F 93 (alias of SETAE/SETNC)
            m.emit(0x93)
            m.mrsd(0, addr(v[0]), 1)            // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder matched the operand types: reject at instruction-build time
    if p.len == 0 {
        panic("invalid operands for SETNB")
    }
    return p
}
 29880  
// SETNBE performs "Set byte if not below or equal (CF == 0 and ZF == 0)".
//
// Mnemonic        : SETNBE
// Supported forms : (2 forms)
//
//    * SETNBE r8
//    * SETNBE m8
//
func (self *Program) SETNBE(v0 interface{}) *Instruction {
    p := self.alloc("SETNBE", 1, Operands { v0 })
    // SETNBE r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix (forced if isReg8REX(v[0]))
            m.emit(0x0f)                        // opcode: SETNBE r/m8 = 0F 97 (alias of SETA)
            m.emit(0x97)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=0b11 (register-direct), rm=operand
        })
    }
    // SETNBE m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix for the memory operand
            m.emit(0x0f)                        // opcode: SETNBE r/m8 = 0F 97 (alias of SETA)
            m.emit(0x97)
            m.mrsd(0, addr(v[0]), 1)            // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder matched the operand types: reject at instruction-build time
    if p.len == 0 {
        panic("invalid operands for SETNBE")
    }
    return p
}
 29916  
// SETNC performs "Set byte if not carry (CF == 0)".
//
// Mnemonic        : SETNC
// Supported forms : (2 forms)
//
//    * SETNC r8
//    * SETNC m8
//
func (self *Program) SETNC(v0 interface{}) *Instruction {
    p := self.alloc("SETNC", 1, Operands { v0 })
    // SETNC r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix (forced if isReg8REX(v[0]))
            m.emit(0x0f)                        // opcode: SETNC r/m8 = 0F 93 (alias of SETAE/SETNB)
            m.emit(0x93)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=0b11 (register-direct), rm=operand
        })
    }
    // SETNC m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix for the memory operand
            m.emit(0x0f)                        // opcode: SETNC r/m8 = 0F 93 (alias of SETAE/SETNB)
            m.emit(0x93)
            m.mrsd(0, addr(v[0]), 1)            // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder matched the operand types: reject at instruction-build time
    if p.len == 0 {
        panic("invalid operands for SETNC")
    }
    return p
}
 29952  
// SETNE performs "Set byte if not equal (ZF == 0)".
//
// Mnemonic        : SETNE
// Supported forms : (2 forms)
//
//    * SETNE r8
//    * SETNE m8
//
func (self *Program) SETNE(v0 interface{}) *Instruction {
    p := self.alloc("SETNE", 1, Operands { v0 })
    // SETNE r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix (forced if isReg8REX(v[0]))
            m.emit(0x0f)                        // opcode: SETNE r/m8 = 0F 95 (alias of SETNZ)
            m.emit(0x95)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=0b11 (register-direct), rm=operand
        })
    }
    // SETNE m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix for the memory operand
            m.emit(0x0f)                        // opcode: SETNE r/m8 = 0F 95 (alias of SETNZ)
            m.emit(0x95)
            m.mrsd(0, addr(v[0]), 1)            // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder matched the operand types: reject at instruction-build time
    if p.len == 0 {
        panic("invalid operands for SETNE")
    }
    return p
}
 29988  
// SETNG performs "Set byte if not greater (ZF == 1 or SF != OF)".
//
// Mnemonic        : SETNG
// Supported forms : (2 forms)
//
//    * SETNG r8
//    * SETNG m8
//
func (self *Program) SETNG(v0 interface{}) *Instruction {
    p := self.alloc("SETNG", 1, Operands { v0 })
    // SETNG r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix (forced if isReg8REX(v[0]))
            m.emit(0x0f)                        // opcode: SETNG r/m8 = 0F 9E (alias of SETLE)
            m.emit(0x9e)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=0b11 (register-direct), rm=operand
        })
    }
    // SETNG m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix for the memory operand
            m.emit(0x0f)                        // opcode: SETNG r/m8 = 0F 9E (alias of SETLE)
            m.emit(0x9e)
            m.mrsd(0, addr(v[0]), 1)            // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder matched the operand types: reject at instruction-build time
    if p.len == 0 {
        panic("invalid operands for SETNG")
    }
    return p
}
 30024  
// SETNGE performs "Set byte if not greater or equal (SF != OF)".
//
// Mnemonic        : SETNGE
// Supported forms : (2 forms)
//
//    * SETNGE r8
//    * SETNGE m8
//
func (self *Program) SETNGE(v0 interface{}) *Instruction {
    p := self.alloc("SETNGE", 1, Operands { v0 })
    // SETNGE r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix (forced if isReg8REX(v[0]))
            m.emit(0x0f)                        // opcode: SETNGE r/m8 = 0F 9C (alias of SETL)
            m.emit(0x9c)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=0b11 (register-direct), rm=operand
        })
    }
    // SETNGE m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix for the memory operand
            m.emit(0x0f)                        // opcode: SETNGE r/m8 = 0F 9C (alias of SETL)
            m.emit(0x9c)
            m.mrsd(0, addr(v[0]), 1)            // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder matched the operand types: reject at instruction-build time
    if p.len == 0 {
        panic("invalid operands for SETNGE")
    }
    return p
}
 30060  
// SETNL performs "Set byte if not less (SF == OF)".
//
// Mnemonic        : SETNL
// Supported forms : (2 forms)
//
//    * SETNL r8
//    * SETNL m8
//
func (self *Program) SETNL(v0 interface{}) *Instruction {
    p := self.alloc("SETNL", 1, Operands { v0 })
    // SETNL r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix (forced if isReg8REX(v[0]))
            m.emit(0x0f)                        // opcode: SETNL r/m8 = 0F 9D (alias of SETGE)
            m.emit(0x9d)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=0b11 (register-direct), rm=operand
        })
    }
    // SETNL m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix for the memory operand
            m.emit(0x0f)                        // opcode: SETNL r/m8 = 0F 9D (alias of SETGE)
            m.emit(0x9d)
            m.mrsd(0, addr(v[0]), 1)            // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder matched the operand types: reject at instruction-build time
    if p.len == 0 {
        panic("invalid operands for SETNL")
    }
    return p
}
 30096  
// SETNLE performs "Set byte if not less or equal (ZF == 0 and SF == OF)".
//
// Mnemonic        : SETNLE
// Supported forms : (2 forms)
//
//    * SETNLE r8
//    * SETNLE m8
//
func (self *Program) SETNLE(v0 interface{}) *Instruction {
    p := self.alloc("SETNLE", 1, Operands { v0 })
    // SETNLE r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix (forced if isReg8REX(v[0]))
            m.emit(0x0f)                        // opcode: SETNLE r/m8 = 0F 9F (alias of SETG)
            m.emit(0x9f)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=0b11 (register-direct), rm=operand
        })
    }
    // SETNLE m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix for the memory operand
            m.emit(0x0f)                        // opcode: SETNLE r/m8 = 0F 9F (alias of SETG)
            m.emit(0x9f)
            m.mrsd(0, addr(v[0]), 1)            // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder matched the operand types: reject at instruction-build time
    if p.len == 0 {
        panic("invalid operands for SETNLE")
    }
    return p
}
 30132  
// SETNO performs "Set byte if not overflow (OF == 0)".
//
// Mnemonic        : SETNO
// Supported forms : (2 forms)
//
//    * SETNO r8
//    * SETNO m8
//
func (self *Program) SETNO(v0 interface{}) *Instruction {
    p := self.alloc("SETNO", 1, Operands { v0 })
    // SETNO r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix (forced if isReg8REX(v[0]))
            m.emit(0x0f)                        // opcode: SETNO r/m8 = 0F 91
            m.emit(0x91)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=0b11 (register-direct), rm=operand
        })
    }
    // SETNO m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix for the memory operand
            m.emit(0x0f)                        // opcode: SETNO r/m8 = 0F 91
            m.emit(0x91)
            m.mrsd(0, addr(v[0]), 1)            // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder matched the operand types: reject at instruction-build time
    if p.len == 0 {
        panic("invalid operands for SETNO")
    }
    return p
}
 30168  
// SETNP performs "Set byte if not parity (PF == 0)".
//
// Mnemonic        : SETNP
// Supported forms : (2 forms)
//
//    * SETNP r8
//    * SETNP m8
//
func (self *Program) SETNP(v0 interface{}) *Instruction {
    p := self.alloc("SETNP", 1, Operands { v0 })
    // SETNP r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix (forced if isReg8REX(v[0]))
            m.emit(0x0f)                        // opcode: SETNP r/m8 = 0F 9B (alias of SETPO)
            m.emit(0x9b)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=0b11 (register-direct), rm=operand
        })
    }
    // SETNP m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix for the memory operand
            m.emit(0x0f)                        // opcode: SETNP r/m8 = 0F 9B (alias of SETPO)
            m.emit(0x9b)
            m.mrsd(0, addr(v[0]), 1)            // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder matched the operand types: reject at instruction-build time
    if p.len == 0 {
        panic("invalid operands for SETNP")
    }
    return p
}
 30204  
// SETNS performs "Set byte if not sign (SF == 0)".
//
// Mnemonic        : SETNS
// Supported forms : (2 forms)
//
//    * SETNS r8
//    * SETNS m8
//
func (self *Program) SETNS(v0 interface{}) *Instruction {
    p := self.alloc("SETNS", 1, Operands { v0 })
    // SETNS r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix (forced if isReg8REX(v[0]))
            m.emit(0x0f)                        // opcode: SETNS r/m8 = 0F 99
            m.emit(0x99)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=0b11 (register-direct), rm=operand
        })
    }
    // SETNS m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix for the memory operand
            m.emit(0x0f)                        // opcode: SETNS r/m8 = 0F 99
            m.emit(0x99)
            m.mrsd(0, addr(v[0]), 1)            // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder matched the operand types: reject at instruction-build time
    if p.len == 0 {
        panic("invalid operands for SETNS")
    }
    return p
}
 30240  
// SETNZ performs "Set byte if not zero (ZF == 0)".
//
// Mnemonic        : SETNZ
// Supported forms : (2 forms)
//
//    * SETNZ r8
//    * SETNZ m8
//
func (self *Program) SETNZ(v0 interface{}) *Instruction {
    p := self.alloc("SETNZ", 1, Operands { v0 })
    // SETNZ r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix (forced if isReg8REX(v[0]))
            m.emit(0x0f)                        // opcode: SETNZ r/m8 = 0F 95 (alias of SETNE)
            m.emit(0x95)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=0b11 (register-direct), rm=operand
        })
    }
    // SETNZ m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix for the memory operand
            m.emit(0x0f)                        // opcode: SETNZ r/m8 = 0F 95 (alias of SETNE)
            m.emit(0x95)
            m.mrsd(0, addr(v[0]), 1)            // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder matched the operand types: reject at instruction-build time
    if p.len == 0 {
        panic("invalid operands for SETNZ")
    }
    return p
}
 30276  
// SETO performs "Set byte if overflow (OF == 1)".
//
// Mnemonic        : SETO
// Supported forms : (2 forms)
//
//    * SETO r8
//    * SETO m8
//
func (self *Program) SETO(v0 interface{}) *Instruction {
    p := self.alloc("SETO", 1, Operands { v0 })
    // SETO r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix (forced if isReg8REX(v[0]))
            m.emit(0x0f)                        // opcode: SETO r/m8 = 0F 90
            m.emit(0x90)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=0b11 (register-direct), rm=operand
        })
    }
    // SETO m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix for the memory operand
            m.emit(0x0f)                        // opcode: SETO r/m8 = 0F 90
            m.emit(0x90)
            m.mrsd(0, addr(v[0]), 1)            // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder matched the operand types: reject at instruction-build time
    if p.len == 0 {
        panic("invalid operands for SETO")
    }
    return p
}
 30312  
// SETP performs "Set byte if parity (PF == 1)".
//
// Mnemonic        : SETP
// Supported forms : (2 forms)
//
//    * SETP r8
//    * SETP m8
//
func (self *Program) SETP(v0 interface{}) *Instruction {
    p := self.alloc("SETP", 1, Operands { v0 })
    // SETP r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix (forced if isReg8REX(v[0]))
            m.emit(0x0f)                        // opcode: SETP r/m8 = 0F 9A (alias of SETPE)
            m.emit(0x9a)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=0b11 (register-direct), rm=operand
        })
    }
    // SETP m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix for the memory operand
            m.emit(0x0f)                        // opcode: SETP r/m8 = 0F 9A (alias of SETPE)
            m.emit(0x9a)
            m.mrsd(0, addr(v[0]), 1)            // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder matched the operand types: reject at instruction-build time
    if p.len == 0 {
        panic("invalid operands for SETP")
    }
    return p
}
 30348  
// SETPE performs "Set byte if parity even (PF == 1)".
//
// Mnemonic        : SETPE
// Supported forms : (2 forms)
//
//    * SETPE r8
//    * SETPE m8
//
func (self *Program) SETPE(v0 interface{}) *Instruction {
    p := self.alloc("SETPE", 1, Operands { v0 })
    // SETPE r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix (forced if isReg8REX(v[0]))
            m.emit(0x0f)                        // opcode: SETPE r/m8 = 0F 9A (alias of SETP)
            m.emit(0x9a)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=0b11 (register-direct), rm=operand
        })
    }
    // SETPE m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix for the memory operand
            m.emit(0x0f)                        // opcode: SETPE r/m8 = 0F 9A (alias of SETP)
            m.emit(0x9a)
            m.mrsd(0, addr(v[0]), 1)            // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder matched the operand types: reject at instruction-build time
    if p.len == 0 {
        panic("invalid operands for SETPE")
    }
    return p
}
 30384  
// SETPO performs "Set byte if parity odd (PF == 0)".
//
// Mnemonic        : SETPO
// Supported forms : (2 forms)
//
//    * SETPO r8
//    * SETPO m8
//
func (self *Program) SETPO(v0 interface{}) *Instruction {
    p := self.alloc("SETPO", 1, Operands { v0 })
    // SETPO r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix (forced if isReg8REX(v[0]))
            m.emit(0x0f)                        // opcode: SETPO r/m8 = 0F 9B (alias of SETNP)
            m.emit(0x9b)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=0b11 (register-direct), rm=operand
        })
    }
    // SETPO m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix for the memory operand
            m.emit(0x0f)                        // opcode: SETPO r/m8 = 0F 9B (alias of SETNP)
            m.emit(0x9b)
            m.mrsd(0, addr(v[0]), 1)            // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder matched the operand types: reject at instruction-build time
    if p.len == 0 {
        panic("invalid operands for SETPO")
    }
    return p
}
 30420  
// SETS performs "Set byte if sign (SF == 1)".
//
// Mnemonic        : SETS
// Supported forms : (2 forms)
//
//    * SETS r8
//    * SETS m8
//
func (self *Program) SETS(v0 interface{}) *Instruction {
    p := self.alloc("SETS", 1, Operands { v0 })
    // SETS r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix (forced if isReg8REX(v[0]))
            m.emit(0x0f)                        // opcode: SETS r/m8 = 0F 98
            m.emit(0x98)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=0b11 (register-direct), rm=operand
        })
    }
    // SETS m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix for the memory operand
            m.emit(0x0f)                        // opcode: SETS r/m8 = 0F 98
            m.emit(0x98)
            m.mrsd(0, addr(v[0]), 1)            // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder matched the operand types: reject at instruction-build time
    if p.len == 0 {
        panic("invalid operands for SETS")
    }
    return p
}
 30456  
// SETZ performs "Set byte if zero (ZF == 1)".
//
// Mnemonic        : SETZ
// Supported forms : (2 forms)
//
//    * SETZ r8
//    * SETZ m8
//
func (self *Program) SETZ(v0 interface{}) *Instruction {
    p := self.alloc("SETZ", 1, Operands { v0 })
    // SETZ r8
    if isReg8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], isReg8REX(v[0]))    // optional REX prefix (forced if isReg8REX(v[0]))
            m.emit(0x0f)                        // opcode: SETZ r/m8 = 0F 94 (alias of SETE)
            m.emit(0x94)
            m.emit(0xc0 | lcode(v[0]))          // ModRM: mod=0b11 (register-direct), rm=operand
        })
    }
    // SETZ m8
    if isM8(v0) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)        // optional REX prefix for the memory operand
            m.emit(0x0f)                        // opcode: SETZ r/m8 = 0F 94 (alias of SETE)
            m.emit(0x94)
            m.mrsd(0, addr(v[0]), 1)            // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder matched the operand types: reject at instruction-build time
    if p.len == 0 {
        panic("invalid operands for SETZ")
    }
    return p
}
 30492  
// SFENCE performs "Store Fence".
//
// Mnemonic        : SFENCE
// Supported forms : (1 form)
//
//    * SFENCE    [MMX+]
//
func (self *Program) SFENCE() *Instruction {
    p := self.alloc("SFENCE", 0, Operands {  })
    // SFENCE
    self.require(ISA_MMX_PLUS)          // record the ISA requirement for this encoding
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        // fixed 3-byte encoding: SFENCE = 0F AE F8, no operands
        m.emit(0x0f)
        m.emit(0xae)
        m.emit(0xf8)
    })
    return p
}
 30512  
// SHA1MSG1 performs "Perform an Intermediate Calculation for the Next Four SHA1 Message Doublewords".
//
// Mnemonic        : SHA1MSG1
// Supported forms : (2 forms)
//
//    * SHA1MSG1 xmm, xmm     [SHA]
//    * SHA1MSG1 m128, xmm    [SHA]
//
func (self *Program) SHA1MSG1(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SHA1MSG1", 2, Operands { v0, v1 })
    // SHA1MSG1 xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SHA)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                // optional REX prefix; high bit of the destination goes into REX.R
            m.emit(0x0f)                                    // opcode: SHA1MSG1 = 0F 38 C9
            m.emit(0x38)
            m.emit(0xc9)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=0b11, reg=destination, rm=source
        })
    }
    // SHA1MSG1 m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SHA)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)          // optional REX prefix for destination + memory operand
            m.emit(0x0f)                                    // opcode: SHA1MSG1 = 0F 38 C9
            m.emit(0x38)
            m.emit(0xc9)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM + SIB + displacement; reg=destination
        })
    }
    // no encoder matched the operand types: reject at instruction-build time
    if p.len == 0 {
        panic("invalid operands for SHA1MSG1")
    }
    return p
}
 30552  
// SHA1MSG2 performs "Perform a Final Calculation for the Next Four SHA1 Message Doublewords".
//
// Mnemonic        : SHA1MSG2
// Supported forms : (2 forms)
//
//    * SHA1MSG2 xmm, xmm     [SHA]
//    * SHA1MSG2 m128, xmm    [SHA]
//
func (self *Program) SHA1MSG2(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SHA1MSG2", 2, Operands { v0, v1 })
    // SHA1MSG2 xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SHA)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                // optional REX prefix; high bit of the destination goes into REX.R
            m.emit(0x0f)                                    // opcode: SHA1MSG2 = 0F 38 CA
            m.emit(0x38)
            m.emit(0xca)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=0b11, reg=destination, rm=source
        })
    }
    // SHA1MSG2 m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SHA)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)          // optional REX prefix for destination + memory operand
            m.emit(0x0f)                                    // opcode: SHA1MSG2 = 0F 38 CA
            m.emit(0x38)
            m.emit(0xca)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM + SIB + displacement; reg=destination
        })
    }
    // no encoder matched the operand types: reject at instruction-build time
    if p.len == 0 {
        panic("invalid operands for SHA1MSG2")
    }
    return p
}
 30592  
// SHA1NEXTE performs "Calculate SHA1 State Variable E after Four Rounds".
//
// Mnemonic        : SHA1NEXTE
// Supported forms : (2 forms)
//
//    * SHA1NEXTE xmm, xmm     [SHA]
//    * SHA1NEXTE m128, xmm    [SHA]
//
func (self *Program) SHA1NEXTE(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SHA1NEXTE", 2, Operands { v0, v1 })
    // SHA1NEXTE xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SHA)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                // optional REX prefix; high bit of the destination goes into REX.R
            m.emit(0x0f)                                    // opcode: SHA1NEXTE = 0F 38 C8
            m.emit(0x38)
            m.emit(0xc8)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=0b11, reg=destination, rm=source
        })
    }
    // SHA1NEXTE m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SHA)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)          // optional REX prefix for destination + memory operand
            m.emit(0x0f)                                    // opcode: SHA1NEXTE = 0F 38 C8
            m.emit(0x38)
            m.emit(0xc8)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM + SIB + displacement; reg=destination
        })
    }
    // no encoder matched the operand types: reject at instruction-build time
    if p.len == 0 {
        panic("invalid operands for SHA1NEXTE")
    }
    return p
}
 30632  
// SHA1RNDS4 performs "Perform Four Rounds of SHA1 Operation".
//
// Mnemonic        : SHA1RNDS4
// Supported forms : (2 forms)
//
//    * SHA1RNDS4 imm8, xmm, xmm     [SHA]
//    * SHA1RNDS4 imm8, m128, xmm    [SHA]
//
func (self *Program) SHA1RNDS4(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("SHA1RNDS4", 3, Operands { v0, v1, v2 })
    // SHA1RNDS4 imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SHA)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[2]), v[1], false)                // optional REX prefix; high bit of the destination goes into REX.R
            m.emit(0x0f)                                    // opcode: SHA1RNDS4 = 0F 3A CC /r ib
            m.emit(0x3a)
            m.emit(0xcc)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))   // ModRM: mod=0b11, reg=destination, rm=source
            m.imm1(toImmAny(v[0]))                          // trailing imm8 (round-function selector per Intel SDM)
        })
    }
    // SHA1RNDS4 imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SHA)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[2]), addr(v[1]), false)          // optional REX prefix for destination + memory operand
            m.emit(0x0f)                                    // opcode: SHA1RNDS4 = 0F 3A CC /r ib
            m.emit(0x3a)
            m.emit(0xcc)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)              // ModRM + SIB + displacement; reg=destination
            m.imm1(toImmAny(v[0]))                          // trailing imm8 (round-function selector per Intel SDM)
        })
    }
    // no encoder matched the operand types: reject at instruction-build time
    if p.len == 0 {
        panic("invalid operands for SHA1RNDS4")
    }
    return p
}
 30674  
 30675  // SHA256MSG1 performs "Perform an Intermediate Calculation for the Next Four SHA256 Message Doublewords".
 30676  //
 30677  // Mnemonic        : SHA256MSG1
 30678  // Supported forms : (2 forms)
 30679  //
 30680  //    * SHA256MSG1 xmm, xmm     [SHA]
 30681  //    * SHA256MSG1 m128, xmm    [SHA]
 30682  //
 30683  func (self *Program) SHA256MSG1(v0 interface{}, v1 interface{}) *Instruction {
 30684      p := self.alloc("SHA256MSG1", 2, Operands { v0, v1 })
 30685      // SHA256MSG1 xmm, xmm
 30686      if isXMM(v0) && isXMM(v1) {
 30687          self.require(ISA_SHA)
 30688          p.domain = DomainCrypto
 30689          p.add(0, func(m *_Encoding, v []interface{}) {
 30690              m.rexo(hcode(v[1]), v[0], false)
 30691              m.emit(0x0f)
 30692              m.emit(0x38)
 30693              m.emit(0xcc)
 30694              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 30695          })
 30696      }
 30697      // SHA256MSG1 m128, xmm
 30698      if isM128(v0) && isXMM(v1) {
 30699          self.require(ISA_SHA)
 30700          p.domain = DomainCrypto
 30701          p.add(0, func(m *_Encoding, v []interface{}) {
 30702              m.rexo(hcode(v[1]), addr(v[0]), false)
 30703              m.emit(0x0f)
 30704              m.emit(0x38)
 30705              m.emit(0xcc)
 30706              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 30707          })
 30708      }
 30709      if p.len == 0 {
 30710          panic("invalid operands for SHA256MSG1")
 30711      }
 30712      return p
 30713  }
 30714  
 30715  // SHA256MSG2 performs "Perform a Final Calculation for the Next Four SHA256 Message Doublewords".
 30716  //
 30717  // Mnemonic        : SHA256MSG2
 30718  // Supported forms : (2 forms)
 30719  //
 30720  //    * SHA256MSG2 xmm, xmm     [SHA]
 30721  //    * SHA256MSG2 m128, xmm    [SHA]
 30722  //
 30723  func (self *Program) SHA256MSG2(v0 interface{}, v1 interface{}) *Instruction {
 30724      p := self.alloc("SHA256MSG2", 2, Operands { v0, v1 })
 30725      // SHA256MSG2 xmm, xmm
 30726      if isXMM(v0) && isXMM(v1) {
 30727          self.require(ISA_SHA)
 30728          p.domain = DomainCrypto
 30729          p.add(0, func(m *_Encoding, v []interface{}) {
 30730              m.rexo(hcode(v[1]), v[0], false)
 30731              m.emit(0x0f)
 30732              m.emit(0x38)
 30733              m.emit(0xcd)
 30734              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 30735          })
 30736      }
 30737      // SHA256MSG2 m128, xmm
 30738      if isM128(v0) && isXMM(v1) {
 30739          self.require(ISA_SHA)
 30740          p.domain = DomainCrypto
 30741          p.add(0, func(m *_Encoding, v []interface{}) {
 30742              m.rexo(hcode(v[1]), addr(v[0]), false)
 30743              m.emit(0x0f)
 30744              m.emit(0x38)
 30745              m.emit(0xcd)
 30746              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 30747          })
 30748      }
 30749      if p.len == 0 {
 30750          panic("invalid operands for SHA256MSG2")
 30751      }
 30752      return p
 30753  }
 30754  
 30755  // SHA256RNDS2 performs "Perform Two Rounds of SHA256 Operation".
 30756  //
 30757  // Mnemonic        : SHA256RNDS2
 30758  // Supported forms : (2 forms)
 30759  //
 30760  //    * SHA256RNDS2 xmm0, xmm, xmm     [SHA]
 30761  //    * SHA256RNDS2 xmm0, m128, xmm    [SHA]
 30762  //
 30763  func (self *Program) SHA256RNDS2(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
 30764      p := self.alloc("SHA256RNDS2", 3, Operands { v0, v1, v2 })
 30765      // SHA256RNDS2 xmm0, xmm, xmm
 30766      if v0 == XMM0 && isXMM(v1) && isXMM(v2) {
 30767          self.require(ISA_SHA)
 30768          p.domain = DomainCrypto
 30769          p.add(0, func(m *_Encoding, v []interface{}) {
 30770              m.rexo(hcode(v[2]), v[1], false)
 30771              m.emit(0x0f)
 30772              m.emit(0x38)
 30773              m.emit(0xcb)
 30774              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
 30775          })
 30776      }
 30777      // SHA256RNDS2 xmm0, m128, xmm
 30778      if v0 == XMM0 && isM128(v1) && isXMM(v2) {
 30779          self.require(ISA_SHA)
 30780          p.domain = DomainCrypto
 30781          p.add(0, func(m *_Encoding, v []interface{}) {
 30782              m.rexo(hcode(v[2]), addr(v[1]), false)
 30783              m.emit(0x0f)
 30784              m.emit(0x38)
 30785              m.emit(0xcb)
 30786              m.mrsd(lcode(v[2]), addr(v[1]), 1)
 30787          })
 30788      }
 30789      if p.len == 0 {
 30790          panic("invalid operands for SHA256RNDS2")
 30791      }
 30792      return p
 30793  }
 30794  
 30795  // SHLB performs "Logical Shift Left".
 30796  //
 30797  // Mnemonic        : SHL
 30798  // Supported forms : (6 forms)
 30799  //
 30800  //    * SHLB 1, r8
 30801  //    * SHLB imm8, r8
 30802  //    * SHLB cl, r8
 30803  //    * SHLB 1, m8
 30804  //    * SHLB imm8, m8
 30805  //    * SHLB cl, m8
 30806  //
 30807  func (self *Program) SHLB(v0 interface{}, v1 interface{}) *Instruction {
 30808      p := self.alloc("SHLB", 2, Operands { v0, v1 })
 30809      // SHLB 1, r8
 30810      if isConst1(v0) && isReg8(v1) {
 30811          p.domain = DomainGeneric
 30812          p.add(0, func(m *_Encoding, v []interface{}) {
 30813              m.rexo(0, v[1], isReg8REX(v[1]))
 30814              m.emit(0xd0)
 30815              m.emit(0xe0 | lcode(v[1]))
 30816          })
 30817      }
 30818      // SHLB imm8, r8
 30819      if isImm8(v0) && isReg8(v1) {
 30820          p.domain = DomainGeneric
 30821          p.add(0, func(m *_Encoding, v []interface{}) {
 30822              m.rexo(0, v[1], isReg8REX(v[1]))
 30823              m.emit(0xc0)
 30824              m.emit(0xe0 | lcode(v[1]))
 30825              m.imm1(toImmAny(v[0]))
 30826          })
 30827      }
 30828      // SHLB cl, r8
 30829      if v0 == CL && isReg8(v1) {
 30830          p.domain = DomainGeneric
 30831          p.add(0, func(m *_Encoding, v []interface{}) {
 30832              m.rexo(0, v[1], isReg8REX(v[1]))
 30833              m.emit(0xd2)
 30834              m.emit(0xe0 | lcode(v[1]))
 30835          })
 30836      }
 30837      // SHLB 1, m8
 30838      if isConst1(v0) && isM8(v1) {
 30839          p.domain = DomainGeneric
 30840          p.add(0, func(m *_Encoding, v []interface{}) {
 30841              m.rexo(0, addr(v[1]), false)
 30842              m.emit(0xd0)
 30843              m.mrsd(4, addr(v[1]), 1)
 30844          })
 30845      }
 30846      // SHLB imm8, m8
 30847      if isImm8(v0) && isM8(v1) {
 30848          p.domain = DomainGeneric
 30849          p.add(0, func(m *_Encoding, v []interface{}) {
 30850              m.rexo(0, addr(v[1]), false)
 30851              m.emit(0xc0)
 30852              m.mrsd(4, addr(v[1]), 1)
 30853              m.imm1(toImmAny(v[0]))
 30854          })
 30855      }
 30856      // SHLB cl, m8
 30857      if v0 == CL && isM8(v1) {
 30858          p.domain = DomainGeneric
 30859          p.add(0, func(m *_Encoding, v []interface{}) {
 30860              m.rexo(0, addr(v[1]), false)
 30861              m.emit(0xd2)
 30862              m.mrsd(4, addr(v[1]), 1)
 30863          })
 30864      }
 30865      if p.len == 0 {
 30866          panic("invalid operands for SHLB")
 30867      }
 30868      return p
 30869  }
 30870  
 30871  // SHLDL performs "Integer Double Precision Shift Left".
 30872  //
 30873  // Mnemonic        : SHLD
 30874  // Supported forms : (4 forms)
 30875  //
 30876  //    * SHLDL imm8, r32, r32
 30877  //    * SHLDL cl, r32, r32
 30878  //    * SHLDL imm8, r32, m32
 30879  //    * SHLDL cl, r32, m32
 30880  //
 30881  func (self *Program) SHLDL(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
 30882      p := self.alloc("SHLDL", 3, Operands { v0, v1, v2 })
 30883      // SHLDL imm8, r32, r32
 30884      if isImm8(v0) && isReg32(v1) && isReg32(v2) {
 30885          p.domain = DomainGeneric
 30886          p.add(0, func(m *_Encoding, v []interface{}) {
 30887              m.rexo(hcode(v[1]), v[2], false)
 30888              m.emit(0x0f)
 30889              m.emit(0xa4)
 30890              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
 30891              m.imm1(toImmAny(v[0]))
 30892          })
 30893      }
 30894      // SHLDL cl, r32, r32
 30895      if v0 == CL && isReg32(v1) && isReg32(v2) {
 30896          p.domain = DomainGeneric
 30897          p.add(0, func(m *_Encoding, v []interface{}) {
 30898              m.rexo(hcode(v[1]), v[2], false)
 30899              m.emit(0x0f)
 30900              m.emit(0xa5)
 30901              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
 30902          })
 30903      }
 30904      // SHLDL imm8, r32, m32
 30905      if isImm8(v0) && isReg32(v1) && isM32(v2) {
 30906          p.domain = DomainGeneric
 30907          p.add(0, func(m *_Encoding, v []interface{}) {
 30908              m.rexo(hcode(v[1]), addr(v[2]), false)
 30909              m.emit(0x0f)
 30910              m.emit(0xa4)
 30911              m.mrsd(lcode(v[1]), addr(v[2]), 1)
 30912              m.imm1(toImmAny(v[0]))
 30913          })
 30914      }
 30915      // SHLDL cl, r32, m32
 30916      if v0 == CL && isReg32(v1) && isM32(v2) {
 30917          p.domain = DomainGeneric
 30918          p.add(0, func(m *_Encoding, v []interface{}) {
 30919              m.rexo(hcode(v[1]), addr(v[2]), false)
 30920              m.emit(0x0f)
 30921              m.emit(0xa5)
 30922              m.mrsd(lcode(v[1]), addr(v[2]), 1)
 30923          })
 30924      }
 30925      if p.len == 0 {
 30926          panic("invalid operands for SHLDL")
 30927      }
 30928      return p
 30929  }
 30930  
 30931  // SHLDQ performs "Integer Double Precision Shift Left".
 30932  //
 30933  // Mnemonic        : SHLD
 30934  // Supported forms : (4 forms)
 30935  //
 30936  //    * SHLDQ imm8, r64, r64
 30937  //    * SHLDQ cl, r64, r64
 30938  //    * SHLDQ imm8, r64, m64
 30939  //    * SHLDQ cl, r64, m64
 30940  //
 30941  func (self *Program) SHLDQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
 30942      p := self.alloc("SHLDQ", 3, Operands { v0, v1, v2 })
 30943      // SHLDQ imm8, r64, r64
 30944      if isImm8(v0) && isReg64(v1) && isReg64(v2) {
 30945          p.domain = DomainGeneric
 30946          p.add(0, func(m *_Encoding, v []interface{}) {
 30947              m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[2]))
 30948              m.emit(0x0f)
 30949              m.emit(0xa4)
 30950              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
 30951              m.imm1(toImmAny(v[0]))
 30952          })
 30953      }
 30954      // SHLDQ cl, r64, r64
 30955      if v0 == CL && isReg64(v1) && isReg64(v2) {
 30956          p.domain = DomainGeneric
 30957          p.add(0, func(m *_Encoding, v []interface{}) {
 30958              m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[2]))
 30959              m.emit(0x0f)
 30960              m.emit(0xa5)
 30961              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
 30962          })
 30963      }
 30964      // SHLDQ imm8, r64, m64
 30965      if isImm8(v0) && isReg64(v1) && isM64(v2) {
 30966          p.domain = DomainGeneric
 30967          p.add(0, func(m *_Encoding, v []interface{}) {
 30968              m.rexm(1, hcode(v[1]), addr(v[2]))
 30969              m.emit(0x0f)
 30970              m.emit(0xa4)
 30971              m.mrsd(lcode(v[1]), addr(v[2]), 1)
 30972              m.imm1(toImmAny(v[0]))
 30973          })
 30974      }
 30975      // SHLDQ cl, r64, m64
 30976      if v0 == CL && isReg64(v1) && isM64(v2) {
 30977          p.domain = DomainGeneric
 30978          p.add(0, func(m *_Encoding, v []interface{}) {
 30979              m.rexm(1, hcode(v[1]), addr(v[2]))
 30980              m.emit(0x0f)
 30981              m.emit(0xa5)
 30982              m.mrsd(lcode(v[1]), addr(v[2]), 1)
 30983          })
 30984      }
 30985      if p.len == 0 {
 30986          panic("invalid operands for SHLDQ")
 30987      }
 30988      return p
 30989  }
 30990  
 30991  // SHLDW performs "Integer Double Precision Shift Left".
 30992  //
 30993  // Mnemonic        : SHLD
 30994  // Supported forms : (4 forms)
 30995  //
 30996  //    * SHLDW imm8, r16, r16
 30997  //    * SHLDW cl, r16, r16
 30998  //    * SHLDW imm8, r16, m16
 30999  //    * SHLDW cl, r16, m16
 31000  //
 31001  func (self *Program) SHLDW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
 31002      p := self.alloc("SHLDW", 3, Operands { v0, v1, v2 })
 31003      // SHLDW imm8, r16, r16
 31004      if isImm8(v0) && isReg16(v1) && isReg16(v2) {
 31005          p.domain = DomainGeneric
 31006          p.add(0, func(m *_Encoding, v []interface{}) {
 31007              m.emit(0x66)
 31008              m.rexo(hcode(v[1]), v[2], false)
 31009              m.emit(0x0f)
 31010              m.emit(0xa4)
 31011              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
 31012              m.imm1(toImmAny(v[0]))
 31013          })
 31014      }
 31015      // SHLDW cl, r16, r16
 31016      if v0 == CL && isReg16(v1) && isReg16(v2) {
 31017          p.domain = DomainGeneric
 31018          p.add(0, func(m *_Encoding, v []interface{}) {
 31019              m.emit(0x66)
 31020              m.rexo(hcode(v[1]), v[2], false)
 31021              m.emit(0x0f)
 31022              m.emit(0xa5)
 31023              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
 31024          })
 31025      }
 31026      // SHLDW imm8, r16, m16
 31027      if isImm8(v0) && isReg16(v1) && isM16(v2) {
 31028          p.domain = DomainGeneric
 31029          p.add(0, func(m *_Encoding, v []interface{}) {
 31030              m.emit(0x66)
 31031              m.rexo(hcode(v[1]), addr(v[2]), false)
 31032              m.emit(0x0f)
 31033              m.emit(0xa4)
 31034              m.mrsd(lcode(v[1]), addr(v[2]), 1)
 31035              m.imm1(toImmAny(v[0]))
 31036          })
 31037      }
 31038      // SHLDW cl, r16, m16
 31039      if v0 == CL && isReg16(v1) && isM16(v2) {
 31040          p.domain = DomainGeneric
 31041          p.add(0, func(m *_Encoding, v []interface{}) {
 31042              m.emit(0x66)
 31043              m.rexo(hcode(v[1]), addr(v[2]), false)
 31044              m.emit(0x0f)
 31045              m.emit(0xa5)
 31046              m.mrsd(lcode(v[1]), addr(v[2]), 1)
 31047          })
 31048      }
 31049      if p.len == 0 {
 31050          panic("invalid operands for SHLDW")
 31051      }
 31052      return p
 31053  }
 31054  
 31055  // SHLL performs "Logical Shift Left".
 31056  //
 31057  // Mnemonic        : SHL
 31058  // Supported forms : (6 forms)
 31059  //
 31060  //    * SHLL 1, r32
 31061  //    * SHLL imm8, r32
 31062  //    * SHLL cl, r32
 31063  //    * SHLL 1, m32
 31064  //    * SHLL imm8, m32
 31065  //    * SHLL cl, m32
 31066  //
 31067  func (self *Program) SHLL(v0 interface{}, v1 interface{}) *Instruction {
 31068      p := self.alloc("SHLL", 2, Operands { v0, v1 })
 31069      // SHLL 1, r32
 31070      if isConst1(v0) && isReg32(v1) {
 31071          p.domain = DomainGeneric
 31072          p.add(0, func(m *_Encoding, v []interface{}) {
 31073              m.rexo(0, v[1], false)
 31074              m.emit(0xd1)
 31075              m.emit(0xe0 | lcode(v[1]))
 31076          })
 31077      }
 31078      // SHLL imm8, r32
 31079      if isImm8(v0) && isReg32(v1) {
 31080          p.domain = DomainGeneric
 31081          p.add(0, func(m *_Encoding, v []interface{}) {
 31082              m.rexo(0, v[1], false)
 31083              m.emit(0xc1)
 31084              m.emit(0xe0 | lcode(v[1]))
 31085              m.imm1(toImmAny(v[0]))
 31086          })
 31087      }
 31088      // SHLL cl, r32
 31089      if v0 == CL && isReg32(v1) {
 31090          p.domain = DomainGeneric
 31091          p.add(0, func(m *_Encoding, v []interface{}) {
 31092              m.rexo(0, v[1], false)
 31093              m.emit(0xd3)
 31094              m.emit(0xe0 | lcode(v[1]))
 31095          })
 31096      }
 31097      // SHLL 1, m32
 31098      if isConst1(v0) && isM32(v1) {
 31099          p.domain = DomainGeneric
 31100          p.add(0, func(m *_Encoding, v []interface{}) {
 31101              m.rexo(0, addr(v[1]), false)
 31102              m.emit(0xd1)
 31103              m.mrsd(4, addr(v[1]), 1)
 31104          })
 31105      }
 31106      // SHLL imm8, m32
 31107      if isImm8(v0) && isM32(v1) {
 31108          p.domain = DomainGeneric
 31109          p.add(0, func(m *_Encoding, v []interface{}) {
 31110              m.rexo(0, addr(v[1]), false)
 31111              m.emit(0xc1)
 31112              m.mrsd(4, addr(v[1]), 1)
 31113              m.imm1(toImmAny(v[0]))
 31114          })
 31115      }
 31116      // SHLL cl, m32
 31117      if v0 == CL && isM32(v1) {
 31118          p.domain = DomainGeneric
 31119          p.add(0, func(m *_Encoding, v []interface{}) {
 31120              m.rexo(0, addr(v[1]), false)
 31121              m.emit(0xd3)
 31122              m.mrsd(4, addr(v[1]), 1)
 31123          })
 31124      }
 31125      if p.len == 0 {
 31126          panic("invalid operands for SHLL")
 31127      }
 31128      return p
 31129  }
 31130  
 31131  // SHLQ performs "Logical Shift Left".
 31132  //
 31133  // Mnemonic        : SHL
 31134  // Supported forms : (6 forms)
 31135  //
 31136  //    * SHLQ 1, r64
 31137  //    * SHLQ imm8, r64
 31138  //    * SHLQ cl, r64
 31139  //    * SHLQ 1, m64
 31140  //    * SHLQ imm8, m64
 31141  //    * SHLQ cl, m64
 31142  //
 31143  func (self *Program) SHLQ(v0 interface{}, v1 interface{}) *Instruction {
 31144      p := self.alloc("SHLQ", 2, Operands { v0, v1 })
 31145      // SHLQ 1, r64
 31146      if isConst1(v0) && isReg64(v1) {
 31147          p.domain = DomainGeneric
 31148          p.add(0, func(m *_Encoding, v []interface{}) {
 31149              m.emit(0x48 | hcode(v[1]))
 31150              m.emit(0xd1)
 31151              m.emit(0xe0 | lcode(v[1]))
 31152          })
 31153      }
 31154      // SHLQ imm8, r64
 31155      if isImm8(v0) && isReg64(v1) {
 31156          p.domain = DomainGeneric
 31157          p.add(0, func(m *_Encoding, v []interface{}) {
 31158              m.emit(0x48 | hcode(v[1]))
 31159              m.emit(0xc1)
 31160              m.emit(0xe0 | lcode(v[1]))
 31161              m.imm1(toImmAny(v[0]))
 31162          })
 31163      }
 31164      // SHLQ cl, r64
 31165      if v0 == CL && isReg64(v1) {
 31166          p.domain = DomainGeneric
 31167          p.add(0, func(m *_Encoding, v []interface{}) {
 31168              m.emit(0x48 | hcode(v[1]))
 31169              m.emit(0xd3)
 31170              m.emit(0xe0 | lcode(v[1]))
 31171          })
 31172      }
 31173      // SHLQ 1, m64
 31174      if isConst1(v0) && isM64(v1) {
 31175          p.domain = DomainGeneric
 31176          p.add(0, func(m *_Encoding, v []interface{}) {
 31177              m.rexm(1, 0, addr(v[1]))
 31178              m.emit(0xd1)
 31179              m.mrsd(4, addr(v[1]), 1)
 31180          })
 31181      }
 31182      // SHLQ imm8, m64
 31183      if isImm8(v0) && isM64(v1) {
 31184          p.domain = DomainGeneric
 31185          p.add(0, func(m *_Encoding, v []interface{}) {
 31186              m.rexm(1, 0, addr(v[1]))
 31187              m.emit(0xc1)
 31188              m.mrsd(4, addr(v[1]), 1)
 31189              m.imm1(toImmAny(v[0]))
 31190          })
 31191      }
 31192      // SHLQ cl, m64
 31193      if v0 == CL && isM64(v1) {
 31194          p.domain = DomainGeneric
 31195          p.add(0, func(m *_Encoding, v []interface{}) {
 31196              m.rexm(1, 0, addr(v[1]))
 31197              m.emit(0xd3)
 31198              m.mrsd(4, addr(v[1]), 1)
 31199          })
 31200      }
 31201      if p.len == 0 {
 31202          panic("invalid operands for SHLQ")
 31203      }
 31204      return p
 31205  }
 31206  
 31207  // SHLW performs "Logical Shift Left".
 31208  //
 31209  // Mnemonic        : SHL
 31210  // Supported forms : (6 forms)
 31211  //
 31212  //    * SHLW 1, r16
 31213  //    * SHLW imm8, r16
 31214  //    * SHLW cl, r16
 31215  //    * SHLW 1, m16
 31216  //    * SHLW imm8, m16
 31217  //    * SHLW cl, m16
 31218  //
 31219  func (self *Program) SHLW(v0 interface{}, v1 interface{}) *Instruction {
 31220      p := self.alloc("SHLW", 2, Operands { v0, v1 })
 31221      // SHLW 1, r16
 31222      if isConst1(v0) && isReg16(v1) {
 31223          p.domain = DomainGeneric
 31224          p.add(0, func(m *_Encoding, v []interface{}) {
 31225              m.emit(0x66)
 31226              m.rexo(0, v[1], false)
 31227              m.emit(0xd1)
 31228              m.emit(0xe0 | lcode(v[1]))
 31229          })
 31230      }
 31231      // SHLW imm8, r16
 31232      if isImm8(v0) && isReg16(v1) {
 31233          p.domain = DomainGeneric
 31234          p.add(0, func(m *_Encoding, v []interface{}) {
 31235              m.emit(0x66)
 31236              m.rexo(0, v[1], false)
 31237              m.emit(0xc1)
 31238              m.emit(0xe0 | lcode(v[1]))
 31239              m.imm1(toImmAny(v[0]))
 31240          })
 31241      }
 31242      // SHLW cl, r16
 31243      if v0 == CL && isReg16(v1) {
 31244          p.domain = DomainGeneric
 31245          p.add(0, func(m *_Encoding, v []interface{}) {
 31246              m.emit(0x66)
 31247              m.rexo(0, v[1], false)
 31248              m.emit(0xd3)
 31249              m.emit(0xe0 | lcode(v[1]))
 31250          })
 31251      }
 31252      // SHLW 1, m16
 31253      if isConst1(v0) && isM16(v1) {
 31254          p.domain = DomainGeneric
 31255          p.add(0, func(m *_Encoding, v []interface{}) {
 31256              m.emit(0x66)
 31257              m.rexo(0, addr(v[1]), false)
 31258              m.emit(0xd1)
 31259              m.mrsd(4, addr(v[1]), 1)
 31260          })
 31261      }
 31262      // SHLW imm8, m16
 31263      if isImm8(v0) && isM16(v1) {
 31264          p.domain = DomainGeneric
 31265          p.add(0, func(m *_Encoding, v []interface{}) {
 31266              m.emit(0x66)
 31267              m.rexo(0, addr(v[1]), false)
 31268              m.emit(0xc1)
 31269              m.mrsd(4, addr(v[1]), 1)
 31270              m.imm1(toImmAny(v[0]))
 31271          })
 31272      }
 31273      // SHLW cl, m16
 31274      if v0 == CL && isM16(v1) {
 31275          p.domain = DomainGeneric
 31276          p.add(0, func(m *_Encoding, v []interface{}) {
 31277              m.emit(0x66)
 31278              m.rexo(0, addr(v[1]), false)
 31279              m.emit(0xd3)
 31280              m.mrsd(4, addr(v[1]), 1)
 31281          })
 31282      }
 31283      if p.len == 0 {
 31284          panic("invalid operands for SHLW")
 31285      }
 31286      return p
 31287  }
 31288  
 31289  // SHLXL performs "Logical Shift Left Without Affecting Flags".
 31290  //
 31291  // Mnemonic        : SHLX
 31292  // Supported forms : (2 forms)
 31293  //
 31294  //    * SHLXL r32, r32, r32    [BMI2]
 31295  //    * SHLXL r32, m32, r32    [BMI2]
 31296  //
 31297  func (self *Program) SHLXL(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
 31298      p := self.alloc("SHLXL", 3, Operands { v0, v1, v2 })
 31299      // SHLXL r32, r32, r32
 31300      if isReg32(v0) && isReg32(v1) && isReg32(v2) {
 31301          self.require(ISA_BMI2)
 31302          p.domain = DomainGeneric
 31303          p.add(0, func(m *_Encoding, v []interface{}) {
 31304              m.emit(0xc4)
 31305              m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
 31306              m.emit(0x79 ^ (hlcode(v[0]) << 3))
 31307              m.emit(0xf7)
 31308              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
 31309          })
 31310      }
 31311      // SHLXL r32, m32, r32
 31312      if isReg32(v0) && isM32(v1) && isReg32(v2) {
 31313          self.require(ISA_BMI2)
 31314          p.domain = DomainGeneric
 31315          p.add(0, func(m *_Encoding, v []interface{}) {
 31316              m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[1]), hlcode(v[0]))
 31317              m.emit(0xf7)
 31318              m.mrsd(lcode(v[2]), addr(v[1]), 1)
 31319          })
 31320      }
 31321      if p.len == 0 {
 31322          panic("invalid operands for SHLXL")
 31323      }
 31324      return p
 31325  }
 31326  
 31327  // SHLXQ performs "Logical Shift Left Without Affecting Flags".
 31328  //
 31329  // Mnemonic        : SHLX
 31330  // Supported forms : (2 forms)
 31331  //
 31332  //    * SHLXQ r64, r64, r64    [BMI2]
 31333  //    * SHLXQ r64, m64, r64    [BMI2]
 31334  //
 31335  func (self *Program) SHLXQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
 31336      p := self.alloc("SHLXQ", 3, Operands { v0, v1, v2 })
 31337      // SHLXQ r64, r64, r64
 31338      if isReg64(v0) && isReg64(v1) && isReg64(v2) {
 31339          self.require(ISA_BMI2)
 31340          p.domain = DomainGeneric
 31341          p.add(0, func(m *_Encoding, v []interface{}) {
 31342              m.emit(0xc4)
 31343              m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
 31344              m.emit(0xf9 ^ (hlcode(v[0]) << 3))
 31345              m.emit(0xf7)
 31346              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
 31347          })
 31348      }
 31349      // SHLXQ r64, m64, r64
 31350      if isReg64(v0) && isM64(v1) && isReg64(v2) {
 31351          self.require(ISA_BMI2)
 31352          p.domain = DomainGeneric
 31353          p.add(0, func(m *_Encoding, v []interface{}) {
 31354              m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[1]), hlcode(v[0]))
 31355              m.emit(0xf7)
 31356              m.mrsd(lcode(v[2]), addr(v[1]), 1)
 31357          })
 31358      }
 31359      if p.len == 0 {
 31360          panic("invalid operands for SHLXQ")
 31361      }
 31362      return p
 31363  }
 31364  
 31365  // SHRB performs "Logical Shift Right".
 31366  //
 31367  // Mnemonic        : SHR
 31368  // Supported forms : (6 forms)
 31369  //
 31370  //    * SHRB 1, r8
 31371  //    * SHRB imm8, r8
 31372  //    * SHRB cl, r8
 31373  //    * SHRB 1, m8
 31374  //    * SHRB imm8, m8
 31375  //    * SHRB cl, m8
 31376  //
 31377  func (self *Program) SHRB(v0 interface{}, v1 interface{}) *Instruction {
 31378      p := self.alloc("SHRB", 2, Operands { v0, v1 })
 31379      // SHRB 1, r8
 31380      if isConst1(v0) && isReg8(v1) {
 31381          p.domain = DomainGeneric
 31382          p.add(0, func(m *_Encoding, v []interface{}) {
 31383              m.rexo(0, v[1], isReg8REX(v[1]))
 31384              m.emit(0xd0)
 31385              m.emit(0xe8 | lcode(v[1]))
 31386          })
 31387      }
 31388      // SHRB imm8, r8
 31389      if isImm8(v0) && isReg8(v1) {
 31390          p.domain = DomainGeneric
 31391          p.add(0, func(m *_Encoding, v []interface{}) {
 31392              m.rexo(0, v[1], isReg8REX(v[1]))
 31393              m.emit(0xc0)
 31394              m.emit(0xe8 | lcode(v[1]))
 31395              m.imm1(toImmAny(v[0]))
 31396          })
 31397      }
 31398      // SHRB cl, r8
 31399      if v0 == CL && isReg8(v1) {
 31400          p.domain = DomainGeneric
 31401          p.add(0, func(m *_Encoding, v []interface{}) {
 31402              m.rexo(0, v[1], isReg8REX(v[1]))
 31403              m.emit(0xd2)
 31404              m.emit(0xe8 | lcode(v[1]))
 31405          })
 31406      }
 31407      // SHRB 1, m8
 31408      if isConst1(v0) && isM8(v1) {
 31409          p.domain = DomainGeneric
 31410          p.add(0, func(m *_Encoding, v []interface{}) {
 31411              m.rexo(0, addr(v[1]), false)
 31412              m.emit(0xd0)
 31413              m.mrsd(5, addr(v[1]), 1)
 31414          })
 31415      }
 31416      // SHRB imm8, m8
 31417      if isImm8(v0) && isM8(v1) {
 31418          p.domain = DomainGeneric
 31419          p.add(0, func(m *_Encoding, v []interface{}) {
 31420              m.rexo(0, addr(v[1]), false)
 31421              m.emit(0xc0)
 31422              m.mrsd(5, addr(v[1]), 1)
 31423              m.imm1(toImmAny(v[0]))
 31424          })
 31425      }
 31426      // SHRB cl, m8
 31427      if v0 == CL && isM8(v1) {
 31428          p.domain = DomainGeneric
 31429          p.add(0, func(m *_Encoding, v []interface{}) {
 31430              m.rexo(0, addr(v[1]), false)
 31431              m.emit(0xd2)
 31432              m.mrsd(5, addr(v[1]), 1)
 31433          })
 31434      }
 31435      if p.len == 0 {
 31436          panic("invalid operands for SHRB")
 31437      }
 31438      return p
 31439  }
 31440  
// SHRDL performs "Integer Double Precision Shift Right".
//
// Mnemonic        : SHRD
// Supported forms : (4 forms)
//
//    * SHRDL imm8, r32, r32
//    * SHRDL cl, r32, r32
//    * SHRDL imm8, r32, m32
//    * SHRDL cl, r32, m32
//
func (self *Program) SHRDL(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("SHRDL", 3, Operands { v0, v1, v2 })
    // SHRDL imm8, r32, r32 : 0F AC /r ib
    if isImm8(v0) && isReg32(v1) && isReg32(v2) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[2], false)
            m.emit(0x0f)
            m.emit(0xac)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2])) // ModRM: reg = v1 (source), r/m = v2 (destination)
            m.imm1(toImmAny(v[0]))
        })
    }
    // SHRDL cl, r32, r32 : 0F AD /r (shift count taken from CL)
    if v0 == CL && isReg32(v1) && isReg32(v2) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[2], false)
            m.emit(0x0f)
            m.emit(0xad)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
        })
    }
    // SHRDL imm8, r32, m32 : 0F AC /r ib
    if isImm8(v0) && isReg32(v1) && isM32(v2) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[2]), false)
            m.emit(0x0f)
            m.emit(0xac)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // SHRDL cl, r32, m32 : 0F AD /r (shift count taken from CL)
    if v0 == CL && isReg32(v1) && isM32(v2) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[2]), false)
            m.emit(0x0f)
            m.emit(0xad)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)
        })
    }
    // no supported form matched the given operands
    if p.len == 0 {
        panic("invalid operands for SHRDL")
    }
    return p
}
 31500  
// SHRDQ performs "Integer Double Precision Shift Right".
//
// Mnemonic        : SHRD
// Supported forms : (4 forms)
//
//    * SHRDQ imm8, r64, r64
//    * SHRDQ cl, r64, r64
//    * SHRDQ imm8, r64, m64
//    * SHRDQ cl, r64, m64
//
func (self *Program) SHRDQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("SHRDQ", 3, Operands { v0, v1, v2 })
    // SHRDQ imm8, r64, r64 : REX.W + 0F AC /r ib
    if isImm8(v0) && isReg64(v1) && isReg64(v2) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[2])) // REX.W with R/B extension bits
            m.emit(0x0f)
            m.emit(0xac)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2])) // ModRM: reg = v1 (source), r/m = v2 (destination)
            m.imm1(toImmAny(v[0]))
        })
    }
    // SHRDQ cl, r64, r64 : REX.W + 0F AD /r (shift count taken from CL)
    if v0 == CL && isReg64(v1) && isReg64(v2) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[2]))
            m.emit(0x0f)
            m.emit(0xad)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
        })
    }
    // SHRDQ imm8, r64, m64 : REX.W + 0F AC /r ib
    if isImm8(v0) && isReg64(v1) && isM64(v2) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[2]))
            m.emit(0x0f)
            m.emit(0xac)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // SHRDQ cl, r64, m64 : REX.W + 0F AD /r (shift count taken from CL)
    if v0 == CL && isReg64(v1) && isM64(v2) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[2]))
            m.emit(0x0f)
            m.emit(0xad)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)
        })
    }
    // no supported form matched the given operands
    if p.len == 0 {
        panic("invalid operands for SHRDQ")
    }
    return p
}
 31560  
// SHRDW performs "Integer Double Precision Shift Right".
//
// Mnemonic        : SHRD
// Supported forms : (4 forms)
//
//    * SHRDW imm8, r16, r16
//    * SHRDW cl, r16, r16
//    * SHRDW imm8, r16, m16
//    * SHRDW cl, r16, m16
//
func (self *Program) SHRDW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("SHRDW", 3, Operands { v0, v1, v2 })
    // SHRDW imm8, r16, r16 : 66 0F AC /r ib (66h operand-size prefix selects 16-bit)
    if isImm8(v0) && isReg16(v1) && isReg16(v2) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[2], false)
            m.emit(0x0f)
            m.emit(0xac)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2])) // ModRM: reg = v1 (source), r/m = v2 (destination)
            m.imm1(toImmAny(v[0]))
        })
    }
    // SHRDW cl, r16, r16 : 66 0F AD /r (shift count taken from CL)
    if v0 == CL && isReg16(v1) && isReg16(v2) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[2], false)
            m.emit(0x0f)
            m.emit(0xad)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
        })
    }
    // SHRDW imm8, r16, m16 : 66 0F AC /r ib
    if isImm8(v0) && isReg16(v1) && isM16(v2) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[2]), false)
            m.emit(0x0f)
            m.emit(0xac)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // SHRDW cl, r16, m16 : 66 0F AD /r (shift count taken from CL)
    if v0 == CL && isReg16(v1) && isM16(v2) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[2]), false)
            m.emit(0x0f)
            m.emit(0xad)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)
        })
    }
    // no supported form matched the given operands
    if p.len == 0 {
        panic("invalid operands for SHRDW")
    }
    return p
}
 31624  
// SHRL performs "Logical Shift Right".
//
// Mnemonic        : SHR
// Supported forms : (6 forms)
//
//    * SHRL 1, r32
//    * SHRL imm8, r32
//    * SHRL cl, r32
//    * SHRL 1, m32
//    * SHRL imm8, m32
//    * SHRL cl, m32
//
func (self *Program) SHRL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SHRL", 2, Operands { v0, v1 })
    // SHRL 1, r32 : D1 /5 (shift-by-one short form)
    if isConst1(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xd1)
            m.emit(0xe8 | lcode(v[1])) // ModRM: /5 opcode extension, r/m = v1
        })
    }
    // SHRL imm8, r32 : C1 /5 ib
    if isImm8(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xc1)
            m.emit(0xe8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // SHRL cl, r32 : D3 /5 (shift count taken from CL)
    if v0 == CL && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xd3)
            m.emit(0xe8 | lcode(v[1]))
        })
    }
    // SHRL 1, m32 : D1 /5
    if isConst1(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd1)
            m.mrsd(5, addr(v[1]), 1)
        })
    }
    // SHRL imm8, m32 : C1 /5 ib
    if isImm8(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc1)
            m.mrsd(5, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // SHRL cl, m32 : D3 /5 (shift count taken from CL)
    if v0 == CL && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd3)
            m.mrsd(5, addr(v[1]), 1)
        })
    }
    // no supported form matched the given operands
    if p.len == 0 {
        panic("invalid operands for SHRL")
    }
    return p
}
 31700  
// SHRQ performs "Logical Shift Right".
//
// Mnemonic        : SHR
// Supported forms : (6 forms)
//
//    * SHRQ 1, r64
//    * SHRQ imm8, r64
//    * SHRQ cl, r64
//    * SHRQ 1, m64
//    * SHRQ imm8, m64
//    * SHRQ cl, m64
//
func (self *Program) SHRQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SHRQ", 2, Operands { v0, v1 })
    // SHRQ 1, r64 : REX.W + D1 /5 (shift-by-one short form)
    if isConst1(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1])) // REX.W with B extension bit
            m.emit(0xd1)
            m.emit(0xe8 | lcode(v[1])) // ModRM: /5 opcode extension, r/m = v1
        })
    }
    // SHRQ imm8, r64 : REX.W + C1 /5 ib
    if isImm8(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0xc1)
            m.emit(0xe8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // SHRQ cl, r64 : REX.W + D3 /5 (shift count taken from CL)
    if v0 == CL && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0xd3)
            m.emit(0xe8 | lcode(v[1]))
        })
    }
    // SHRQ 1, m64 : REX.W + D1 /5
    if isConst1(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0xd1)
            m.mrsd(5, addr(v[1]), 1)
        })
    }
    // SHRQ imm8, m64 : REX.W + C1 /5 ib
    if isImm8(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0xc1)
            m.mrsd(5, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // SHRQ cl, m64 : REX.W + D3 /5 (shift count taken from CL)
    if v0 == CL && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0xd3)
            m.mrsd(5, addr(v[1]), 1)
        })
    }
    // no supported form matched the given operands
    if p.len == 0 {
        panic("invalid operands for SHRQ")
    }
    return p
}
 31776  
// SHRW performs "Logical Shift Right".
//
// Mnemonic        : SHR
// Supported forms : (6 forms)
//
//    * SHRW 1, r16
//    * SHRW imm8, r16
//    * SHRW cl, r16
//    * SHRW 1, m16
//    * SHRW imm8, m16
//    * SHRW cl, m16
//
func (self *Program) SHRW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SHRW", 2, Operands { v0, v1 })
    // SHRW 1, r16 : 66 D1 /5 (66h operand-size prefix selects 16-bit)
    if isConst1(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0xd1)
            m.emit(0xe8 | lcode(v[1])) // ModRM: /5 opcode extension, r/m = v1
        })
    }
    // SHRW imm8, r16 : 66 C1 /5 ib
    if isImm8(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0xc1)
            m.emit(0xe8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // SHRW cl, r16 : 66 D3 /5 (shift count taken from CL)
    if v0 == CL && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0xd3)
            m.emit(0xe8 | lcode(v[1]))
        })
    }
    // SHRW 1, m16 : 66 D1 /5
    if isConst1(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd1)
            m.mrsd(5, addr(v[1]), 1)
        })
    }
    // SHRW imm8, m16 : 66 C1 /5 ib
    if isImm8(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0xc1)
            m.mrsd(5, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // SHRW cl, m16 : 66 D3 /5 (shift count taken from CL)
    if v0 == CL && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0xd3)
            m.mrsd(5, addr(v[1]), 1)
        })
    }
    // no supported form matched the given operands
    if p.len == 0 {
        panic("invalid operands for SHRW")
    }
    return p
}
 31858  
// SHRXL performs "Logical Shift Right Without Affecting Flags".
//
// Mnemonic        : SHRX
// Supported forms : (2 forms)
//
//    * SHRXL r32, r32, r32    [BMI2]
//    * SHRXL r32, m32, r32    [BMI2]
//
func (self *Program) SHRXL(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("SHRXL", 3, Operands { v0, v1, v2 })
    // SHRXL r32, r32, r32 : VEX-encoded F7 /r (three-byte C4 VEX emitted inline; shift count in VEX.vvvv = v0)
    if isReg32(v0) && isReg32(v1) && isReg32(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5)) // VEX byte 1: R/B extension bits folded in by XOR
            m.emit(0x7b ^ (hlcode(v[0]) << 3))                     // VEX byte 2: vvvv = v0 (inverted), pp/W preset in 0x7b
            m.emit(0xf7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))          // ModRM: reg = v2 (destination), r/m = v1 (source)
        })
    }
    // SHRXL r32, m32, r32 : VEX-encoded F7 /r with a memory source
    if isReg32(v0) && isM32(v1) && isReg32(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x03, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0xf7)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // no supported form matched the given operands
    if p.len == 0 {
        panic("invalid operands for SHRXL")
    }
    return p
}
 31896  
// SHRXQ performs "Logical Shift Right Without Affecting Flags".
//
// Mnemonic        : SHRX
// Supported forms : (2 forms)
//
//    * SHRXQ r64, r64, r64    [BMI2]
//    * SHRXQ r64, m64, r64    [BMI2]
//
func (self *Program) SHRXQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("SHRXQ", 3, Operands { v0, v1, v2 })
    // SHRXQ r64, r64, r64 : VEX-encoded F7 /r, W=1 for 64-bit (note 0xfb vs SHRXL's 0x7b)
    if isReg64(v0) && isReg64(v1) && isReg64(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5)) // VEX byte 1: R/B extension bits folded in by XOR
            m.emit(0xfb ^ (hlcode(v[0]) << 3))                     // VEX byte 2: vvvv = v0 (inverted), W bit set in 0xfb
            m.emit(0xf7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))          // ModRM: reg = v2 (destination), r/m = v1 (source)
        })
    }
    // SHRXQ r64, m64, r64 : VEX-encoded F7 /r with a memory source
    if isReg64(v0) && isM64(v1) && isReg64(v2) {
        self.require(ISA_BMI2)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x83, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0xf7)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // no supported form matched the given operands
    if p.len == 0 {
        panic("invalid operands for SHRXQ")
    }
    return p
}
 31934  
// SHUFPD performs "Shuffle Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : SHUFPD
// Supported forms : (2 forms)
//
//    * SHUFPD imm8, xmm, xmm     [SSE2]
//    * SHUFPD imm8, m128, xmm    [SSE2]
//
func (self *Program) SHUFPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("SHUFPD", 3, Operands { v0, v1, v2 })
    // SHUFPD imm8, xmm, xmm : 66 0F C6 /r ib
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0xc6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1])) // ModRM: reg = v2 (destination), r/m = v1 (source)
            m.imm1(toImmAny(v[0]))
        })
    }
    // SHUFPD imm8, m128, xmm : 66 0F C6 /r ib
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xc6)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // no supported form matched the given operands
    if p.len == 0 {
        panic("invalid operands for SHUFPD")
    }
    return p
}
 31976  
// SHUFPS performs "Shuffle Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : SHUFPS
// Supported forms : (2 forms)
//
//    * SHUFPS imm8, xmm, xmm     [SSE]
//    * SHUFPS imm8, m128, xmm    [SSE]
//
func (self *Program) SHUFPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("SHUFPS", 3, Operands { v0, v1, v2 })
    // SHUFPS imm8, xmm, xmm : 0F C6 /r ib (no prefix — single-precision variant)
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[2]), v[1], false)
            m.emit(0x0f)
            m.emit(0xc6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1])) // ModRM: reg = v2 (destination), r/m = v1 (source)
            m.imm1(toImmAny(v[0]))
        })
    }
    // SHUFPS imm8, m128, xmm : 0F C6 /r ib
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[2]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xc6)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // no supported form matched the given operands
    if p.len == 0 {
        panic("invalid operands for SHUFPS")
    }
    return p
}
 32016  
// SQRTPD performs "Compute Square Roots of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : SQRTPD
// Supported forms : (2 forms)
//
//    * SQRTPD xmm, xmm     [SSE2]
//    * SQRTPD m128, xmm    [SSE2]
//
func (self *Program) SQRTPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SQRTPD", 2, Operands { v0, v1 })
    // SQRTPD xmm, xmm : 66 0F 51 /r
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x51)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: reg = v1 (destination), r/m = v0 (source)
        })
    }
    // SQRTPD m128, xmm : 66 0F 51 /r
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x51)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no supported form matched the given operands
    if p.len == 0 {
        panic("invalid operands for SQRTPD")
    }
    return p
}
 32056  
// SQRTPS performs "Compute Square Roots of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : SQRTPS
// Supported forms : (2 forms)
//
//    * SQRTPS xmm, xmm     [SSE]
//    * SQRTPS m128, xmm    [SSE]
//
func (self *Program) SQRTPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SQRTPS", 2, Operands { v0, v1 })
    // SQRTPS xmm, xmm : 0F 51 /r (no prefix — packed-single variant)
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x51)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: reg = v1 (destination), r/m = v0 (source)
        })
    }
    // SQRTPS m128, xmm : 0F 51 /r
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x51)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no supported form matched the given operands
    if p.len == 0 {
        panic("invalid operands for SQRTPS")
    }
    return p
}
 32094  
// SQRTSD performs "Compute Square Root of Scalar Double-Precision Floating-Point Value".
//
// Mnemonic        : SQRTSD
// Supported forms : (2 forms)
//
//    * SQRTSD xmm, xmm    [SSE2]
//    * SQRTSD m64, xmm    [SSE2]
//
func (self *Program) SQRTSD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SQRTSD", 2, Operands { v0, v1 })
    // SQRTSD xmm, xmm : F2 0F 51 /r (F2h prefix selects scalar double)
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x51)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: reg = v1 (destination), r/m = v0 (source)
        })
    }
    // SQRTSD m64, xmm : F2 0F 51 /r
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x51)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no supported form matched the given operands
    if p.len == 0 {
        panic("invalid operands for SQRTSD")
    }
    return p
}
 32134  
// SQRTSS performs "Compute Square Root of Scalar Single-Precision Floating-Point Value".
//
// Mnemonic        : SQRTSS
// Supported forms : (2 forms)
//
//    * SQRTSS xmm, xmm    [SSE]
//    * SQRTSS m32, xmm    [SSE]
//
func (self *Program) SQRTSS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SQRTSS", 2, Operands { v0, v1 })
    // SQRTSS xmm, xmm : F3 0F 51 /r (F3h prefix selects scalar single)
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x51)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0])) // ModRM: reg = v1 (destination), r/m = v0 (source)
        })
    }
    // SQRTSS m32, xmm : F3 0F 51 /r
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x51)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // no supported form matched the given operands
    if p.len == 0 {
        panic("invalid operands for SQRTSS")
    }
    return p
}
 32174  
// STC performs "Set Carry Flag".
//
// Mnemonic        : STC
// Supported forms : (1 form)
//
//    * STC
//
func (self *Program) STC() *Instruction {
    p := self.alloc("STC", 0, Operands {  })
    // STC : single-byte opcode F9, no operands
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0xf9)
    })
    return p
}
 32191  
// STD performs "Set Direction Flag".
//
// Mnemonic        : STD
// Supported forms : (1 form)
//
//    * STD
//
func (self *Program) STD() *Instruction {
    p := self.alloc("STD", 0, Operands {  })
    // STD : single-byte opcode FD, no operands
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0xfd)
    })
    return p
}
 32208  
// STMXCSR performs "Store MXCSR Register State".
//
// Mnemonic        : STMXCSR
// Supported forms : (1 form)
//
//    * STMXCSR m32    [SSE]
//
func (self *Program) STMXCSR(v0 interface{}) *Instruction {
    p := self.alloc("STMXCSR", 1, Operands { v0 })
    // STMXCSR m32 : 0F AE /3 (memory destination only)
    if isM32(v0) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xae)
            m.mrsd(3, addr(v[0]), 1) // /3 opcode extension in the ModRM reg field
        })
    }
    // no supported form matched the given operand
    if p.len == 0 {
        panic("invalid operands for STMXCSR")
    }
    return p
}
 32234  
// SUBB performs "Subtract".
//
// Mnemonic        : SUB
// Supported forms : (6 forms)
//
//    * SUBB imm8, al
//    * SUBB imm8, r8
//    * SUBB r8, r8
//    * SUBB m8, r8
//    * SUBB imm8, m8
//    * SUBB r8, m8
//
func (self *Program) SUBB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SUBB", 2, Operands { v0, v1 })
    // SUBB imm8, al : 2C ib (AL-accumulator short form)
    if isImm8(v0) && v1 == AL {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x2c)
            m.imm1(toImmAny(v[0]))
        })
    }
    // SUBB imm8, r8 : 80 /5 ib
    if isImm8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0x80)
            m.emit(0xe8 | lcode(v[1])) // ModRM: /5 opcode extension, r/m = v1
            m.imm1(toImmAny(v[0]))
        })
    }
    // SUBB r8, r8 : two equivalent encodings, 28 /r and 2A /r (operands swapped in ModRM)
    if isReg8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], isReg8REX(v[0]) || isReg8REX(v[1]))
            m.emit(0x28)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], isReg8REX(v[0]) || isReg8REX(v[1]))
            m.emit(0x2a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // SUBB m8, r8 : 2A /r (memory source)
    if isM8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), isReg8REX(v[1]))
            m.emit(0x2a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // SUBB imm8, m8 : 80 /5 ib (memory destination)
    if isImm8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0x80)
            m.mrsd(5, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // SUBB r8, m8 : 28 /r (memory destination)
    if isReg8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), isReg8REX(v[0]))
            m.emit(0x28)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // no supported form matched the given operands
    if p.len == 0 {
        panic("invalid operands for SUBB")
    }
    return p
}
 32314  
 32315  // SUBL performs "Subtract".
 32316  //
 32317  // Mnemonic        : SUB
 32318  // Supported forms : (8 forms)
 32319  //
 32320  //    * SUBL imm32, eax
 32321  //    * SUBL imm8, r32
 32322  //    * SUBL imm32, r32
 32323  //    * SUBL r32, r32
 32324  //    * SUBL m32, r32
 32325  //    * SUBL imm8, m32
 32326  //    * SUBL imm32, m32
 32327  //    * SUBL r32, m32
 32328  //
func (self *Program) SUBL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SUBL", 2, Operands { v0, v1 })
    // SUBL imm32, eax
    if isImm32(v0) && v1 == EAX {
        p.domain = DomainGeneric
        // 2D id — accumulator short form, no ModRM byte.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x2d)
            m.imm4(toImmAny(v[0]))
        })
    }
    // SUBL imm8, r32
    if isImm8Ext(v0, 4) && isReg32(v1) {
        p.domain = DomainGeneric
        // 83 /5 ib — sign-extended imm8; ModRM 0xe8|rm has reg field 5.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0x83)
            m.emit(0xe8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // SUBL imm32, r32
    if isImm32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        // 81 /5 id — full 32-bit immediate.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0x81)
            m.emit(0xe8 | lcode(v[1]))
            m.imm4(toImmAny(v[0]))
        })
    }
    // SUBL r32, r32
    if isReg32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        // Two equivalent register-register encodings; the encoder may pick either.
        // 29 /r — MR form (source in ModRM.reg).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x29)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
        // 2B /r — RM form (destination in ModRM.reg).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x2b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // SUBL m32, r32
    if isM32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        // 2B /r — load form, memory operand encoded via ModRM/SIB/disp.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x2b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // SUBL imm8, m32
    if isImm8Ext(v0, 4) && isM32(v1) {
        p.domain = DomainGeneric
        // 83 /5 ib — ModRM.reg = 5 selects SUB in the group-1 opcode table.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0x83)
            m.mrsd(5, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // SUBL imm32, m32
    if isImm32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        // 81 /5 id
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0x81)
            m.mrsd(5, addr(v[1]), 1)
            m.imm4(toImmAny(v[0]))
        })
    }
    // SUBL r32, m32
    if isReg32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        // 29 /r — store form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x29)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No operand pattern matched: the caller passed an unsupported combination.
    if p.len == 0 {
        panic("invalid operands for SUBL")
    }
    return p
}
 32416  
 32417  // SUBPD performs "Subtract Packed Double-Precision Floating-Point Values".
 32418  //
 32419  // Mnemonic        : SUBPD
 32420  // Supported forms : (2 forms)
 32421  //
 32422  //    * SUBPD xmm, xmm     [SSE2]
 32423  //    * SUBPD m128, xmm    [SSE2]
 32424  //
func (self *Program) SUBPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SUBPD", 2, Operands { v0, v1 })
    // SUBPD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // 66 0F 5C /r — register-direct form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // SUBPD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // 66 0F 5C /r — memory-source form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No operand pattern matched: unsupported combination.
    if p.len == 0 {
        panic("invalid operands for SUBPD")
    }
    return p
}
 32456  
 32457  // SUBPS performs "Subtract Packed Single-Precision Floating-Point Values".
 32458  //
 32459  // Mnemonic        : SUBPS
 32460  // Supported forms : (2 forms)
 32461  //
 32462  //    * SUBPS xmm, xmm     [SSE]
 32463  //    * SUBPS m128, xmm    [SSE]
 32464  //
func (self *Program) SUBPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SUBPS", 2, Operands { v0, v1 })
    // SUBPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // 0F 5C /r — no mandatory prefix (packed single).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // SUBPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // 0F 5C /r — memory-source form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No operand pattern matched: unsupported combination.
    if p.len == 0 {
        panic("invalid operands for SUBPS")
    }
    return p
}
 32494  
 32495  // SUBQ performs "Subtract".
 32496  //
 32497  // Mnemonic        : SUB
 32498  // Supported forms : (8 forms)
 32499  //
 32500  //    * SUBQ imm32, rax
 32501  //    * SUBQ imm8, r64
 32502  //    * SUBQ imm32, r64
 32503  //    * SUBQ r64, r64
 32504  //    * SUBQ m64, r64
 32505  //    * SUBQ imm8, m64
 32506  //    * SUBQ imm32, m64
 32507  //    * SUBQ r64, m64
 32508  //
func (self *Program) SUBQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SUBQ", 2, Operands { v0, v1 })
    // SUBQ imm32, rax
    if isImm32(v0) && v1 == RAX {
        p.domain = DomainGeneric
        // REX.W (0x48) + 2D id — accumulator short form, imm32 sign-extended to 64 bits.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48)
            m.emit(0x2d)
            m.imm4(toImmAny(v[0]))
        })
    }
    // SUBQ imm8, r64
    if isImm8Ext(v0, 8) && isReg64(v1) {
        p.domain = DomainGeneric
        // REX.W + 83 /5 ib — REX.B folded in via hcode of the register.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0x83)
            m.emit(0xe8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // SUBQ imm32, r64
    if isImm32Ext(v0, 8) && isReg64(v1) {
        p.domain = DomainGeneric
        // REX.W + 81 /5 id
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0x81)
            m.emit(0xe8 | lcode(v[1]))
            m.imm4(toImmAny(v[0]))
        })
    }
    // SUBQ r64, r64
    if isReg64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        // Two equivalent register-register encodings; the encoder may pick either.
        // REX.W + 29 /r — MR form; REX.R from source (<<2), REX.B from destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1]))
            m.emit(0x29)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
        // REX.W + 2B /r — RM form with the roles swapped.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
            m.emit(0x2b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // SUBQ m64, r64
    if isM64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        // REX.W + 2B /r — load form; rexm(1, ...) requests the W bit.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[1]), addr(v[0]))
            m.emit(0x2b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // SUBQ imm8, m64
    if isImm8Ext(v0, 8) && isM64(v1) {
        p.domain = DomainGeneric
        // REX.W + 83 /5 ib — ModRM.reg = 5 selects SUB.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0x83)
            m.mrsd(5, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // SUBQ imm32, m64
    if isImm32Ext(v0, 8) && isM64(v1) {
        p.domain = DomainGeneric
        // REX.W + 81 /5 id
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0x81)
            m.mrsd(5, addr(v[1]), 1)
            m.imm4(toImmAny(v[0]))
        })
    }
    // SUBQ r64, m64
    if isReg64(v0) && isM64(v1) {
        p.domain = DomainGeneric
        // REX.W + 29 /r — store form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[0]), addr(v[1]))
            m.emit(0x29)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No operand pattern matched: unsupported combination.
    if p.len == 0 {
        panic("invalid operands for SUBQ")
    }
    return p
}
 32597  
 32598  // SUBSD performs "Subtract Scalar Double-Precision Floating-Point Values".
 32599  //
 32600  // Mnemonic        : SUBSD
 32601  // Supported forms : (2 forms)
 32602  //
 32603  //    * SUBSD xmm, xmm    [SSE2]
 32604  //    * SUBSD m64, xmm    [SSE2]
 32605  //
func (self *Program) SUBSD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SUBSD", 2, Operands { v0, v1 })
    // SUBSD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // F2 0F 5C /r — scalar-double mandatory prefix, register-direct form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // SUBSD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        // F2 0F 5C /r — memory-source form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf2)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No operand pattern matched: unsupported combination.
    if p.len == 0 {
        panic("invalid operands for SUBSD")
    }
    return p
}
 32637  
 32638  // SUBSS performs "Subtract Scalar Single-Precision Floating-Point Values".
 32639  //
 32640  // Mnemonic        : SUBSS
 32641  // Supported forms : (2 forms)
 32642  //
 32643  //    * SUBSS xmm, xmm    [SSE]
 32644  //    * SUBSS m32, xmm    [SSE]
 32645  //
func (self *Program) SUBSS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SUBSS", 2, Operands { v0, v1 })
    // SUBSS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // F3 0F 5C /r — scalar-single mandatory prefix, register-direct form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // SUBSS m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        // F3 0F 5C /r — memory-source form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x5c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No operand pattern matched: unsupported combination.
    if p.len == 0 {
        panic("invalid operands for SUBSS")
    }
    return p
}
 32677  
 32678  // SUBW performs "Subtract".
 32679  //
 32680  // Mnemonic        : SUB
 32681  // Supported forms : (8 forms)
 32682  //
 32683  //    * SUBW imm16, ax
 32684  //    * SUBW imm8, r16
 32685  //    * SUBW imm16, r16
 32686  //    * SUBW r16, r16
 32687  //    * SUBW m16, r16
 32688  //    * SUBW imm8, m16
 32689  //    * SUBW imm16, m16
 32690  //    * SUBW r16, m16
 32691  //
func (self *Program) SUBW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("SUBW", 2, Operands { v0, v1 })
    // SUBW imm16, ax
    if isImm16(v0) && v1 == AX {
        p.domain = DomainGeneric
        // 66 + 2D iw — operand-size prefix selects the 16-bit accumulator form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.emit(0x2d)
            m.imm2(toImmAny(v[0]))
        })
    }
    // SUBW imm8, r16
    if isImm8Ext(v0, 2) && isReg16(v1) {
        p.domain = DomainGeneric
        // 66 + 83 /5 ib — sign-extended imm8.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0x83)
            m.emit(0xe8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // SUBW imm16, r16
    if isImm16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        // 66 + 81 /5 iw
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0x81)
            m.emit(0xe8 | lcode(v[1]))
            m.imm2(toImmAny(v[0]))
        })
    }
    // SUBW r16, r16
    if isReg16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        // Two equivalent register-register encodings; the encoder may pick either.
        // 66 + 29 /r — MR form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x29)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
        // 66 + 2B /r — RM form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x2b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // SUBW m16, r16
    if isM16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        // 66 + 2B /r — load form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x2b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // SUBW imm8, m16
    if isImm8Ext(v0, 2) && isM16(v1) {
        p.domain = DomainGeneric
        // 66 + 83 /5 ib — ModRM.reg = 5 selects SUB.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0x83)
            m.mrsd(5, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // SUBW imm16, m16
    if isImm16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        // 66 + 81 /5 iw
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0x81)
            m.mrsd(5, addr(v[1]), 1)
            m.imm2(toImmAny(v[0]))
        })
    }
    // SUBW r16, m16
    if isReg16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        // 66 + 29 /r — store form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x29)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No operand pattern matched: unsupported combination.
    if p.len == 0 {
        panic("invalid operands for SUBW")
    }
    return p
}
 32788  
 32789  // SYSCALL performs "Fast System Call".
 32790  //
 32791  // Mnemonic        : SYSCALL
 32792  // Supported forms : (1 form)
 32793  //
 32794  //    * SYSCALL
 32795  //
func (self *Program) SYSCALL() *Instruction {
    p := self.alloc("SYSCALL", 0, Operands {  })
    // SYSCALL
    p.domain = DomainGeneric
    // 0F 05 — fixed two-byte encoding, no operands, so no validity check is needed.
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0x0f)
        m.emit(0x05)
    })
    return p
}
 32806  
 32807  // T1MSKC performs "Inverse Mask From Trailing Ones".
 32808  //
 32809  // Mnemonic        : T1MSKC
 32810  // Supported forms : (4 forms)
 32811  //
 32812  //    * T1MSKC r32, r32    [TBM]
 32813  //    * T1MSKC m32, r32    [TBM]
 32814  //    * T1MSKC r64, r64    [TBM]
 32815  //    * T1MSKC m64, r64    [TBM]
 32816  //
func (self *Program) T1MSKC(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("T1MSKC", 2, Operands { v0, v1 })
    // T1MSKC r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        // XOP escape 0x8F; the XOR'd hcode/hlcode bits set the (inverted) R and
        // vvvv fields of the hand-built three-byte XOP prefix. Opcode 0x01 with
        // ModRM.reg = 7 (0xf8 | rm) selects T1MSKC in the TBM group.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[0]) << 5))
            m.emit(0x78 ^ (hlcode(v[1]) << 3))
            m.emit(0x01)
            m.emit(0xf8 | lcode(v[0]))
        })
    }
    // T1MSKC m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        // Same instruction via the vex3 helper (map 0b1001, W=0), memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, 0, addr(v[0]), hlcode(v[1]))
            m.emit(0x01)
            m.mrsd(7, addr(v[0]), 1)
        })
    }
    // T1MSKC r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        // 64-bit form: third prefix byte base 0xf8 instead of 0x78 (W bit set).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))
            m.emit(0x01)
            m.emit(0xf8 | lcode(v[0]))
        })
    }
    // T1MSKC m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        // vex3 with 0x80 flag (W=1) for the 64-bit memory form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x80, 0, addr(v[0]), hlcode(v[1]))
            m.emit(0x01)
            m.mrsd(7, addr(v[0]), 1)
        })
    }
    // No operand pattern matched: unsupported combination.
    if p.len == 0 {
        panic("invalid operands for T1MSKC")
    }
    return p
}
 32868  
 32869  // TESTB performs "Logical Compare".
 32870  //
 32871  // Mnemonic        : TEST
 32872  // Supported forms : (5 forms)
 32873  //
 32874  //    * TESTB imm8, al
 32875  //    * TESTB imm8, r8
 32876  //    * TESTB r8, r8
 32877  //    * TESTB imm8, m8
 32878  //    * TESTB r8, m8
 32879  //
func (self *Program) TESTB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("TESTB", 2, Operands { v0, v1 })
    // TESTB imm8, al
    if isImm8(v0) && v1 == AL {
        p.domain = DomainGeneric
        // A8 ib — accumulator short form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xa8)
            m.imm1(toImmAny(v[0]))
        })
    }
    // TESTB imm8, r8
    if isImm8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        // F6 /0 ib — ModRM.reg = 0 selects TEST; REX forced for SPL/BPL/SIL/DIL.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0xf6)
            m.emit(0xc0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // TESTB r8, r8
    if isReg8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        // 84 /r — REX required if either operand is a REX-only byte register.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], isReg8REX(v[0]) || isReg8REX(v[1]))
            m.emit(0x84)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // TESTB imm8, m8
    if isImm8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        // F6 /0 ib — memory form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xf6)
            m.mrsd(0, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // TESTB r8, m8
    if isReg8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        // 84 /r — register-with-memory form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), isReg8REX(v[0]))
            m.emit(0x84)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No operand pattern matched: unsupported combination.
    if p.len == 0 {
        panic("invalid operands for TESTB")
    }
    return p
}
 32933  
 32934  // TESTL performs "Logical Compare".
 32935  //
 32936  // Mnemonic        : TEST
 32937  // Supported forms : (5 forms)
 32938  //
 32939  //    * TESTL imm32, eax
 32940  //    * TESTL imm32, r32
 32941  //    * TESTL r32, r32
 32942  //    * TESTL imm32, m32
 32943  //    * TESTL r32, m32
 32944  //
func (self *Program) TESTL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("TESTL", 2, Operands { v0, v1 })
    // TESTL imm32, eax
    if isImm32(v0) && v1 == EAX {
        p.domain = DomainGeneric
        // A9 id — accumulator short form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xa9)
            m.imm4(toImmAny(v[0]))
        })
    }
    // TESTL imm32, r32
    if isImm32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        // F7 /0 id — ModRM.reg = 0 selects TEST.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0xf7)
            m.emit(0xc0 | lcode(v[1]))
            m.imm4(toImmAny(v[0]))
        })
    }
    // TESTL r32, r32
    if isReg32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        // 85 /r — register-direct form (TEST is symmetric, one encoding suffices).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x85)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // TESTL imm32, m32
    if isImm32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        // F7 /0 id — memory form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0xf7)
            m.mrsd(0, addr(v[1]), 1)
            m.imm4(toImmAny(v[0]))
        })
    }
    // TESTL r32, m32
    if isReg32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        // 85 /r — register-with-memory form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x85)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No operand pattern matched: unsupported combination.
    if p.len == 0 {
        panic("invalid operands for TESTL")
    }
    return p
}
 32998  
 32999  // TESTQ performs "Logical Compare".
 33000  //
 33001  // Mnemonic        : TEST
 33002  // Supported forms : (5 forms)
 33003  //
 33004  //    * TESTQ imm32, rax
 33005  //    * TESTQ imm32, r64
 33006  //    * TESTQ r64, r64
 33007  //    * TESTQ imm32, m64
 33008  //    * TESTQ r64, m64
 33009  //
func (self *Program) TESTQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("TESTQ", 2, Operands { v0, v1 })
    // TESTQ imm32, rax
    if isImm32(v0) && v1 == RAX {
        p.domain = DomainGeneric
        // REX.W (0x48) + A9 id — imm32 sign-extended to 64 bits.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48)
            m.emit(0xa9)
            m.imm4(toImmAny(v[0]))
        })
    }
    // TESTQ imm32, r64
    if isImm32Ext(v0, 8) && isReg64(v1) {
        p.domain = DomainGeneric
        // REX.W + F7 /0 id — REX.B folded in via hcode of the register.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0xf7)
            m.emit(0xc0 | lcode(v[1]))
            m.imm4(toImmAny(v[0]))
        })
    }
    // TESTQ r64, r64
    if isReg64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        // REX.W + 85 /r — REX.R from v0 (<<2), REX.B from v1.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1]))
            m.emit(0x85)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // TESTQ imm32, m64
    if isImm32Ext(v0, 8) && isM64(v1) {
        p.domain = DomainGeneric
        // REX.W + F7 /0 id — memory form; rexm(1, ...) requests the W bit.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0xf7)
            m.mrsd(0, addr(v[1]), 1)
            m.imm4(toImmAny(v[0]))
        })
    }
    // TESTQ r64, m64
    if isReg64(v0) && isM64(v1) {
        p.domain = DomainGeneric
        // REX.W + 85 /r — register-with-memory form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[0]), addr(v[1]))
            m.emit(0x85)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No operand pattern matched: unsupported combination.
    if p.len == 0 {
        panic("invalid operands for TESTQ")
    }
    return p
}
 33064  
 33065  // TESTW performs "Logical Compare".
 33066  //
 33067  // Mnemonic        : TEST
 33068  // Supported forms : (5 forms)
 33069  //
 33070  //    * TESTW imm16, ax
 33071  //    * TESTW imm16, r16
 33072  //    * TESTW r16, r16
 33073  //    * TESTW imm16, m16
 33074  //    * TESTW r16, m16
 33075  //
func (self *Program) TESTW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("TESTW", 2, Operands { v0, v1 })
    // TESTW imm16, ax
    if isImm16(v0) && v1 == AX {
        p.domain = DomainGeneric
        // 66 + A9 iw — operand-size prefix selects the 16-bit accumulator form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.emit(0xa9)
            m.imm2(toImmAny(v[0]))
        })
    }
    // TESTW imm16, r16
    if isImm16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        // 66 + F7 /0 iw — ModRM.reg = 0 selects TEST.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0xf7)
            m.emit(0xc0 | lcode(v[1]))
            m.imm2(toImmAny(v[0]))
        })
    }
    // TESTW r16, r16
    if isReg16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        // 66 + 85 /r — register-direct form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x85)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // TESTW imm16, m16
    if isImm16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        // 66 + F7 /0 iw — memory form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0xf7)
            m.mrsd(0, addr(v[1]), 1)
            m.imm2(toImmAny(v[0]))
        })
    }
    // TESTW r16, m16
    if isReg16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        // 66 + 85 /r — register-with-memory form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x85)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No operand pattern matched: unsupported combination.
    if p.len == 0 {
        panic("invalid operands for TESTW")
    }
    return p
}
 33134  
 33135  // TZCNTL performs "Count the Number of Trailing Zero Bits".
 33136  //
 33137  // Mnemonic        : TZCNT
 33138  // Supported forms : (2 forms)
 33139  //
 33140  //    * TZCNTL r32, r32    [BMI]
 33141  //    * TZCNTL m32, r32    [BMI]
 33142  //
func (self *Program) TZCNTL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("TZCNTL", 2, Operands { v0, v1 })
    // TZCNTL r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        // F3 0F BC /r — the F3 prefix distinguishes TZCNT from legacy BSF.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xbc)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // TZCNTL m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        // F3 0F BC /r — memory-source form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xbc)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No operand pattern matched: unsupported combination.
    if p.len == 0 {
        panic("invalid operands for TZCNTL")
    }
    return p
}
 33174  
 33175  // TZCNTQ performs "Count the Number of Trailing Zero Bits".
 33176  //
 33177  // Mnemonic        : TZCNT
 33178  // Supported forms : (2 forms)
 33179  //
 33180  //    * TZCNTQ r64, r64    [BMI]
 33181  //    * TZCNTQ m64, r64    [BMI]
 33182  //
func (self *Program) TZCNTQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("TZCNTQ", 2, Operands { v0, v1 })
    // TZCNTQ r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        // F3 + REX.W + 0F BC /r — destination in ModRM.reg (RM form).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
            m.emit(0x0f)
            m.emit(0xbc)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // TZCNTQ m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        // F3 + REX.W + 0F BC /r — memory-source form; rexm(1, ...) requests W.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xf3)
            m.rexm(1, hcode(v[1]), addr(v[0]))
            m.emit(0x0f)
            m.emit(0xbc)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No operand pattern matched: unsupported combination.
    if p.len == 0 {
        panic("invalid operands for TZCNTQ")
    }
    return p
}
 33214  
 33215  // TZCNTW performs "Count the Number of Trailing Zero Bits".
 33216  //
 33217  // Mnemonic        : TZCNT
 33218  // Supported forms : (2 forms)
 33219  //
 33220  //    * TZCNTW r16, r16    [BMI]
 33221  //    * TZCNTW m16, r16    [BMI]
 33222  //
func (self *Program) TZCNTW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("TZCNTW", 2, Operands { v0, v1 })
    // TZCNTW r16, r16
    if isReg16(v0) && isReg16(v1) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        // 66 F3 0F BC /r — operand-size prefix plus the TZCNT-selecting F3 prefix.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.emit(0xf3)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0xbc)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // TZCNTW m16, r16
    if isM16(v0) && isReg16(v1) {
        self.require(ISA_BMI)
        p.domain = DomainGeneric
        // 66 F3 0F BC /r — memory-source form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.emit(0xf3)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0xbc)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No operand pattern matched: unsupported combination.
    if p.len == 0 {
        panic("invalid operands for TZCNTW")
    }
    return p
}
 33256  
 33257  // TZMSK performs "Mask From Trailing Zeros".
 33258  //
 33259  // Mnemonic        : TZMSK
 33260  // Supported forms : (4 forms)
 33261  //
 33262  //    * TZMSK r32, r32    [TBM]
 33263  //    * TZMSK m32, r32    [TBM]
 33264  //    * TZMSK r64, r64    [TBM]
 33265  //    * TZMSK m64, r64    [TBM]
 33266  //
func (self *Program) TZMSK(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("TZMSK", 2, Operands { v0, v1 })
    // TZMSK r32, r32
    if isReg32(v0) && isReg32(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                            // XOP prefix escape byte (AMD TBM uses XOP encoding)
            m.emit(0xe9 ^ (hcode(v[0]) << 5))                       // XOP byte 1: map 9; R bit (inverted) from the source's high code
            m.emit(0x78 ^ (hlcode(v[1]) << 3))                      // XOP byte 2: W=0, vvvv (inverted) = destination register
            m.emit(0x01)                                            // opcode 01 in XOP map 9
            m.emit(0xe0 | lcode(v[0]))                              // ModRM: mod=11, reg=/4 (TZMSK), rm=src
        })
    }
    // TZMSK m32, r32
    if isM32(v0) && isReg32(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, 0, addr(v[0]), hlcode(v[1])) // 3-byte XOP prefix, map 9, W=0, vvvv=dst
            m.emit(0x01)
            m.mrsd(4, addr(v[0]), 1)                                // ModRM reg field = /4 selects TZMSK; memory operand follows
        })
    }
    // TZMSK r64, r64
    if isReg64(v0) && isReg64(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                            // XOP prefix escape byte
            m.emit(0xe9 ^ (hcode(v[0]) << 5))                       // XOP byte 1: map 9; inverted R bit
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))                      // XOP byte 2: W=1 (64-bit), vvvv (inverted) = destination
            m.emit(0x01)
            m.emit(0xe0 | lcode(v[0]))                              // ModRM: mod=11, reg=/4, rm=src
        })
    }
    // TZMSK m64, r64
    if isM64(v0) && isReg64(v1) {
        self.require(ISA_TBM)
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x80, 0, addr(v[0]), hlcode(v[1])) // 3-byte XOP prefix, map 9, W=1 for the 64-bit form
            m.emit(0x01)
            m.mrsd(4, addr(v[0]), 1)                                // /4 opcode extension + memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for TZMSK")
    }
    return p
}
 33318  
 33319  // UCOMISD performs "Unordered Compare Scalar Double-Precision Floating-Point Values and Set EFLAGS".
 33320  //
 33321  // Mnemonic        : UCOMISD
 33322  // Supported forms : (2 forms)
 33323  //
 33324  //    * UCOMISD xmm, xmm    [SSE2]
 33325  //    * UCOMISD m64, xmm    [SSE2]
 33326  //
func (self *Program) UCOMISD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("UCOMISD", 2, Operands { v0, v1 })
    // UCOMISD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                            // 66: mandatory prefix selecting the double-precision form
            m.rexo(hcode(v[1]), v[0], false)                        // optional REX for XMM8-XMM15
            m.emit(0x0f)                                            // two-byte opcode escape
            m.emit(0x2e)                                            // opcode: 0F 2E (UCOMISD)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))           // ModRM: mod=11 (reg-direct), reg=dst, rm=src
        })
    }
    // UCOMISD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                            // 66: mandatory SSE2 prefix
            m.rexo(hcode(v[1]), addr(v[0]), false)                  // optional REX covering the address registers
            m.emit(0x0f)
            m.emit(0x2e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                      // ModRM + SIB + displacement for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for UCOMISD")
    }
    return p
}
 33358  
 33359  // UCOMISS performs "Unordered Compare Scalar Single-Precision Floating-Point Values and Set EFLAGS".
 33360  //
 33361  // Mnemonic        : UCOMISS
 33362  // Supported forms : (2 forms)
 33363  //
 33364  //    * UCOMISS xmm, xmm    [SSE]
 33365  //    * UCOMISS m32, xmm    [SSE]
 33366  //
func (self *Program) UCOMISS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("UCOMISS", 2, Operands { v0, v1 })
    // UCOMISS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                        // optional REX for XMM8-XMM15 (no mandatory prefix: single-precision)
            m.emit(0x0f)                                            // two-byte opcode escape
            m.emit(0x2e)                                            // opcode: 0F 2E (UCOMISS)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))           // ModRM: mod=11 (reg-direct), reg=dst, rm=src
        })
    }
    // UCOMISS m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)                  // optional REX covering the address registers
            m.emit(0x0f)
            m.emit(0x2e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                      // ModRM + SIB + displacement for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for UCOMISS")
    }
    return p
}
 33396  
 33397  // UD2 performs "Undefined Instruction".
 33398  //
 33399  // Mnemonic        : UD2
 33400  // Supported forms : (1 form)
 33401  //
 33402  //    * UD2
 33403  //
func (self *Program) UD2() *Instruction {
    p := self.alloc("UD2", 0, Operands {  })
    // UD2 takes no operands and has exactly one encoding, so it is added
    // unconditionally — no operand matching or p.len check is needed.
    p.domain = DomainGeneric
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.emit(0x0f)                                                // two-byte opcode escape
        m.emit(0x0b)                                                // opcode: 0F 0B — guaranteed #UD (invalid-opcode) trap
    })
    return p
}
 33414  
 33415  // UNPCKHPD performs "Unpack and Interleave High Packed Double-Precision Floating-Point Values".
 33416  //
 33417  // Mnemonic        : UNPCKHPD
 33418  // Supported forms : (2 forms)
 33419  //
 33420  //    * UNPCKHPD xmm, xmm     [SSE2]
 33421  //    * UNPCKHPD m128, xmm    [SSE2]
 33422  //
func (self *Program) UNPCKHPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("UNPCKHPD", 2, Operands { v0, v1 })
    // UNPCKHPD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                            // 66: mandatory prefix selecting the packed-double form
            m.rexo(hcode(v[1]), v[0], false)                        // optional REX for XMM8-XMM15
            m.emit(0x0f)                                            // two-byte opcode escape
            m.emit(0x15)                                            // opcode: 0F 15 (UNPCKHPD)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))           // ModRM: mod=11 (reg-direct), reg=dst, rm=src
        })
    }
    // UNPCKHPD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                            // 66: mandatory SSE2 prefix
            m.rexo(hcode(v[1]), addr(v[0]), false)                  // optional REX covering the address registers
            m.emit(0x0f)
            m.emit(0x15)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                      // ModRM + SIB + displacement for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for UNPCKHPD")
    }
    return p
}
 33454  
 33455  // UNPCKHPS performs "Unpack and Interleave High Packed Single-Precision Floating-Point Values".
 33456  //
 33457  // Mnemonic        : UNPCKHPS
 33458  // Supported forms : (2 forms)
 33459  //
 33460  //    * UNPCKHPS xmm, xmm     [SSE]
 33461  //    * UNPCKHPS m128, xmm    [SSE]
 33462  //
func (self *Program) UNPCKHPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("UNPCKHPS", 2, Operands { v0, v1 })
    // UNPCKHPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                        // optional REX (no mandatory prefix: single-precision form)
            m.emit(0x0f)                                            // two-byte opcode escape
            m.emit(0x15)                                            // opcode: 0F 15 (UNPCKHPS)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))           // ModRM: mod=11 (reg-direct), reg=dst, rm=src
        })
    }
    // UNPCKHPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)                  // optional REX covering the address registers
            m.emit(0x0f)
            m.emit(0x15)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                      // ModRM + SIB + displacement for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for UNPCKHPS")
    }
    return p
}
 33492  
 33493  // UNPCKLPD performs "Unpack and Interleave Low Packed Double-Precision Floating-Point Values".
 33494  //
 33495  // Mnemonic        : UNPCKLPD
 33496  // Supported forms : (2 forms)
 33497  //
 33498  //    * UNPCKLPD xmm, xmm     [SSE2]
 33499  //    * UNPCKLPD m128, xmm    [SSE2]
 33500  //
func (self *Program) UNPCKLPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("UNPCKLPD", 2, Operands { v0, v1 })
    // UNPCKLPD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                            // 66: mandatory prefix selecting the packed-double form
            m.rexo(hcode(v[1]), v[0], false)                        // optional REX for XMM8-XMM15
            m.emit(0x0f)                                            // two-byte opcode escape
            m.emit(0x14)                                            // opcode: 0F 14 (UNPCKLPD)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))           // ModRM: mod=11 (reg-direct), reg=dst, rm=src
        })
    }
    // UNPCKLPD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)                                            // 66: mandatory SSE2 prefix
            m.rexo(hcode(v[1]), addr(v[0]), false)                  // optional REX covering the address registers
            m.emit(0x0f)
            m.emit(0x14)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                      // ModRM + SIB + displacement for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for UNPCKLPD")
    }
    return p
}
 33532  
 33533  // UNPCKLPS performs "Unpack and Interleave Low Packed Single-Precision Floating-Point Values".
 33534  //
 33535  // Mnemonic        : UNPCKLPS
 33536  // Supported forms : (2 forms)
 33537  //
 33538  //    * UNPCKLPS xmm, xmm     [SSE]
 33539  //    * UNPCKLPS m128, xmm    [SSE]
 33540  //
func (self *Program) UNPCKLPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("UNPCKLPS", 2, Operands { v0, v1 })
    // UNPCKLPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)                        // optional REX (no mandatory prefix: single-precision form)
            m.emit(0x0f)                                            // two-byte opcode escape
            m.emit(0x14)                                            // opcode: 0F 14 (UNPCKLPS)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))           // ModRM: mod=11 (reg-direct), reg=dst, rm=src
        })
    }
    // UNPCKLPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)                  // optional REX covering the address registers
            m.emit(0x0f)
            m.emit(0x14)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                      // ModRM + SIB + displacement for the memory operand
        })
    }
    if p.len == 0 {
        panic("invalid operands for UNPCKLPS")
    }
    return p
}
 33570  
 33571  // VADDPD performs "Add Packed Double-Precision Floating-Point Values".
 33572  //
 33573  // Mnemonic        : VADDPD
 33574  // Supported forms : (11 forms)
 33575  //
 33576  //    * VADDPD xmm, xmm, xmm                   [AVX]
 33577  //    * VADDPD m128, xmm, xmm                  [AVX]
 33578  //    * VADDPD ymm, ymm, ymm                   [AVX]
 33579  //    * VADDPD m256, ymm, ymm                  [AVX]
 33580  //    * VADDPD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
 33581  //    * VADDPD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 33582  //    * VADDPD zmm, zmm, zmm{k}{z}             [AVX512F]
 33583  //    * VADDPD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 33584  //    * VADDPD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 33585  //    * VADDPD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 33586  //    * VADDPD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 33587  //
func (self *Program) VADDPD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional 4th operand is only valid for the {er} (embedded-rounding) form.
    switch len(vv) {
        case 0  : p = self.alloc("VADDPD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VADDPD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VADDPD takes 3 or 4 operands")
    }
    // VADDPD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))              // 2-byte VEX: L=0 (128-bit), pp=01 (66 prefix)
            m.emit(0x58)                                            // opcode: 58 (ADD)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: mod=11, reg=dst, rm=first source
        })
    }
    // VADDPD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))        // VEX.128.66 with memory operand
            m.emit(0x58)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                      // ModRM + SIB + displacement for the memory operand
        })
    }
    // VADDPD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))              // 2-byte VEX: L=1 (256-bit), pp=01 (66 prefix)
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VADDPD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))        // VEX.256.66 with memory operand
            m.emit(0x58)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VADDPD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX: 512-bit vector length; bcode(v[0]) sets the broadcast bit for m64bcst
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x58)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)                     // 64-byte disp8*N compression factor for full 512-bit loads
        })
    }
    // VADDPD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                            // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))  // P0: inverted R/X/B/R' register-extension bits
            m.emit(0xfd ^ (hlcode(v[2]) << 3))                      // P1: W=1, vvvv (inverted) = second source, pp=01
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)  // P2: z, rounding mode in L'L, b=1 (static rounding), aaa mask
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VADDPD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                            // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: inverted register-extension bits
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                      // P1: W=1, vvvv (inverted), pp=01
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // P2: z, V' bit, aaa mask; 0x40 = L'L=10 (512-bit)
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VADDPD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))  // EVEX, 128-bit length
            m.emit(0x58)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)                     // 16-byte disp8*N factor for the 128-bit form
        })
    }
    // VADDPD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // P2: 0x00 = L'L=00 (128-bit)
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VADDPD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))  // EVEX, 256-bit length
            m.emit(0x58)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)                     // 32-byte disp8*N factor for the 256-bit form
        })
    }
    // VADDPD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // P2: 0x20 = L'L=01 (256-bit)
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VADDPD")
    }
    return p
}
 33722  
 33723  // VADDPS performs "Add Packed Single-Precision Floating-Point Values".
 33724  //
 33725  // Mnemonic        : VADDPS
 33726  // Supported forms : (11 forms)
 33727  //
 33728  //    * VADDPS xmm, xmm, xmm                   [AVX]
 33729  //    * VADDPS m128, xmm, xmm                  [AVX]
 33730  //    * VADDPS ymm, ymm, ymm                   [AVX]
 33731  //    * VADDPS m256, ymm, ymm                  [AVX]
 33732  //    * VADDPS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
 33733  //    * VADDPS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 33734  //    * VADDPS zmm, zmm, zmm{k}{z}             [AVX512F]
 33735  //    * VADDPS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 33736  //    * VADDPS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 33737  //    * VADDPS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 33738  //    * VADDPS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 33739  //
func (self *Program) VADDPS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional 4th operand is only valid for the {er} (embedded-rounding) form.
    switch len(vv) {
        case 0  : p = self.alloc("VADDPS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VADDPS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VADDPS takes 3 or 4 operands")
    }
    // VADDPS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), v[0], hlcode(v[1]))              // 2-byte VEX: L=0 (128-bit), pp=00 (no prefix, single-precision)
            m.emit(0x58)                                            // opcode: 58 (ADD)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: mod=11, reg=dst, rm=first source
        })
    }
    // VADDPS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), addr(v[0]), hlcode(v[1]))        // VEX.128.0F with memory operand
            m.emit(0x58)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                      // ModRM + SIB + displacement for the memory operand
        })
    }
    // VADDPS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), v[0], hlcode(v[1]))              // 2-byte VEX: L=1 (256-bit), pp=00
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VADDPS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), addr(v[0]), hlcode(v[1]))        // VEX.256.0F with memory operand
            m.emit(0x58)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VADDPS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX: 512-bit vector length; bcode(v[0]) sets the broadcast bit for m32bcst
            m.evex(0b01, 0x04, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x58)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)                     // 64-byte disp8*N compression factor for full 512-bit loads
        })
    }
    // VADDPS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                            // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))  // P0: inverted R/X/B/R' register-extension bits
            m.emit(0x7c ^ (hlcode(v[2]) << 3))                      // P1: W=0, vvvv (inverted) = second source, pp=00
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)  // P2: z, rounding mode in L'L, b=1 (static rounding), aaa mask
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VADDPS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))                      // P1: W=0, vvvv (inverted), pp=00
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // P2: 0x40 = L'L=10 (512-bit)
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VADDPS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))  // EVEX, 128-bit length
            m.emit(0x58)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)                     // 16-byte disp8*N factor for the 128-bit form
        })
    }
    // VADDPS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // P2: 0x00 = L'L=00 (128-bit)
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VADDPS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))  // EVEX, 256-bit length
            m.emit(0x58)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)                     // 32-byte disp8*N factor for the 256-bit form
        })
    }
    // VADDPS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // P2: 0x20 = L'L=01 (256-bit)
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VADDPS")
    }
    return p
}
 33874  
 33875  // VADDSD performs "Add Scalar Double-Precision Floating-Point Values".
 33876  //
 33877  // Mnemonic        : VADDSD
 33878  // Supported forms : (5 forms)
 33879  //
 33880  //    * VADDSD xmm, xmm, xmm                [AVX]
 33881  //    * VADDSD m64, xmm, xmm                [AVX]
 33882  //    * VADDSD m64, xmm, xmm{k}{z}          [AVX512F]
 33883  //    * VADDSD {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
 33884  //    * VADDSD xmm, xmm, xmm{k}{z}          [AVX512F]
 33885  //
func (self *Program) VADDSD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional 4th operand is only valid for the {er} (embedded-rounding) form.
    switch len(vv) {
        case 0  : p = self.alloc("VADDSD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VADDSD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VADDSD takes 3 or 4 operands")
    }
    // VADDSD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), v[0], hlcode(v[1]))              // 2-byte VEX: pp=11 (F2 prefix, scalar double)
            m.emit(0x58)                                            // opcode: 58 (ADD)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: mod=11, reg=dst, rm=first source
        })
    }
    // VADDSD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), addr(v[0]), hlcode(v[1]))        // VEX.F2 with memory operand
            m.emit(0x58)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                      // ModRM + SIB + displacement for the memory operand
        })
    }
    // VADDSD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)  // EVEX; no broadcast for a scalar form
            m.emit(0x58)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)                      // 8-byte disp8*N factor: scalar double loads one m64
        })
    }
    // VADDSD {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                            // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))  // P0: inverted R/X/B/R' register-extension bits
            m.emit(0xff ^ (hlcode(v[2]) << 3))                      // P1: W=1, vvvv (inverted) = second source, pp=11 (F2)
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)  // P2: z, rounding mode in L'L, b=1 (static rounding), aaa mask
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VADDSD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xff ^ (hlcode(v[1]) << 3))                      // P1: W=1, vvvv (inverted), pp=11 (F2)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VADDSD")
    }
    return p
}
 33954  
// VADDSS performs "Add Scalar Single-Precision Floating-Point Values".
//
// Mnemonic        : VADDSS
// Supported forms : (5 forms)
//
//    * VADDSS xmm, xmm, xmm                [AVX]
//    * VADDSS m32, xmm, xmm                [AVX]
//    * VADDSS m32, xmm, xmm{k}{z}          [AVX512F]
//    * VADDSS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VADDSS xmm, xmm, xmm{k}{z}          [AVX512F]
//
// Operands follow the source-first order shown above. Every form whose
// operand kinds match registers one candidate encoding on the returned
// instruction; the call panics if no form matches. The order of the
// form checks below is generated and must not be changed by hand.
func (self *Program) VADDSS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional 4th operand is only present for the {er} (embedded
    // rounding) form, which takes 4 operands.
    switch len(vv) {
        case 0  : p = self.alloc("VADDSS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VADDSS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VADDSS takes 3 or 4 operands")
    }
    // VADDSS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))   // ModRM: register-direct (mod=11)
        })
    }
    // VADDSS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x58)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VADDSS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x58)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)   // disp8 is scaled by the 4-byte element size
        })
    }
    // VADDSS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix emitted as raw bytes: 0x62 escape plus three
            // payload bytes. vcode(v[0]) here presumably packs the {er}
            // rounding-mode bits into the prefix — verify against the
            // EVEX specification if touching this.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7e ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VADDSS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: reject the operand combination at construction time.
    if p.len == 0 {
        panic("invalid operands for VADDSS")
    }
    return p
}
 34034  
// VADDSUBPD performs "Packed Double-FP Add/Subtract".
//
// Mnemonic        : VADDSUBPD
// Supported forms : (4 forms)
//
//    * VADDSUBPD xmm, xmm, xmm     [AVX]
//    * VADDSUBPD m128, xmm, xmm    [AVX]
//    * VADDSUBPD ymm, ymm, ymm     [AVX]
//    * VADDSUBPD m256, ymm, ymm    [AVX]
//
// Operands follow the source-first order shown above. Every form whose
// operand kinds match registers one candidate encoding on the returned
// instruction; the call panics if no form matches.
func (self *Program) VADDSUBPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VADDSUBPD", 3, Operands { v0, v1, v2 })
    // VADDSUBPD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xd0)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))   // ModRM: register-direct (mod=11)
        })
    }
    // VADDSUBPD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xd0)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VADDSUBPD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // vex2 first argument 5 (vs. 1 for the xmm form) selects the
            // 256-bit (VEX.L=1) variant of the same 0x66-prefixed encoding.
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xd0)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VADDSUBPD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xd0)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No form matched: reject the operand combination at construction time.
    if p.len == 0 {
        panic("invalid operands for VADDSUBPD")
    }
    return p
}
 34092  
// VADDSUBPS performs "Packed Single-FP Add/Subtract".
//
// Mnemonic        : VADDSUBPS
// Supported forms : (4 forms)
//
//    * VADDSUBPS xmm, xmm, xmm     [AVX]
//    * VADDSUBPS m128, xmm, xmm    [AVX]
//    * VADDSUBPS ymm, ymm, ymm     [AVX]
//    * VADDSUBPS m256, ymm, ymm    [AVX]
//
// Operands follow the source-first order shown above. Every form whose
// operand kinds match registers one candidate encoding on the returned
// instruction; the call panics if no form matches.
func (self *Program) VADDSUBPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VADDSUBPS", 3, Operands { v0, v1, v2 })
    // VADDSUBPS xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xd0)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))   // ModRM: register-direct (mod=11)
        })
    }
    // VADDSUBPS m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xd0)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VADDSUBPS ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // vex2 first argument 7 (vs. 3 for the xmm form) selects the
            // 256-bit (VEX.L=1) variant of the same encoding.
            m.vex2(7, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xd0)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VADDSUBPS m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(7, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xd0)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No form matched: reject the operand combination at construction time.
    if p.len == 0 {
        panic("invalid operands for VADDSUBPS")
    }
    return p
}
 34150  
// VAESDEC performs "Perform One Round of an AES Decryption Flow".
//
// Mnemonic        : VAESDEC
// Supported forms : (2 forms)
//
//    * VAESDEC xmm, xmm, xmm     [AES,AVX]
//    * VAESDEC m128, xmm, xmm    [AES,AVX]
//
// Operands follow the source-first order shown above. Each matching form
// registers one candidate encoding; the call panics if no form matches.
func (self *Program) VAESDEC(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VAESDEC", 3, Operands { v0, v1, v2 })
    // VAESDEC xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX | ISA_AES)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Raw 3-byte VEX prefix (0xc4 escape) emitted by hand for the
            // register-register form.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xde)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))   // ModRM: register-direct (mod=11)
        })
    }
    // VAESDEC m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX | ISA_AES)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xde)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No form matched: reject the operand combination at construction time.
    if p.len == 0 {
        panic("invalid operands for VAESDEC")
    }
    return p
}
 34188  
// VAESDECLAST performs "Perform Last Round of an AES Decryption Flow".
//
// Mnemonic        : VAESDECLAST
// Supported forms : (2 forms)
//
//    * VAESDECLAST xmm, xmm, xmm     [AES,AVX]
//    * VAESDECLAST m128, xmm, xmm    [AES,AVX]
//
// Operands follow the source-first order shown above. Each matching form
// registers one candidate encoding; the call panics if no form matches.
// Identical in structure to VAESDEC, differing only in the opcode byte (0xdf).
func (self *Program) VAESDECLAST(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VAESDECLAST", 3, Operands { v0, v1, v2 })
    // VAESDECLAST xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX | ISA_AES)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Raw 3-byte VEX prefix (0xc4 escape) for the register form.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xdf)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))   // ModRM: register-direct (mod=11)
        })
    }
    // VAESDECLAST m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX | ISA_AES)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xdf)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No form matched: reject the operand combination at construction time.
    if p.len == 0 {
        panic("invalid operands for VAESDECLAST")
    }
    return p
}
 34226  
// VAESENC performs "Perform One Round of an AES Encryption Flow".
//
// Mnemonic        : VAESENC
// Supported forms : (2 forms)
//
//    * VAESENC xmm, xmm, xmm     [AES,AVX]
//    * VAESENC m128, xmm, xmm    [AES,AVX]
//
// Operands follow the source-first order shown above. Each matching form
// registers one candidate encoding; the call panics if no form matches.
// Identical in structure to VAESDEC, differing only in the opcode byte (0xdc).
func (self *Program) VAESENC(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VAESENC", 3, Operands { v0, v1, v2 })
    // VAESENC xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX | ISA_AES)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Raw 3-byte VEX prefix (0xc4 escape) for the register form.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xdc)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))   // ModRM: register-direct (mod=11)
        })
    }
    // VAESENC m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX | ISA_AES)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xdc)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No form matched: reject the operand combination at construction time.
    if p.len == 0 {
        panic("invalid operands for VAESENC")
    }
    return p
}
 34264  
// VAESENCLAST performs "Perform Last Round of an AES Encryption Flow".
//
// Mnemonic        : VAESENCLAST
// Supported forms : (2 forms)
//
//    * VAESENCLAST xmm, xmm, xmm     [AES,AVX]
//    * VAESENCLAST m128, xmm, xmm    [AES,AVX]
//
// Operands follow the source-first order shown above. Each matching form
// registers one candidate encoding; the call panics if no form matches.
// Identical in structure to VAESENC, differing only in the opcode byte (0xdd).
func (self *Program) VAESENCLAST(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VAESENCLAST", 3, Operands { v0, v1, v2 })
    // VAESENCLAST xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX | ISA_AES)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Raw 3-byte VEX prefix (0xc4 escape) for the register form.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xdd)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))   // ModRM: register-direct (mod=11)
        })
    }
    // VAESENCLAST m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX | ISA_AES)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xdd)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No form matched: reject the operand combination at construction time.
    if p.len == 0 {
        panic("invalid operands for VAESENCLAST")
    }
    return p
}
 34302  
// VAESIMC performs "Perform the AES InvMixColumn Transformation".
//
// Mnemonic        : VAESIMC
// Supported forms : (2 forms)
//
//    * VAESIMC xmm, xmm     [AES,AVX]
//    * VAESIMC m128, xmm    [AES,AVX]
//
// Two-operand form: source first, destination second. Each matching form
// registers one candidate encoding; the call panics if no form matches.
func (self *Program) VAESIMC(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VAESIMC", 2, Operands { v0, v1 })
    // VAESIMC xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX | ISA_AES)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Raw 3-byte VEX prefix (0xc4 escape); no VEX.vvvv source here,
            // so the third byte carries no register bits.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0xdb)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: register-direct (mod=11)
        })
    }
    // VAESIMC m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX | ISA_AES)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0xdb)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched: reject the operand combination at construction time.
    if p.len == 0 {
        panic("invalid operands for VAESIMC")
    }
    return p
}
 34340  
// VAESKEYGENASSIST performs "AES Round Key Generation Assist".
//
// Mnemonic        : VAESKEYGENASSIST
// Supported forms : (2 forms)
//
//    * VAESKEYGENASSIST imm8, xmm, xmm     [AES,AVX]
//    * VAESKEYGENASSIST imm8, m128, xmm    [AES,AVX]
//
// Operands follow the source-first order shown above (immediate first).
// Each matching form registers one candidate encoding; the call panics
// if no form matches. The imm8 is appended after the ModRM/addressing bytes.
func (self *Program) VAESKEYGENASSIST(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VAESKEYGENASSIST", 3, Operands { v0, v1, v2 })
    // VAESKEYGENASSIST imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX | ISA_AES)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Raw 3-byte VEX prefix (0xc4 escape); note the 0xe3 map byte
            // (0F3A opcode map) vs. 0xe2 used by the other VAES* encoders.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79)
            m.emit(0xdf)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))   // ModRM: register-direct (mod=11)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VAESKEYGENASSIST imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_AVX | ISA_AES)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[2]), addr(v[1]), 0)
            m.emit(0xdf)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched: reject the operand combination at construction time.
    if p.len == 0 {
        panic("invalid operands for VAESKEYGENASSIST")
    }
    return p
}
 34380  
// VALIGND performs "Align Doubleword Vectors".
//
// Mnemonic        : VALIGND
// Supported forms : (6 forms)
//
//    * VALIGND imm8, m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VALIGND imm8, zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VALIGND imm8, m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VALIGND imm8, xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VALIGND imm8, m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VALIGND imm8, ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Operands follow the source-first order shown above (immediate first).
// All forms are EVEX-encoded (opcode 0x03); every matching form registers
// one candidate encoding and the call panics if no form matches. The imm8
// is appended after the ModRM/addressing bytes.
func (self *Program) VALIGND(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VALIGND", 4, Operands { v0, v1, v2, v3 })
    // VALIGND imm8, m512/m32bcst, zmm, zmm{k}{z}
    if isImm8(v0) && isM512M32bcst(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x03)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)   // disp8 scaled by the 64-byte vector width
            m.imm1(toImmAny(v[0]))
        })
    }
    // VALIGND imm8, zmm, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix emitted as raw bytes: 0x62 escape plus three
            // payload bytes (register-register form).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x03)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VALIGND imm8, m128/m32bcst, xmm, xmm{k}{z}
    if isImm8(v0) && isM128M32bcst(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x03)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VALIGND imm8, xmm, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0x03)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VALIGND imm8, m256/m32bcst, ymm, ymm{k}{z}
    if isImm8(v0) && isM256M32bcst(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x03)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VALIGND imm8, ymm, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x03)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched: reject the operand combination at construction time.
    if p.len == 0 {
        panic("invalid operands for VALIGND")
    }
    return p
}
 34475  
// VALIGNQ performs "Align Quadword Vectors".
//
// Mnemonic        : VALIGNQ
// Supported forms : (6 forms)
//
//    * VALIGNQ imm8, m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VALIGNQ imm8, zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VALIGNQ imm8, m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VALIGNQ imm8, xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VALIGNQ imm8, m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VALIGNQ imm8, ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Operands follow the source-first order shown above (immediate first).
// Mirrors VALIGND but for 64-bit elements: same opcode (0x03) with the
// EVEX.W bit set (0x85 vs 0x05, 0xfd vs 0x7d in the prefix bytes).
// Every matching form registers one candidate encoding; the call panics
// if no form matches.
func (self *Program) VALIGNQ(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VALIGNQ", 4, Operands { v0, v1, v2, v3 })
    // VALIGNQ imm8, m512/m64bcst, zmm, zmm{k}{z}
    if isImm8(v0) && isM512M64bcst(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x03)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)   // disp8 scaled by the 64-byte vector width
            m.imm1(toImmAny(v[0]))
        })
    }
    // VALIGNQ imm8, zmm, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix emitted as raw bytes: 0x62 escape plus three
            // payload bytes (register-register form).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x03)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VALIGNQ imm8, m128/m64bcst, xmm, xmm{k}{z}
    if isImm8(v0) && isM128M64bcst(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x03)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VALIGNQ imm8, xmm, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0x03)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VALIGNQ imm8, m256/m64bcst, ymm, ymm{k}{z}
    if isImm8(v0) && isM256M64bcst(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x03)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VALIGNQ imm8, ymm, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x03)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched: reject the operand combination at construction time.
    if p.len == 0 {
        panic("invalid operands for VALIGNQ")
    }
    return p
}
 34570  
 34571  // VANDNPD performs "Bitwise Logical AND NOT of Packed Double-Precision Floating-Point Values".
 34572  //
 34573  // Mnemonic        : VANDNPD
 34574  // Supported forms : (10 forms)
 34575  //
 34576  //    * VANDNPD xmm, xmm, xmm                   [AVX]
 34577  //    * VANDNPD m128, xmm, xmm                  [AVX]
 34578  //    * VANDNPD ymm, ymm, ymm                   [AVX]
 34579  //    * VANDNPD m256, ymm, ymm                  [AVX]
 34580  //    * VANDNPD m512/m64bcst, zmm, zmm{k}{z}    [AVX512DQ]
 34581  //    * VANDNPD zmm, zmm, zmm{k}{z}             [AVX512DQ]
 34582  //    * VANDNPD m128/m64bcst, xmm, xmm{k}{z}    [AVX512DQ,AVX512VL]
 34583  //    * VANDNPD xmm, xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
 34584  //    * VANDNPD m256/m64bcst, ymm, ymm{k}{z}    [AVX512DQ,AVX512VL]
 34585  //    * VANDNPD ymm, ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
 34586  //
 34587  func (self *Program) VANDNPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
 34588      p := self.alloc("VANDNPD", 3, Operands { v0, v1, v2 })
 34589      // VANDNPD xmm, xmm, xmm
 34590      if isXMM(v0) && isXMM(v1) && isXMM(v2) {
 34591          self.require(ISA_AVX)
 34592          p.domain = DomainAVX
 34593          p.add(0, func(m *_Encoding, v []interface{}) {
 34594              m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
 34595              m.emit(0x55)
 34596              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 34597          })
 34598      }
 34599      // VANDNPD m128, xmm, xmm
 34600      if isM128(v0) && isXMM(v1) && isXMM(v2) {
 34601          self.require(ISA_AVX)
 34602          p.domain = DomainAVX
 34603          p.add(0, func(m *_Encoding, v []interface{}) {
 34604              m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 34605              m.emit(0x55)
 34606              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 34607          })
 34608      }
 34609      // VANDNPD ymm, ymm, ymm
 34610      if isYMM(v0) && isYMM(v1) && isYMM(v2) {
 34611          self.require(ISA_AVX)
 34612          p.domain = DomainAVX
 34613          p.add(0, func(m *_Encoding, v []interface{}) {
 34614              m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
 34615              m.emit(0x55)
 34616              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 34617          })
 34618      }
 34619      // VANDNPD m256, ymm, ymm
 34620      if isM256(v0) && isYMM(v1) && isYMM(v2) {
 34621          self.require(ISA_AVX)
 34622          p.domain = DomainAVX
 34623          p.add(0, func(m *_Encoding, v []interface{}) {
 34624              m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 34625              m.emit(0x55)
 34626              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 34627          })
 34628      }
 34629      // VANDNPD m512/m64bcst, zmm, zmm{k}{z}
 34630      if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
 34631          self.require(ISA_AVX512DQ)
 34632          p.domain = DomainAVX
 34633          p.add(0, func(m *_Encoding, v []interface{}) {
 34634              m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
 34635              m.emit(0x55)
 34636              m.mrsd(lcode(v[2]), addr(v[0]), 64)
 34637          })
 34638      }
 34639      // VANDNPD zmm, zmm, zmm{k}{z}
 34640      if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
 34641          self.require(ISA_AVX512DQ)
 34642          p.domain = DomainAVX
 34643          p.add(0, func(m *_Encoding, v []interface{}) {
 34644              m.emit(0x62)
 34645              m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 34646              m.emit(0xfd ^ (hlcode(v[1]) << 3))
 34647              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
 34648              m.emit(0x55)
 34649              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 34650          })
 34651      }
 34652      // VANDNPD m128/m64bcst, xmm, xmm{k}{z}
 34653      if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
 34654          self.require(ISA_AVX512VL | ISA_AVX512DQ)
 34655          p.domain = DomainAVX
 34656          p.add(0, func(m *_Encoding, v []interface{}) {
 34657              m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
 34658              m.emit(0x55)
 34659              m.mrsd(lcode(v[2]), addr(v[0]), 16)
 34660          })
 34661      }
 34662      // VANDNPD xmm, xmm, xmm{k}{z}
 34663      if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
 34664          self.require(ISA_AVX512VL | ISA_AVX512DQ)
 34665          p.domain = DomainAVX
 34666          p.add(0, func(m *_Encoding, v []interface{}) {
 34667              m.emit(0x62)
 34668              m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 34669              m.emit(0xfd ^ (hlcode(v[1]) << 3))
 34670              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
 34671              m.emit(0x55)
 34672              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 34673          })
 34674      }
 34675      // VANDNPD m256/m64bcst, ymm, ymm{k}{z}
 34676      if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
 34677          self.require(ISA_AVX512VL | ISA_AVX512DQ)
 34678          p.domain = DomainAVX
 34679          p.add(0, func(m *_Encoding, v []interface{}) {
 34680              m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
 34681              m.emit(0x55)
 34682              m.mrsd(lcode(v[2]), addr(v[0]), 32)
 34683          })
 34684      }
 34685      // VANDNPD ymm, ymm, ymm{k}{z}
 34686      if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
 34687          self.require(ISA_AVX512VL | ISA_AVX512DQ)
 34688          p.domain = DomainAVX
 34689          p.add(0, func(m *_Encoding, v []interface{}) {
 34690              m.emit(0x62)
 34691              m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 34692              m.emit(0xfd ^ (hlcode(v[1]) << 3))
 34693              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
 34694              m.emit(0x55)
 34695              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 34696          })
 34697      }
 34698      if p.len == 0 {
 34699          panic("invalid operands for VANDNPD")
 34700      }
 34701      return p
 34702  }
 34703  
// VANDNPS performs "Bitwise Logical AND NOT of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VANDNPS
// Supported forms : (10 forms)
//
//    * VANDNPS xmm, xmm, xmm                   [AVX]
//    * VANDNPS m128, xmm, xmm                  [AVX]
//    * VANDNPS ymm, ymm, ymm                   [AVX]
//    * VANDNPS m256, ymm, ymm                  [AVX]
//    * VANDNPS m512/m32bcst, zmm, zmm{k}{z}    [AVX512DQ]
//    * VANDNPS zmm, zmm, zmm{k}{z}             [AVX512DQ]
//    * VANDNPS m128/m32bcst, xmm, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VANDNPS xmm, xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VANDNPS m256/m32bcst, ymm, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VANDNPS ymm, ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
//
func (self *Program) VANDNPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VANDNPS", 3, Operands { v0, v1, v2 })
    // Every operand form below that matches (v0, v1, v2) registers one
    // candidate encoder via p.add. v[2]'s low register code is placed in the
    // ModRM.reg field and v[0] is the register/memory source operand.
    // VANDNPS xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x55)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VANDNPS m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x55)
            // Last mrsd argument is the displacement scale; VEX forms use 1
            // (no EVEX disp8*N compression) — NOTE(review): confirm in mrsd.
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VANDNPS ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x55)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VANDNPS m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x55)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VANDNPS m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x55)
            // Scale 64 matches the 512-bit vector width for EVEX compressed
            // displacement.
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VANDNPS zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix for the register-register form: escape
            // byte 0x62 followed by three payload bytes that fold in (via XOR)
            // the extended register bits, the non-destructive source, and the
            // opmask/zeroing/vector-length controls; then opcode and ModRM.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x55)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VANDNPS m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x55)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VANDNPS xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x55)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VANDNPS m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x55)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VANDNPS ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x55)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No supported form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VANDNPS")
    }
    return p
}
 34836  
// VANDPD performs "Bitwise Logical AND of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VANDPD
// Supported forms : (10 forms)
//
//    * VANDPD xmm, xmm, xmm                   [AVX]
//    * VANDPD m128, xmm, xmm                  [AVX]
//    * VANDPD ymm, ymm, ymm                   [AVX]
//    * VANDPD m256, ymm, ymm                  [AVX]
//    * VANDPD m512/m64bcst, zmm, zmm{k}{z}    [AVX512DQ]
//    * VANDPD zmm, zmm, zmm{k}{z}             [AVX512DQ]
//    * VANDPD m128/m64bcst, xmm, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VANDPD xmm, xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VANDPD m256/m64bcst, ymm, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VANDPD ymm, ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
//
func (self *Program) VANDPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VANDPD", 3, Operands { v0, v1, v2 })
    // Every operand form below that matches (v0, v1, v2) registers one
    // candidate encoder via p.add. v[2]'s low register code is placed in the
    // ModRM.reg field and v[0] is the register/memory source operand.
    // VANDPD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x54)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VANDPD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x54)
            // Last mrsd argument is the displacement scale; VEX forms use 1
            // (no EVEX disp8*N compression) — NOTE(review): confirm in mrsd.
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VANDPD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x54)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VANDPD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x54)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VANDPD m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x54)
            // Scale 64 matches the 512-bit vector width for EVEX compressed
            // displacement.
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VANDPD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix for the register-register form: escape
            // byte 0x62 followed by three payload bytes that fold in (via XOR)
            // the extended register bits, the non-destructive source, and the
            // opmask/zeroing/vector-length controls; then opcode and ModRM.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x54)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VANDPD m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x54)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VANDPD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x54)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VANDPD m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x54)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VANDPD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x54)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No supported form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VANDPD")
    }
    return p
}
 34969  
// VANDPS performs "Bitwise Logical AND of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VANDPS
// Supported forms : (10 forms)
//
//    * VANDPS xmm, xmm, xmm                   [AVX]
//    * VANDPS m128, xmm, xmm                  [AVX]
//    * VANDPS ymm, ymm, ymm                   [AVX]
//    * VANDPS m256, ymm, ymm                  [AVX]
//    * VANDPS m512/m32bcst, zmm, zmm{k}{z}    [AVX512DQ]
//    * VANDPS zmm, zmm, zmm{k}{z}             [AVX512DQ]
//    * VANDPS m128/m32bcst, xmm, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VANDPS xmm, xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VANDPS m256/m32bcst, ymm, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VANDPS ymm, ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
//
func (self *Program) VANDPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VANDPS", 3, Operands { v0, v1, v2 })
    // Every operand form below that matches (v0, v1, v2) registers one
    // candidate encoder via p.add. v[2]'s low register code is placed in the
    // ModRM.reg field and v[0] is the register/memory source operand.
    // VANDPS xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x54)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VANDPS m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x54)
            // Last mrsd argument is the displacement scale; VEX forms use 1
            // (no EVEX disp8*N compression) — NOTE(review): confirm in mrsd.
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VANDPS ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x54)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VANDPS m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x54)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VANDPS m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x54)
            // Scale 64 matches the 512-bit vector width for EVEX compressed
            // displacement.
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VANDPS zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix for the register-register form: escape
            // byte 0x62 followed by three payload bytes that fold in (via XOR)
            // the extended register bits, the non-destructive source, and the
            // opmask/zeroing/vector-length controls; then opcode and ModRM.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x54)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VANDPS m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x54)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VANDPS xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x54)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VANDPS m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x54)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VANDPS ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x54)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No supported form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VANDPS")
    }
    return p
}
 35102  
// VBLENDMPD performs "Blend Packed Double-Precision Floating-Point Vectors Using an OpMask Control".
//
// Mnemonic        : VBLENDMPD
// Supported forms : (6 forms)
//
//    * VBLENDMPD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VBLENDMPD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VBLENDMPD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VBLENDMPD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VBLENDMPD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VBLENDMPD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VBLENDMPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VBLENDMPD", 3, Operands { v0, v1, v2 })
    // EVEX-only instruction: every form below that matches (v0, v1, v2)
    // registers one candidate encoder via p.add; v[2]'s low register code is
    // placed in the ModRM.reg field and v[0] is the register/memory source.
    // VBLENDMPD m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x65)
            // Last mrsd argument is the EVEX disp8*N compression scale;
            // 64 matches the 512-bit vector width — NOTE(review): confirm
            // in mrsd.
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VBLENDMPD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix for the register-register form: escape
            // byte 0x62 followed by three payload bytes that fold in (via XOR)
            // the extended register bits, the non-destructive source, and the
            // opmask/zeroing/vector-length controls; then opcode and ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x65)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VBLENDMPD m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x65)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VBLENDMPD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x65)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VBLENDMPD m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x65)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VBLENDMPD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x65)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No supported form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VBLENDMPD")
    }
    return p
}
 35191  
// VBLENDMPS performs "Blend Packed Single-Precision Floating-Point Vectors Using an OpMask Control".
//
// Mnemonic        : VBLENDMPS
// Supported forms : (6 forms)
//
//    * VBLENDMPS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VBLENDMPS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VBLENDMPS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VBLENDMPS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VBLENDMPS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VBLENDMPS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VBLENDMPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VBLENDMPS", 3, Operands { v0, v1, v2 })
    // EVEX-only instruction: every form below that matches (v0, v1, v2)
    // registers one candidate encoder via p.add; v[2]'s low register code is
    // placed in the ModRM.reg field and v[0] is the register/memory source.
    // VBLENDMPS m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x65)
            // Last mrsd argument is the EVEX disp8*N compression scale;
            // 64 matches the 512-bit vector width — NOTE(review): confirm
            // in mrsd.
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VBLENDMPS zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix for the register-register form: escape
            // byte 0x62 followed by three payload bytes that fold in (via XOR)
            // the extended register bits, the non-destructive source, and the
            // opmask/zeroing/vector-length controls; then opcode and ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x65)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VBLENDMPS m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x65)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VBLENDMPS xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x65)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VBLENDMPS m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x65)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VBLENDMPS ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x65)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No supported form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VBLENDMPS")
    }
    return p
}
 35280  
// VBLENDPD performs "Blend Packed Double Precision Floating-Point Values".
//
// Mnemonic        : VBLENDPD
// Supported forms : (4 forms)
//
//    * VBLENDPD imm8, xmm, xmm, xmm     [AVX]
//    * VBLENDPD imm8, m128, xmm, xmm    [AVX]
//    * VBLENDPD imm8, ymm, ymm, ymm     [AVX]
//    * VBLENDPD imm8, m256, ymm, ymm    [AVX]
//
func (self *Program) VBLENDPD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VBLENDPD", 4, Operands { v0, v1, v2, v3 })
    // Four-operand immediate form: v0 is the imm8 blend-control mask, v1 the
    // register/memory source, and v3 is encoded in the ModRM.reg field. Each
    // matching form registers one candidate encoder via p.add.
    // VBLENDPD imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix (escape 0xc4) for the
            // register-register form: the XORs fold the high register bits
            // into the second byte and the vvvv source into the third; then
            // opcode, ModRM, and the trailing immediate.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x0d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VBLENDPD imm8, m128, xmm, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x0d)
            // Scale 1: VEX memory forms have no disp8*N compression —
            // NOTE(review): confirm in mrsd.
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VBLENDPD imm8, ymm, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit(0x0d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VBLENDPD imm8, m256, ymm, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x0d)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No supported form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VBLENDPD")
    }
    return p
}
 35346  
// VBLENDPS performs " Blend Packed Single Precision Floating-Point Values".
//
// Mnemonic        : VBLENDPS
// Supported forms : (4 forms)
//
//    * VBLENDPS imm8, xmm, xmm, xmm     [AVX]
//    * VBLENDPS imm8, m128, xmm, xmm    [AVX]
//    * VBLENDPS imm8, ymm, ymm, ymm     [AVX]
//    * VBLENDPS imm8, m256, ymm, ymm    [AVX]
//
// Same encoding shape as VBLENDPD, but with opcode byte 0x0c. Operand order
// is AT&T-style: immediate first, destination register last (v[3]).
func (self *Program) VBLENDPS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VBLENDPS", 4, Operands { v0, v1, v2, v3 })
    // VBLENDPS imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: inline 3-byte VEX prefix, then opcode, ModRM
            // (mod=11, reg=v[3], rm=v[1]) and the blend-control immediate.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x0c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VBLENDPS imm8, m128, xmm, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: prefix computed by vex3 from the address operand.
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x0c)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VBLENDPS imm8, ymm, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form (0x7d carries the vector-length bit).
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit(0x0c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VBLENDPS imm8, m256, ymm, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x0c)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VBLENDPS")
    }
    return p
}
 35412  
// VBLENDVPD performs " Variable Blend Packed Double Precision Floating-Point Values".
//
// Mnemonic        : VBLENDVPD
// Supported forms : (4 forms)
//
//    * VBLENDVPD xmm, xmm, xmm, xmm     [AVX]
//    * VBLENDVPD xmm, m128, xmm, xmm    [AVX]
//    * VBLENDVPD ymm, ymm, ymm, ymm     [AVX]
//    * VBLENDVPD ymm, m256, ymm, ymm    [AVX]
//
// Unlike VBLENDPD, the blend selector is a register (v[0]), not an immediate;
// it is encoded in the high nibble of a trailing immediate-like byte.
func (self *Program) VBLENDVPD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VBLENDVPD", 4, Operands { v0, v1, v2, v3 })
    // VBLENDVPD xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: 3-byte VEX prefix, opcode 0x4b, ModRM (mod=11,
            // reg=v[3], rm=v[1]), then the selector register in bits 7:4.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x4b)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VBLENDVPD xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: prefix from vex3, ModRM/SIB from mrsd, then the
            // selector-register byte.
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x4b)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VBLENDVPD ymm, ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form (0x7d carries the vector-length bit).
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit(0x4b)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VBLENDVPD ymm, m256, ymm, ymm
    if isYMM(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x4b)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VBLENDVPD")
    }
    return p
}
 35478  
// VBLENDVPS performs " Variable Blend Packed Single Precision Floating-Point Values".
//
// Mnemonic        : VBLENDVPS
// Supported forms : (4 forms)
//
//    * VBLENDVPS xmm, xmm, xmm, xmm     [AVX]
//    * VBLENDVPS xmm, m128, xmm, xmm    [AVX]
//    * VBLENDVPS ymm, ymm, ymm, ymm     [AVX]
//    * VBLENDVPS ymm, m256, ymm, ymm    [AVX]
//
// Same encoding shape as VBLENDVPD but with opcode byte 0x4a. The selector
// register v[0] is encoded in the high nibble of a trailing byte.
func (self *Program) VBLENDVPS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VBLENDVPS", 4, Operands { v0, v1, v2, v3 })
    // VBLENDVPS xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: VEX prefix, opcode, ModRM (mod=11), selector byte.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x4a)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VBLENDVPS xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: vex3 derives the prefix from the address operand.
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x4a)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VBLENDVPS ymm, ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form (0x7d carries the vector-length bit).
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit(0x4a)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VBLENDVPS ymm, m256, ymm, ymm
    if isYMM(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x4a)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VBLENDVPS")
    }
    return p
}
 35544  
// VBROADCASTF128 performs "Broadcast 128 Bit of Floating-Point Data".
//
// Mnemonic        : VBROADCASTF128
// Supported forms : (1 form)
//
//    * VBROADCASTF128 m128, ymm    [AVX]
//
// The source must be a memory operand; there is no register-source form.
func (self *Program) VBROADCASTF128(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VBROADCASTF128", 2, Operands { v0, v1 })
    // VBROADCASTF128 m128, ymm
    if isM128(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // vex3 builds the 3-byte VEX prefix from the address operand;
            // mrsd emits ModRM/SIB and the displacement (scale 1).
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x1a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VBROADCASTF128")
    }
    return p
}
 35569  
// VBROADCASTF32X2 performs "Broadcast Two Single-Precision Floating-Point Elements".
//
// Mnemonic        : VBROADCASTF32X2
// Supported forms : (4 forms)
//
//    * VBROADCASTF32X2 xmm, zmm{k}{z}    [AVX512DQ]
//    * VBROADCASTF32X2 m64, zmm{k}{z}    [AVX512DQ]
//    * VBROADCASTF32X2 xmm, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VBROADCASTF32X2 m64, ymm{k}{z}    [AVX512DQ,AVX512VL]
//
func (self *Program) VBROADCASTF32X2(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VBROADCASTF32X2", 2, Operands { v0, v1 })
    // VBROADCASTF32X2 xmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-assembled 4-byte EVEX prefix. Byte 2 folds
            // in the inverted register-extension bits; byte 4 carries the
            // zeroing flag, mask-register bits, and the 512-bit length (0x48).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x19)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VBROADCASTF32X2 m64, zmm{k}{z}
    if isM64(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: evex assembles the prefix; the final mrsd argument
            // (8) is the disp8 compression scale for the 64-bit memory operand.
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x19)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VBROADCASTF32X2 xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same as the zmm form, but 0x28 selects the 256-bit length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x19)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VBROADCASTF32X2 m64, ymm{k}{z}
    if isM64(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x19)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VBROADCASTF32X2")
    }
    return p
}
 35633  
// VBROADCASTF32X4 performs "Broadcast Four Single-Precision Floating-Point Elements".
//
// Mnemonic        : VBROADCASTF32X4
// Supported forms : (2 forms)
//
//    * VBROADCASTF32X4 m128, zmm{k}{z}    [AVX512F]
//    * VBROADCASTF32X4 m128, ymm{k}{z}    [AVX512F,AVX512VL]
//
// Memory-source only; the two forms differ in destination vector length.
func (self *Program) VBROADCASTF32X4(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VBROADCASTF32X4", 2, Operands { v0, v1 })
    // VBROADCASTF32X4 m128, zmm{k}{z}
    if isM128(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // evex assembles the EVEX prefix (length selector 0b10 = 512-bit);
            // mrsd scale 16 matches the 128-bit memory operand for disp8*N.
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x1a)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VBROADCASTF32X4 m128, ymm{k}{z}
    if isM128(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit destination form (length selector 0b01).
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x1a)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VBROADCASTF32X4")
    }
    return p
}
 35669  
// VBROADCASTF32X8 performs "Broadcast Eight Single-Precision Floating-Point Elements".
//
// Mnemonic        : VBROADCASTF32X8
// Supported forms : (1 form)
//
//    * VBROADCASTF32X8 m256, zmm{k}{z}    [AVX512DQ]
//
// Memory-source only; the destination must be a full 512-bit register.
func (self *Program) VBROADCASTF32X8(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VBROADCASTF32X8", 2, Operands { v0, v1 })
    // VBROADCASTF32X8 m256, zmm{k}{z}
    if isM256(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix via evex helper; mrsd scale 32 matches the 256-bit
            // memory operand for disp8*N compression.
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x1b)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VBROADCASTF32X8")
    }
    return p
}
 35694  
// VBROADCASTF64X2 performs "Broadcast Two Double-Precision Floating-Point Elements".
//
// Mnemonic        : VBROADCASTF64X2
// Supported forms : (2 forms)
//
//    * VBROADCASTF64X2 m128, zmm{k}{z}    [AVX512DQ]
//    * VBROADCASTF64X2 m128, ymm{k}{z}    [AVX512DQ,AVX512VL]
//
// Memory-source only. Note the 0x85 prefix argument (vs 0x05 for the 32-bit
// element variants): the high bit is the EVEX.W=1 (64-bit element) flag.
func (self *Program) VBROADCASTF64X2(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VBROADCASTF64X2", 2, Operands { v0, v1 })
    // VBROADCASTF64X2 m128, zmm{k}{z}
    if isM128(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 512-bit destination (length selector 0b10); disp8 scale 16.
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x1a)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VBROADCASTF64X2 m128, ymm{k}{z}
    if isM128(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit destination (length selector 0b01).
            m.evex(0b10, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x1a)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VBROADCASTF64X2")
    }
    return p
}
 35730  
// VBROADCASTF64X4 performs "Broadcast Four Double-Precision Floating-Point Elements".
//
// Mnemonic        : VBROADCASTF64X4
// Supported forms : (1 form)
//
//    * VBROADCASTF64X4 m256, zmm{k}{z}    [AVX512F]
//
// Memory-source only; destination must be a full 512-bit register.
func (self *Program) VBROADCASTF64X4(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VBROADCASTF64X4", 2, Operands { v0, v1 })
    // VBROADCASTF64X4 m256, zmm{k}{z}
    if isM256(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0x85 selects 64-bit elements (EVEX.W=1); disp8 scale 32 matches
            // the 256-bit memory operand.
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x1b)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VBROADCASTF64X4")
    }
    return p
}
 35755  
// VBROADCASTI128 performs "Broadcast 128 Bits of Integer Data".
//
// Mnemonic        : VBROADCASTI128
// Supported forms : (1 form)
//
//    * VBROADCASTI128 m128, ymm    [AVX2]
//
// Integer counterpart of VBROADCASTF128 (opcode 0x5a vs 0x1a); the source
// must be a memory operand.
func (self *Program) VBROADCASTI128(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VBROADCASTI128", 2, Operands { v0, v1 })
    // VBROADCASTI128 m128, ymm
    if isM128(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix from vex3; ModRM/SIB + displacement via mrsd.
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x5a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VBROADCASTI128")
    }
    return p
}
 35780  
// VBROADCASTI32X2 performs "Broadcast Two Doubleword Elements".
//
// Mnemonic        : VBROADCASTI32X2
// Supported forms : (6 forms)
//
//    * VBROADCASTI32X2 xmm, zmm{k}{z}    [AVX512DQ]
//    * VBROADCASTI32X2 m64, zmm{k}{z}    [AVX512DQ]
//    * VBROADCASTI32X2 xmm, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VBROADCASTI32X2 xmm, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VBROADCASTI32X2 m64, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VBROADCASTI32X2 m64, ymm{k}{z}    [AVX512DQ,AVX512VL]
//
func (self *Program) VBROADCASTI32X2(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VBROADCASTI32X2", 2, Operands { v0, v1 })
    // VBROADCASTI32X2 xmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form, 512-bit destination: hand-assembled EVEX prefix;
            // byte 4 combines zeroing, mask bits and the length flag (0x48).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VBROADCASTI32X2 m64, zmm{k}{z}
    if isM64(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: disp8 scale 8 matches the 64-bit memory operand.
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x59)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VBROADCASTI32X2 xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit destination form (length flag 0x08).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VBROADCASTI32X2 xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit destination form (length flag 0x28).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VBROADCASTI32X2 m64, xmm{k}{z}
    if isM64(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Length selector 0b00 = 128-bit destination.
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x59)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VBROADCASTI32X2 m64, ymm{k}{z}
    if isM64(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Length selector 0b01 = 256-bit destination.
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x59)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VBROADCASTI32X2")
    }
    return p
}
 35869  
// VBROADCASTI32X4 performs "Broadcast Four Doubleword Elements".
//
// Mnemonic        : VBROADCASTI32X4
// Supported forms : (2 forms)
//
//    * VBROADCASTI32X4 m128, zmm{k}{z}    [AVX512F]
//    * VBROADCASTI32X4 m128, ymm{k}{z}    [AVX512F,AVX512VL]
//
// Memory-source only; the two forms differ in destination vector length.
func (self *Program) VBROADCASTI32X4(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VBROADCASTI32X4", 2, Operands { v0, v1 })
    // VBROADCASTI32X4 m128, zmm{k}{z}
    if isM128(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 512-bit destination (length selector 0b10); disp8 scale 16.
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x5a)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VBROADCASTI32X4 m128, ymm{k}{z}
    if isM128(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit destination (length selector 0b01).
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x5a)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VBROADCASTI32X4")
    }
    return p
}
 35905  
// VBROADCASTI32X8 performs "Broadcast Eight Doubleword Elements".
//
// Mnemonic        : VBROADCASTI32X8
// Supported forms : (1 form)
//
//    * VBROADCASTI32X8 m256, zmm{k}{z}    [AVX512DQ]
//
// Memory-source only; destination must be a full 512-bit register.
func (self *Program) VBROADCASTI32X8(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VBROADCASTI32X8", 2, Operands { v0, v1 })
    // VBROADCASTI32X8 m256, zmm{k}{z}
    if isM256(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix via evex helper; disp8 scale 32 matches the 256-bit
            // memory operand.
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VBROADCASTI32X8")
    }
    return p
}
 35930  
// VBROADCASTI64X2 performs "Broadcast Two Quadword Elements".
//
// Mnemonic        : VBROADCASTI64X2
// Supported forms : (2 forms)
//
//    * VBROADCASTI64X2 m128, zmm{k}{z}    [AVX512DQ]
//    * VBROADCASTI64X2 m128, ymm{k}{z}    [AVX512DQ,AVX512VL]
//
// Memory-source only. The 0x85 prefix argument (vs 0x05 for the doubleword
// variants) carries the EVEX.W=1 (64-bit element) flag in its high bit.
func (self *Program) VBROADCASTI64X2(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VBROADCASTI64X2", 2, Operands { v0, v1 })
    // VBROADCASTI64X2 m128, zmm{k}{z}
    if isM128(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 512-bit destination (length selector 0b10); disp8 scale 16.
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x5a)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VBROADCASTI64X2 m128, ymm{k}{z}
    if isM128(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit destination (length selector 0b01).
            m.evex(0b10, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x5a)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VBROADCASTI64X2")
    }
    return p
}
 35966  
// VBROADCASTI64X4 performs "Broadcast Four Quadword Elements".
//
// Mnemonic        : VBROADCASTI64X4
// Supported forms : (1 form)
//
//    * VBROADCASTI64X4 m256, zmm{k}{z}    [AVX512F]
//
// Memory-source only; destination must be a full 512-bit register.
func (self *Program) VBROADCASTI64X4(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VBROADCASTI64X4", 2, Operands { v0, v1 })
    // VBROADCASTI64X4 m256, zmm{k}{z}
    if isM256(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0x85 selects 64-bit elements (EVEX.W=1); disp8 scale 32 matches
            // the 256-bit memory operand.
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VBROADCASTI64X4")
    }
    return p
}
 35991  
// VBROADCASTSD performs "Broadcast Double-Precision Floating-Point Element".
//
// Mnemonic        : VBROADCASTSD
// Supported forms : (6 forms)
//
//    * VBROADCASTSD m64, ymm          [AVX]
//    * VBROADCASTSD xmm, ymm          [AVX2]
//    * VBROADCASTSD xmm, zmm{k}{z}    [AVX512F]
//    * VBROADCASTSD m64, zmm{k}{z}    [AVX512F]
//    * VBROADCASTSD xmm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VBROADCASTSD m64, ymm{k}{z}    [AVX512F,AVX512VL]
//
// The register-source form requires AVX2; under plain AVX only the memory
// source is available. The AVX-512 forms add masking/zeroing support.
func (self *Program) VBROADCASTSD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VBROADCASTSD", 2, Operands { v0, v1 })
    // VBROADCASTSD m64, ymm
    if isM64(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX memory form: prefix from vex3, ModRM/SIB + disp from mrsd.
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x19)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VBROADCASTSD xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX register form: hand-assembled 3-byte prefix, then ModRM
            // with mod=11, reg=v[1] (dst), rm=v[0] (src).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)
            m.emit(0x19)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VBROADCASTSD xmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form, 512-bit destination: byte 4 combines the
            // zeroing flag, mask bits and the length flag (0x48).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x19)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VBROADCASTSD m64, zmm{k}{z}
    if isM64(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form: disp8 scale 8 matches the 64-bit operand.
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x19)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VBROADCASTSD xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit destination form (length flag 0x28).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x19)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VBROADCASTSD m64, ymm{k}{z}
    if isM64(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit destination (length selector 0b01).
            m.evex(0b10, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x19)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VBROADCASTSD")
    }
    return p
}
 36079  
// VBROADCASTSS performs "Broadcast Single-Precision Floating-Point Element".
//
// Mnemonic        : VBROADCASTSS
// Supported forms : (8 forms)
//
//    * VBROADCASTSS m32, xmm          [AVX]
//    * VBROADCASTSS m32, ymm          [AVX]
//    * VBROADCASTSS xmm, xmm          [AVX2]
//    * VBROADCASTSS xmm, ymm          [AVX2]
//    * VBROADCASTSS xmm, zmm{k}{z}    [AVX512F]
//    * VBROADCASTSS m32, zmm{k}{z}    [AVX512F]
//    * VBROADCASTSS xmm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VBROADCASTSS m32, ymm{k}{z}    [AVX512F,AVX512VL]
//
func (self *Program) VBROADCASTSS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VBROADCASTSS", 2, Operands { v0, v1 })
    // Each if-block registers one candidate encoding (opcode 0x18 for all
    // forms); if none matches, p.len stays 0 and we panic below.
    // VBROADCASTSS m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix, then ModRM/SIB for the memory operand.
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x18)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VBROADCASTSS m32, ymm
    if isM32(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same as above with the 256-bit VEX.L variant (0x05 vs 0x01).
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x18)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VBROADCASTSS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix; 0xc0|... is the register-direct
            // ModRM byte (reg = destination, rm = source).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0x18)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VBROADCASTSS xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)
            m.emit(0x18)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VBROADCASTSS xmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 4-byte EVEX prefix (0x62 escape). The byte built
            // from zcode/kcode carries the {z} bit and {k} mask register;
            // 0x48 vs 0x28 (ymm form below) selects the vector length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x18)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VBROADCASTSS m32, zmm{k}{z}
    if isM32(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; mrsd's final argument (4) is the
            // compressed-disp8 scale, matching the 32-bit element size.
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x18)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VBROADCASTSS xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x18)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VBROADCASTSS m32, ymm{k}{z}
    if isM32(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x18)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VBROADCASTSS")
    }
    return p
}
 36191  
// VCMPPD performs "Compare Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VCMPPD
// Supported forms : (11 forms)
//
//    * VCMPPD imm8, xmm, xmm, xmm              [AVX]
//    * VCMPPD imm8, m128, xmm, xmm             [AVX]
//    * VCMPPD imm8, ymm, ymm, ymm              [AVX]
//    * VCMPPD imm8, m256, ymm, ymm             [AVX]
//    * VCMPPD imm8, m512/m64bcst, zmm, k{k}    [AVX512F]
//    * VCMPPD imm8, {sae}, zmm, zmm, k{k}      [AVX512F]
//    * VCMPPD imm8, zmm, zmm, k{k}             [AVX512F]
//    * VCMPPD imm8, m128/m64bcst, xmm, k{k}    [AVX512F,AVX512VL]
//    * VCMPPD imm8, xmm, xmm, k{k}             [AVX512F,AVX512VL]
//    * VCMPPD imm8, m256/m64bcst, ymm, k{k}    [AVX512F,AVX512VL]
//    * VCMPPD imm8, ymm, ymm, k{k}             [AVX512F,AVX512VL]
//
func (self *Program) VCMPPD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}, vv ...interface{}) *Instruction {
    // The variadic tail distinguishes the 4-operand forms from the 5-operand
    // {sae} form; each candidate encoder below checks len(vv) accordingly.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VCMPPD", 4, Operands { v0, v1, v2, v3 })
        case 1  : p = self.alloc("VCMPPD", 5, Operands { v0, v1, v2, v3, vv[0] })
        default : panic("instruction VCMPPD takes 4 or 5 operands")
    }
    // All forms share opcode 0xc2 and a trailing imm8 predicate byte.
    // VCMPPD imm8, xmm, xmm, xmm
    if len(vv) == 0 && isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix; 0xc0|... is the register-direct ModRM byte.
            m.vex2(1, hcode(v[3]), v[1], hlcode(v[2]))
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPD imm8, m128, xmm, xmm
    if len(vv) == 0 && isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xc2)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPD imm8, ymm, ymm, ymm
    if len(vv) == 0 && isImm8(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[3]), v[1], hlcode(v[2]))
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPD imm8, m256, ymm, ymm
    if len(vv) == 0 && isImm8(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xc2)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPD imm8, m512/m64bcst, zmm, k{k}
    if len(vv) == 0 && isImm8(v0) && isM512M64bcst(v1) && isZMM(v2) && isKk(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; bcode(v[1]) carries the broadcast bit and the
            // final mrsd argument (64) is the full-vector compressed-disp8 scale.
            m.evex(0b01, 0x85, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, bcode(v[1]))
            m.emit(0xc2)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPD imm8, {sae}, zmm, zmm, k{k}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isZMM(v2) && isZMM(v3) && isKk(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix; the |0x10 here (vs |0x40 in the
            // plain zmm form) sets the bit that encodes {sae} for reg-reg forms.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[4]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[4]) << 4)))
            m.emit(0xfd ^ (hlcode(v[3]) << 3))
            m.emit((0x08 ^ (ecode(v[3]) << 3)) | kcode(v[4]) | 0x10)
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPD imm8, zmm, zmm, k{k}
    if len(vv) == 0 && isImm8(v0) && isZMM(v1) && isZMM(v2) && isKk(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // |0x40 / |0x20 / |0x00 across the zmm/ymm/xmm reg-reg forms
            // select the vector length in the EVEX prefix.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPD imm8, m128/m64bcst, xmm, k{k}
    if len(vv) == 0 && isImm8(v0) && isM128M64bcst(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, bcode(v[1]))
            m.emit(0xc2)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPD imm8, xmm, xmm, k{k}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPD imm8, m256/m64bcst, ymm, k{k}
    if len(vv) == 0 && isImm8(v0) && isM256M64bcst(v1) && isEVEXYMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, bcode(v[1]))
            m.emit(0xc2)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPD imm8, ymm, ymm, k{k}
    if len(vv) == 0 && isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VCMPPD")
    }
    return p
}
 36354  
// VCMPPS performs "Compare Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VCMPPS
// Supported forms : (11 forms)
//
//    * VCMPPS imm8, xmm, xmm, xmm              [AVX]
//    * VCMPPS imm8, m128, xmm, xmm             [AVX]
//    * VCMPPS imm8, ymm, ymm, ymm              [AVX]
//    * VCMPPS imm8, m256, ymm, ymm             [AVX]
//    * VCMPPS imm8, m512/m32bcst, zmm, k{k}    [AVX512F]
//    * VCMPPS imm8, {sae}, zmm, zmm, k{k}      [AVX512F]
//    * VCMPPS imm8, zmm, zmm, k{k}             [AVX512F]
//    * VCMPPS imm8, m128/m32bcst, xmm, k{k}    [AVX512F,AVX512VL]
//    * VCMPPS imm8, xmm, xmm, k{k}             [AVX512F,AVX512VL]
//    * VCMPPS imm8, m256/m32bcst, ymm, k{k}    [AVX512F,AVX512VL]
//    * VCMPPS imm8, ymm, ymm, k{k}             [AVX512F,AVX512VL]
//
func (self *Program) VCMPPS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}, vv ...interface{}) *Instruction {
    // The variadic tail distinguishes the 4-operand forms from the 5-operand
    // {sae} form. Structurally identical to VCMPPD, differing only in the
    // prefix bytes that select the PS (no 66h prefix) variant.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VCMPPS", 4, Operands { v0, v1, v2, v3 })
        case 1  : p = self.alloc("VCMPPS", 5, Operands { v0, v1, v2, v3, vv[0] })
        default : panic("instruction VCMPPS takes 4 or 5 operands")
    }
    // All forms share opcode 0xc2 and a trailing imm8 predicate byte.
    // VCMPPS imm8, xmm, xmm, xmm
    if len(vv) == 0 && isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix; 0xc0|... is the register-direct ModRM byte.
            m.vex2(0, hcode(v[3]), v[1], hlcode(v[2]))
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPS imm8, m128, xmm, xmm
    if len(vv) == 0 && isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xc2)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPS imm8, ymm, ymm, ymm
    if len(vv) == 0 && isImm8(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[3]), v[1], hlcode(v[2]))
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPS imm8, m256, ymm, ymm
    if len(vv) == 0 && isImm8(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xc2)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPS imm8, m512/m32bcst, zmm, k{k}
    if len(vv) == 0 && isImm8(v0) && isM512M32bcst(v1) && isZMM(v2) && isKk(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; bcode(v[1]) carries the broadcast bit and the
            // final mrsd argument (64) is the full-vector compressed-disp8 scale.
            m.evex(0b01, 0x04, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, bcode(v[1]))
            m.emit(0xc2)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPS imm8, {sae}, zmm, zmm, k{k}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isZMM(v2) && isZMM(v3) && isKk(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix; the |0x10 here (vs |0x40 in the
            // plain zmm form) sets the bit that encodes {sae} for reg-reg forms.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[4]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[4]) << 4)))
            m.emit(0x7c ^ (hlcode(v[3]) << 3))
            m.emit((0x08 ^ (ecode(v[3]) << 3)) | kcode(v[4]) | 0x10)
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPS imm8, zmm, zmm, k{k}
    if len(vv) == 0 && isImm8(v0) && isZMM(v1) && isZMM(v2) && isKk(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // |0x40 / |0x20 / |0x00 across the zmm/ymm/xmm reg-reg forms
            // select the vector length in the EVEX prefix.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7c ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPS imm8, m128/m32bcst, xmm, k{k}
    if len(vv) == 0 && isImm8(v0) && isM128M32bcst(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, bcode(v[1]))
            m.emit(0xc2)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPS imm8, xmm, xmm, k{k}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7c ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPS imm8, m256/m32bcst, ymm, k{k}
    if len(vv) == 0 && isImm8(v0) && isM256M32bcst(v1) && isEVEXYMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, bcode(v[1]))
            m.emit(0xc2)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPPS imm8, ymm, ymm, k{k}
    if len(vv) == 0 && isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7c ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VCMPPS")
    }
    return p
}
 36517  
// VCMPSD performs "Compare Scalar Double-Precision Floating-Point Values".
//
// Mnemonic        : VCMPSD
// Supported forms : (5 forms)
//
//    * VCMPSD imm8, xmm, xmm, xmm            [AVX]
//    * VCMPSD imm8, m64, xmm, xmm            [AVX]
//    * VCMPSD imm8, m64, xmm, k{k}           [AVX512F]
//    * VCMPSD imm8, {sae}, xmm, xmm, k{k}    [AVX512F]
//    * VCMPSD imm8, xmm, xmm, k{k}           [AVX512F]
//
func (self *Program) VCMPSD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}, vv ...interface{}) *Instruction {
    // The variadic tail distinguishes the 4-operand forms from the 5-operand
    // {sae} form. All forms share opcode 0xc2 plus a trailing imm8 predicate.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VCMPSD", 4, Operands { v0, v1, v2, v3 })
        case 1  : p = self.alloc("VCMPSD", 5, Operands { v0, v1, v2, v3, vv[0] })
        default : panic("instruction VCMPSD takes 4 or 5 operands")
    }
    // VCMPSD imm8, xmm, xmm, xmm
    if len(vv) == 0 && isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix; 0xc0|... is the register-direct ModRM byte.
            m.vex2(3, hcode(v[3]), v[1], hlcode(v[2]))
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPSD imm8, m64, xmm, xmm
    if len(vv) == 0 && isImm8(v0) && isM64(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xc2)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPSD imm8, m64, xmm, k{k}
    if len(vv) == 0 && isImm8(v0) && isM64(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; mrsd's final argument (8) is the
            // compressed-disp8 scale, matching the 64-bit scalar size.
            m.evex(0b01, 0x87, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, 0)
            m.emit(0xc2)
            m.mrsd(lcode(v[3]), addr(v[1]), 8)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPSD imm8, {sae}, xmm, xmm, k{k}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) && isKk(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix; the |0x10 here (vs |0x40 in the
            // plain reg-reg form below) sets the bit that encodes {sae}.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[4]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[4]) << 4)))
            m.emit(0xff ^ (hlcode(v[3]) << 3))
            m.emit((0x08 ^ (ecode(v[3]) << 3)) | kcode(v[4]) | 0x10)
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPSD imm8, xmm, xmm, k{k}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xff ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VCMPSD")
    }
    return p
}
 36602  
// VCMPSS performs "Compare Scalar Single-Precision Floating-Point Values".
//
// Mnemonic        : VCMPSS
// Supported forms : (5 forms)
//
//    * VCMPSS imm8, xmm, xmm, xmm            [AVX]
//    * VCMPSS imm8, m32, xmm, xmm            [AVX]
//    * VCMPSS imm8, m32, xmm, k{k}           [AVX512F]
//    * VCMPSS imm8, {sae}, xmm, xmm, k{k}    [AVX512F]
//    * VCMPSS imm8, xmm, xmm, k{k}           [AVX512F]
//
func (self *Program) VCMPSS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}, vv ...interface{}) *Instruction {
    // Structurally identical to VCMPSD, but for 32-bit scalars: different
    // prefix bytes and a disp8 scale of 4 instead of 8 in the memory form.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VCMPSS", 4, Operands { v0, v1, v2, v3 })
        case 1  : p = self.alloc("VCMPSS", 5, Operands { v0, v1, v2, v3, vv[0] })
        default : panic("instruction VCMPSS takes 4 or 5 operands")
    }
    // VCMPSS imm8, xmm, xmm, xmm
    if len(vv) == 0 && isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix; 0xc0|... is the register-direct ModRM byte.
            m.vex2(2, hcode(v[3]), v[1], hlcode(v[2]))
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPSS imm8, m32, xmm, xmm
    if len(vv) == 0 && isImm8(v0) && isM32(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xc2)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPSS imm8, m32, xmm, k{k}
    if len(vv) == 0 && isImm8(v0) && isM32(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; mrsd's final argument (4) is the
            // compressed-disp8 scale, matching the 32-bit scalar size.
            m.evex(0b01, 0x06, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, 0)
            m.emit(0xc2)
            m.mrsd(lcode(v[3]), addr(v[1]), 4)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPSS imm8, {sae}, xmm, xmm, k{k}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) && isKk(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix; the |0x10 here (vs |0x40 in the
            // plain reg-reg form below) sets the bit that encodes {sae}.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[4]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[4]) << 4)))
            m.emit(0x7e ^ (hlcode(v[3]) << 3))
            m.emit((0x08 ^ (ecode(v[3]) << 3)) | kcode(v[4]) | 0x10)
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCMPSS imm8, xmm, xmm, k{k}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7e ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VCMPSS")
    }
    return p
}
 36687  
// VCOMISD performs "Compare Scalar Ordered Double-Precision Floating-Point Values and Set EFLAGS".
//
// Mnemonic        : VCOMISD
// Supported forms : (5 forms)
//
//    * VCOMISD xmm, xmm           [AVX]
//    * VCOMISD m64, xmm           [AVX]
//    * VCOMISD m64, xmm           [AVX512F]
//    * VCOMISD {sae}, xmm, xmm    [AVX512F]
//    * VCOMISD xmm, xmm           [AVX512F]
//
// Every form whose operand types match is added as a candidate encoding;
// the function panics if no form matches.
func (self *Program) VCOMISD(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VCOMISD", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCOMISD", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCOMISD takes 2 or 3 operands")
    }
    // VCOMISD xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX-prefixed form: opcode 0x2f, register-direct ModRM (mod=11, reg=v[1], rm=v[0]).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[1]), v[0], 0)
            m.emit(0x2f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCOMISD m64, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX-prefixed form with a memory operand; mrsd encodes ModRM/SIB/displacement.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x2f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCOMISD m64, xmm
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX-prefixed memory form (via the evex helper); compressed
        // displacement is scaled by 8 in mrsd.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x2f)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VCOMISD {sae}, xmm, xmm
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isEVEXXMM(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX prefix (0x62) plus three payload bytes emitted manually;
        // 0x18 in the third payload byte selects the {sae} behavior.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit(0x18)
            m.emit(0x2f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCOMISD xmm, xmm
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX register-register form, prefix bytes emitted manually.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit(0x48)
            m.emit(0x2f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VCOMISD")
    }
    return p
}
 36767  
// VCOMISS performs "Compare Scalar Ordered Single-Precision Floating-Point Values and Set EFLAGS".
//
// Mnemonic        : VCOMISS
// Supported forms : (5 forms)
//
//    * VCOMISS xmm, xmm           [AVX]
//    * VCOMISS m32, xmm           [AVX]
//    * VCOMISS m32, xmm           [AVX512F]
//    * VCOMISS {sae}, xmm, xmm    [AVX512F]
//    * VCOMISS xmm, xmm           [AVX512F]
//
// Every form whose operand types match is added as a candidate encoding;
// the function panics if no form matches.
func (self *Program) VCOMISS(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VCOMISS", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCOMISS", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCOMISS takes 2 or 3 operands")
    }
    // VCOMISS xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX-prefixed form: opcode 0x2f, register-direct ModRM (mod=11, reg=v[1], rm=v[0]).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[1]), v[0], 0)
            m.emit(0x2f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCOMISS m32, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX-prefixed form with a memory operand; mrsd encodes ModRM/SIB/displacement.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x2f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCOMISS m32, xmm
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX-prefixed memory form; compressed displacement scaled by 4
        // (single-precision element) in mrsd.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x2f)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VCOMISS {sae}, xmm, xmm
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isEVEXXMM(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX prefix (0x62) plus three payload bytes emitted manually;
        // 0x18 in the third payload byte selects the {sae} behavior.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c)
            m.emit(0x18)
            m.emit(0x2f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCOMISS xmm, xmm
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX register-register form, prefix bytes emitted manually.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit(0x48)
            m.emit(0x2f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VCOMISS")
    }
    return p
}
 36847  
// VCOMPRESSPD performs "Store Sparse Packed Double-Precision Floating-Point Values into Dense Memory/Register".
//
// Mnemonic        : VCOMPRESSPD
// Supported forms : (6 forms)
//
//    * VCOMPRESSPD zmm, zmm{k}{z}     [AVX512F]
//    * VCOMPRESSPD zmm, m512{k}{z}    [AVX512F]
//    * VCOMPRESSPD xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VCOMPRESSPD xmm, m128{k}{z}    [AVX512F,AVX512VL]
//    * VCOMPRESSPD ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VCOMPRESSPD ymm, m256{k}{z}    [AVX512F,AVX512VL]
//
// All forms are EVEX-only (AVX-512). Every form whose operand types match
// is added as a candidate encoding; the function panics if no form matches.
func (self *Program) VCOMPRESSPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VCOMPRESSPD", 2, Operands { v0, v1 })
    // VCOMPRESSPD zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX register form; 0x48 marks 512-bit vector length, zcode/kcode
        // fold in the {z} and {k} masking bits of the destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x8a)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VCOMPRESSPD zmm, m512{k}{z}
    if isZMM(v0) && isM512kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX memory form; compressed displacement scaled by 8 in mrsd.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x8a)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VCOMPRESSPD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX register form; 0x08 marks 128-bit vector length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x8a)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VCOMPRESSPD xmm, m128{k}{z}
    if isEVEXXMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX memory form; compressed displacement scaled by 8 in mrsd.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x8a)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VCOMPRESSPD ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX register form; 0x28 marks 256-bit vector length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x8a)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VCOMPRESSPD ymm, m256{k}{z}
    if isEVEXYMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX memory form; compressed displacement scaled by 8 in mrsd.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x8a)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VCOMPRESSPD")
    }
    return p
}
 36936  
// VCOMPRESSPS performs "Store Sparse Packed Single-Precision Floating-Point Values into Dense Memory/Register".
//
// Mnemonic        : VCOMPRESSPS
// Supported forms : (6 forms)
//
//    * VCOMPRESSPS zmm, zmm{k}{z}     [AVX512F]
//    * VCOMPRESSPS zmm, m512{k}{z}    [AVX512F]
//    * VCOMPRESSPS xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VCOMPRESSPS xmm, m128{k}{z}    [AVX512F,AVX512VL]
//    * VCOMPRESSPS ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VCOMPRESSPS ymm, m256{k}{z}    [AVX512F,AVX512VL]
//
// All forms are EVEX-only (AVX-512). Every form whose operand types match
// is added as a candidate encoding; the function panics if no form matches.
func (self *Program) VCOMPRESSPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VCOMPRESSPS", 2, Operands { v0, v1 })
    // VCOMPRESSPS zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX register form; 0x48 marks 512-bit vector length, zcode/kcode
        // fold in the {z} and {k} masking bits of the destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x8a)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VCOMPRESSPS zmm, m512{k}{z}
    if isZMM(v0) && isM512kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX memory form; compressed displacement scaled by 4 in mrsd.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x8a)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // VCOMPRESSPS xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX register form; 0x08 marks 128-bit vector length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x8a)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VCOMPRESSPS xmm, m128{k}{z}
    if isEVEXXMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX memory form; compressed displacement scaled by 4 in mrsd.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x8a)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // VCOMPRESSPS ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX register form; 0x28 marks 256-bit vector length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x8a)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VCOMPRESSPS ymm, m256{k}{z}
    if isEVEXYMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX memory form; compressed displacement scaled by 4 in mrsd.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x8a)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VCOMPRESSPS")
    }
    return p
}
 37025  
// VCVTDQ2PD performs "Convert Packed Dword Integers to Packed Double-Precision FP Values".
//
// Mnemonic        : VCVTDQ2PD
// Supported forms : (10 forms)
//
//    * VCVTDQ2PD xmm, xmm                   [AVX]
//    * VCVTDQ2PD m64, xmm                   [AVX]
//    * VCVTDQ2PD xmm, ymm                   [AVX]
//    * VCVTDQ2PD m128, ymm                  [AVX]
//    * VCVTDQ2PD m256/m32bcst, zmm{k}{z}    [AVX512F]
//    * VCVTDQ2PD ymm, zmm{k}{z}             [AVX512F]
//    * VCVTDQ2PD m64/m32bcst, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VCVTDQ2PD m128/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTDQ2PD xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VCVTDQ2PD xmm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Every form whose operand types match is added as a candidate encoding;
// the function panics if no form matches.
func (self *Program) VCVTDQ2PD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VCVTDQ2PD", 2, Operands { v0, v1 })
    // VCVTDQ2PD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX-prefixed 128-bit form: opcode 0xe6, register-direct ModRM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[1]), v[0], 0)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTDQ2PD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX-prefixed 128-bit form with a memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[1]), addr(v[0]), 0)
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTDQ2PD xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX-prefixed 256-bit form, register-direct ModRM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(6, hcode(v[1]), v[0], 0)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTDQ2PD m128, ymm
    if isM128(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX-prefixed 256-bit form with a memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(6, hcode(v[1]), addr(v[0]), 0)
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTDQ2PD m256/m32bcst, zmm{k}{z}
    if isM256M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX memory/broadcast form; bcode selects m32bcst, displacement
        // scaled by 32 in mrsd.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTDQ2PD ymm, zmm{k}{z}
    if isEVEXYMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX register form; 0x48 marks 512-bit vector length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTDQ2PD m64/m32bcst, xmm{k}{z}
    if isM64M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX memory/broadcast form; displacement scaled by 8 in mrsd.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VCVTDQ2PD m128/m32bcst, ymm{k}{z}
    if isM128M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX memory/broadcast form; displacement scaled by 16 in mrsd.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTDQ2PD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX register form; 0x08 marks 128-bit vector length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTDQ2PD xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX register form; 0x28 marks 256-bit vector length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VCVTDQ2PD")
    }
    return p
}
 37158  
// VCVTDQ2PS performs "Convert Packed Dword Integers to Packed Single-Precision FP Values".
//
// Mnemonic        : VCVTDQ2PS
// Supported forms : (11 forms)
//
//    * VCVTDQ2PS xmm, xmm                   [AVX]
//    * VCVTDQ2PS m128, xmm                  [AVX]
//    * VCVTDQ2PS ymm, ymm                   [AVX]
//    * VCVTDQ2PS m256, ymm                  [AVX]
//    * VCVTDQ2PS m512/m32bcst, zmm{k}{z}    [AVX512F]
//    * VCVTDQ2PS {er}, zmm, zmm{k}{z}       [AVX512F]
//    * VCVTDQ2PS zmm, zmm{k}{z}             [AVX512F]
//    * VCVTDQ2PS m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTDQ2PS m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTDQ2PS xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VCVTDQ2PS ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Every form whose operand types match is added as a candidate encoding;
// the function panics if no form matches.
func (self *Program) VCVTDQ2PS(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VCVTDQ2PS", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTDQ2PS", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTDQ2PS takes 2 or 3 operands")
    }
    // VCVTDQ2PS xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX-prefixed 128-bit form: opcode 0x5b, register-direct ModRM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[1]), v[0], 0)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTDQ2PS m128, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX-prefixed 128-bit form with a memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTDQ2PS ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX-prefixed 256-bit form, register-direct ModRM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[1]), v[0], 0)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTDQ2PS m256, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX-prefixed 256-bit form with a memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTDQ2PS m512/m32bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX memory/broadcast form; bcode selects m32bcst, displacement
        // scaled by 64 in mrsd.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VCVTDQ2PS {er}, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX form with embedded rounding: vcode(v[0]) supplies the {er}
        // mode bits, 0x18 sets the rounding-control enable.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[2]) << 7) | (vcode(v[0]) << 5) | kcode(v[2]) | 0x18)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTDQ2PS zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX register form; 0x48 marks 512-bit vector length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTDQ2PS m128/m32bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX memory/broadcast form; displacement scaled by 16 in mrsd.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTDQ2PS m256/m32bcst, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX memory/broadcast form; displacement scaled by 32 in mrsd.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTDQ2PS xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX register form; 0x08 marks 128-bit vector length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTDQ2PS ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX register form; 0x28 marks 256-bit vector length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VCVTDQ2PS")
    }
    return p
}
 37310  
// VCVTPD2DQ performs "Convert Packed Double-Precision FP Values to Packed Dword Integers".
//
// Mnemonic        : VCVTPD2DQ
// Supported forms : (11 forms)
//
//    * VCVTPD2DQ xmm, xmm                   [AVX]
//    * VCVTPD2DQ ymm, xmm                   [AVX]
//    * VCVTPD2DQ m128, xmm                  [AVX]
//    * VCVTPD2DQ m256, xmm                  [AVX]
//    * VCVTPD2DQ m512/m64bcst, ymm{k}{z}    [AVX512F]
//    * VCVTPD2DQ {er}, zmm, ymm{k}{z}       [AVX512F]
//    * VCVTPD2DQ zmm, ymm{k}{z}             [AVX512F]
//    * VCVTPD2DQ m128/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTPD2DQ m256/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTPD2DQ xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VCVTPD2DQ ymm, xmm{k}{z}             [AVX512F,AVX512VL]
//
// Every form whose operand types match is added as a candidate encoding;
// the function panics if no form matches.
func (self *Program) VCVTPD2DQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VCVTPD2DQ", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTPD2DQ", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTPD2DQ takes 2 or 3 operands")
    }
    // VCVTPD2DQ xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX-prefixed 128-bit form: opcode 0xe6, register-direct ModRM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[1]), v[0], 0)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPD2DQ ymm, xmm
    if len(vv) == 0 && isYMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX-prefixed 256-bit-source form, register-direct ModRM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(7, hcode(v[1]), v[0], 0)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPD2DQ m128, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX-prefixed 128-bit form with a memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[1]), addr(v[0]), 0)
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTPD2DQ m256, xmm
    if len(vv) == 0 && isM256(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX-prefixed 256-bit-source form with a memory source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(7, hcode(v[1]), addr(v[0]), 0)
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTPD2DQ m512/m64bcst, ymm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX memory/broadcast form; bcode selects m64bcst, displacement
        // scaled by 64 in mrsd.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VCVTPD2DQ {er}, zmm, ymm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isYMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX form with embedded rounding: vcode(v[0]) supplies the {er}
        // mode bits, 0x18 sets the rounding-control enable.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xff)
            m.emit((zcode(v[2]) << 7) | (vcode(v[0]) << 5) | kcode(v[2]) | 0x18)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTPD2DQ zmm, ymm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX register form; 0x48 marks 512-bit vector length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xff)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPD2DQ m128/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX memory/broadcast form; displacement scaled by 16 in mrsd.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTPD2DQ m256/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX memory/broadcast form; displacement scaled by 32 in mrsd.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTPD2DQ xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX register form; 0x08 marks 128-bit vector length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xff)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPD2DQ ymm, xmm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX register form; 0x28 marks 256-bit vector length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xff)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VCVTPD2DQ")
    }
    return p
}
 37462  
// VCVTPD2PS performs "Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values".
//
// Mnemonic        : VCVTPD2PS
// Supported forms : (11 forms)
//
//    * VCVTPD2PS xmm, xmm                   [AVX]
//    * VCVTPD2PS ymm, xmm                   [AVX]
//    * VCVTPD2PS m128, xmm                  [AVX]
//    * VCVTPD2PS m256, xmm                  [AVX]
//    * VCVTPD2PS m512/m64bcst, ymm{k}{z}    [AVX512F]
//    * VCVTPD2PS {er}, zmm, ymm{k}{z}       [AVX512F]
//    * VCVTPD2PS zmm, ymm{k}{z}             [AVX512F]
//    * VCVTPD2PS m128/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTPD2PS m256/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTPD2PS xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VCVTPD2PS ymm, xmm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VCVTPD2PS(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional third operand is only present for the {er} rounding form.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTPD2PS", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTPD2PS", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTPD2PS takes 2 or 3 operands")
    }
    // Each operand pattern that matches registers an encoder closure via
    // p.add; if none matched, p.len stays 0 and we panic at the end.
    // VCVTPD2PS xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix, opcode 0x5a, then the reg-reg ModRM byte.
            m.vex2(1, hcode(v[1]), v[0], 0)
            m.emit(0x5a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPD2PS ymm, xmm
    if len(vv) == 0 && isYMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[1]), v[0], 0)
            m.emit(0x5a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPD2PS m128, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x5a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTPD2PS m256, xmm
    if len(vv) == 0 && isM256(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x5a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTPD2PS m512/m64bcst, ymm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix via helper; the trailing 64 passed to mrsd is the
            // memory operand size, presumably used for compressed disp8*N
            // displacement scaling — confirm against _Encoding.mrsd.
            m.evex(0b01, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x5a)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VCVTPD2PS {er}, zmm, ymm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isYMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (0x62 escape + 3 payload bytes);
            // vcode(v[0]) embeds the rounding mode of the {er} operand, and
            // 0x18 presumably sets EVEX.b to select embedded rounding —
            // verify against the Intel SDM EVEX encoding tables.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | (vcode(v[0]) << 5) | kcode(v[2]) | 0x18)
            m.emit(0x5a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTPD2PS zmm, ymm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Reg-reg EVEX form: the 0x48 constant marks the 512-bit form;
            // the 128/256-bit forms below use 0x08/0x28 in the same position.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x5a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPD2PS m128/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x5a)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTPD2PS m256/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x5a)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTPD2PS xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x5a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPD2PS ymm, xmm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x5a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VCVTPD2PS")
    }
    return p
}
 37614  
// VCVTPD2QQ performs "Convert Packed Double-Precision Floating-Point Values to Packed Quadword Integers".
//
// Mnemonic        : VCVTPD2QQ
// Supported forms : (7 forms)
//
//    * VCVTPD2QQ m512/m64bcst, zmm{k}{z}    [AVX512DQ]
//    * VCVTPD2QQ {er}, zmm, zmm{k}{z}       [AVX512DQ]
//    * VCVTPD2QQ zmm, zmm{k}{z}             [AVX512DQ]
//    * VCVTPD2QQ m128/m64bcst, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VCVTPD2QQ m256/m64bcst, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VCVTPD2QQ xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VCVTPD2QQ ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
//
func (self *Program) VCVTPD2QQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional third operand is only present for the {er} rounding form.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTPD2QQ", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTPD2QQ", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTPD2QQ takes 2 or 3 operands")
    }
    // Each operand pattern that matches registers an encoder closure via
    // p.add; if none matched, p.len stays 0 and we panic at the end.
    // VCVTPD2QQ m512/m64bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix via helper; the trailing 64 passed to mrsd is the
            // memory operand size, presumably used for compressed disp8*N
            // displacement scaling — confirm against _Encoding.mrsd.
            m.evex(0b01, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7b)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VCVTPD2QQ {er}, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (0x62 escape + 3 payload bytes);
            // vcode(v[0]) embeds the rounding mode of the {er} operand, and
            // 0x18 presumably sets EVEX.b to select embedded rounding —
            // verify against the Intel SDM EVEX encoding tables.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | (vcode(v[0]) << 5) | kcode(v[2]) | 0x18)
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTPD2QQ zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Reg-reg EVEX form: the 0x48 constant marks the 512-bit form;
            // the 128/256-bit forms below use 0x08/0x28 in the same position.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPD2QQ m128/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7b)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTPD2QQ m256/m64bcst, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7b)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTPD2QQ xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPD2QQ ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VCVTPD2QQ")
    }
    return p
}
 37722  
// VCVTPD2UDQ performs "Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers".
//
// Mnemonic        : VCVTPD2UDQ
// Supported forms : (7 forms)
//
//    * VCVTPD2UDQ m512/m64bcst, ymm{k}{z}    [AVX512F]
//    * VCVTPD2UDQ {er}, zmm, ymm{k}{z}       [AVX512F]
//    * VCVTPD2UDQ zmm, ymm{k}{z}             [AVX512F]
//    * VCVTPD2UDQ m128/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTPD2UDQ m256/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTPD2UDQ xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VCVTPD2UDQ ymm, xmm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VCVTPD2UDQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional third operand is only present for the {er} rounding form.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTPD2UDQ", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTPD2UDQ", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTPD2UDQ takes 2 or 3 operands")
    }
    // Each operand pattern that matches registers an encoder closure via
    // p.add; if none matched, p.len stays 0 and we panic at the end.
    // VCVTPD2UDQ m512/m64bcst, ymm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix via helper; the trailing 64 passed to mrsd is the
            // memory operand size, presumably used for compressed disp8*N
            // displacement scaling — confirm against _Encoding.mrsd.
            m.evex(0b01, 0x84, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VCVTPD2UDQ {er}, zmm, ymm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isYMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (0x62 escape + 3 payload bytes);
            // vcode(v[0]) embeds the rounding mode of the {er} operand, and
            // 0x18 presumably sets EVEX.b to select embedded rounding —
            // verify against the Intel SDM EVEX encoding tables.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfc)
            m.emit((zcode(v[2]) << 7) | (vcode(v[0]) << 5) | kcode(v[2]) | 0x18)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTPD2UDQ zmm, ymm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Reg-reg EVEX form: the 0x48 constant marks the 512-bit form;
            // the 128/256-bit forms below use 0x08/0x28 in the same position.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfc)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPD2UDQ m128/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x84, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTPD2UDQ m256/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x84, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTPD2UDQ xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfc)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPD2UDQ ymm, xmm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfc)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VCVTPD2UDQ")
    }
    return p
}
 37830  
// VCVTPD2UQQ performs "Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers".
//
// Mnemonic        : VCVTPD2UQQ
// Supported forms : (7 forms)
//
//    * VCVTPD2UQQ m512/m64bcst, zmm{k}{z}    [AVX512DQ]
//    * VCVTPD2UQQ {er}, zmm, zmm{k}{z}       [AVX512DQ]
//    * VCVTPD2UQQ zmm, zmm{k}{z}             [AVX512DQ]
//    * VCVTPD2UQQ m128/m64bcst, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VCVTPD2UQQ m256/m64bcst, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VCVTPD2UQQ xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VCVTPD2UQQ ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
//
func (self *Program) VCVTPD2UQQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional third operand is only present for the {er} rounding form.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTPD2UQQ", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTPD2UQQ", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTPD2UQQ takes 2 or 3 operands")
    }
    // Each operand pattern that matches registers an encoder closure via
    // p.add; if none matched, p.len stays 0 and we panic at the end.
    // VCVTPD2UQQ m512/m64bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix via helper; the trailing 64 passed to mrsd is the
            // memory operand size, presumably used for compressed disp8*N
            // displacement scaling — confirm against _Encoding.mrsd.
            m.evex(0b01, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VCVTPD2UQQ {er}, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (0x62 escape + 3 payload bytes);
            // vcode(v[0]) embeds the rounding mode of the {er} operand, and
            // 0x18 presumably sets EVEX.b to select embedded rounding —
            // verify against the Intel SDM EVEX encoding tables.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | (vcode(v[0]) << 5) | kcode(v[2]) | 0x18)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTPD2UQQ zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Reg-reg EVEX form: the 0x48 constant marks the 512-bit form;
            // the 128/256-bit forms below use 0x08/0x28 in the same position.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPD2UQQ m128/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTPD2UQQ m256/m64bcst, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTPD2UQQ xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPD2UQQ ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VCVTPD2UQQ")
    }
    return p
}
 37938  
// VCVTPH2PS performs "Convert Half-Precision FP Values to Single-Precision FP Values".
//
// Mnemonic        : VCVTPH2PS
// Supported forms : (11 forms)
//
//    * VCVTPH2PS xmm, xmm                 [F16C]
//    * VCVTPH2PS m64, xmm                 [F16C]
//    * VCVTPH2PS xmm, ymm                 [F16C]
//    * VCVTPH2PS m128, ymm                [F16C]
//    * VCVTPH2PS m256, zmm{k}{z}          [AVX512F]
//    * VCVTPH2PS {sae}, ymm, zmm{k}{z}    [AVX512F]
//    * VCVTPH2PS ymm, zmm{k}{z}           [AVX512F]
//    * VCVTPH2PS xmm, xmm{k}{z}           [AVX512F,AVX512VL]
//    * VCVTPH2PS xmm, ymm{k}{z}           [AVX512F,AVX512VL]
//    * VCVTPH2PS m64, xmm{k}{z}           [AVX512F,AVX512VL]
//    * VCVTPH2PS m128, ymm{k}{z}          [AVX512F,AVX512VL]
//
func (self *Program) VCVTPH2PS(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional third operand is only present for the {sae} form.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTPH2PS", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTPH2PS", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTPH2PS takes 2 or 3 operands")
    }
    // Each operand pattern that matches registers an encoder closure via
    // p.add; if none matched, p.len stays 0 and we panic at the end.
    // VCVTPH2PS xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) {
        self.require(ISA_F16C)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix (0xc4 escape), opcode 0x13,
            // then the reg-reg ModRM byte.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0x13)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPH2PS m64, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) {
        self.require(ISA_F16C)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x13)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTPH2PS xmm, ymm
    if len(vv) == 0 && isXMM(v0) && isYMM(v1) {
        self.require(ISA_F16C)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)
            m.emit(0x13)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPH2PS m128, ymm
    if len(vv) == 0 && isM128(v0) && isYMM(v1) {
        self.require(ISA_F16C)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x13)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTPH2PS m256, zmm{k}{z}
    if len(vv) == 0 && isM256(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix via helper; the trailing 32 passed to mrsd is the
            // memory operand size, presumably used for compressed disp8*N
            // displacement scaling — confirm against _Encoding.mrsd.
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x13)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTPH2PS {sae}, ymm, zmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isEVEXYMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (0x62 escape + 3 payload bytes);
            // 0x18 presumably sets EVEX.b for {sae} (suppress-all-exceptions)
            // — verify against the Intel SDM EVEX encoding tables.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x18)
            m.emit(0x13)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTPH2PS ymm, zmm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Reg-reg EVEX form: the 0x48 constant marks the 512-bit form;
            // the 128/256-bit forms below use 0x08/0x28 in the same position.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x13)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPH2PS xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x13)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPH2PS xmm, ymm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x13)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPH2PS m64, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x13)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VCVTPH2PS m128, ymm{k}{z}
    if len(vv) == 0 && isM128(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x13)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VCVTPH2PS")
    }
    return p
}
 38094  
// VCVTPS2DQ performs "Convert Packed Single-Precision FP Values to Packed Dword Integers".
//
// Mnemonic        : VCVTPS2DQ
// Supported forms : (11 forms)
//
//    * VCVTPS2DQ xmm, xmm                   [AVX]
//    * VCVTPS2DQ m128, xmm                  [AVX]
//    * VCVTPS2DQ ymm, ymm                   [AVX]
//    * VCVTPS2DQ m256, ymm                  [AVX]
//    * VCVTPS2DQ m512/m32bcst, zmm{k}{z}    [AVX512F]
//    * VCVTPS2DQ {er}, zmm, zmm{k}{z}       [AVX512F]
//    * VCVTPS2DQ zmm, zmm{k}{z}             [AVX512F]
//    * VCVTPS2DQ m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTPS2DQ m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTPS2DQ xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VCVTPS2DQ ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VCVTPS2DQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional variadic slot carries the third operand used only by the
    // {er} (rounding-control) form; every other form takes exactly two.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTPS2DQ", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTPS2DQ", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTPS2DQ takes 2 or 3 operands")
    }
    // VCVTPS2DQ xmm, xmm
    // VEX-encoded AVX form: two-byte VEX prefix, opcode 0x5b, register ModRM.
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[1]), v[0], 0)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPS2DQ m128, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTPS2DQ ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[1]), v[0], 0)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPS2DQ m256, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTPS2DQ m512/m32bcst, zmm{k}{z}
    // EVEX memory form: m.evex assembles the prefix (mask, zeroing, broadcast
    // bits included); m.mrsd emits ModRM/SIB — the last argument is presumably
    // the disp8 compression scale (N) for this operand size.
    if len(vv) == 0 && isM512M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VCVTPS2DQ {er}, zmm, zmm{k}{z}
    // EVEX register form with embedded rounding: the 0x62 escape byte and the
    // three EVEX payload bytes are assembled by hand; vcode(v[0]) places the
    // rounding-control bits.
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | (vcode(v[0]) << 5) | kcode(v[2]) | 0x18)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTPS2DQ zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPS2DQ m128/m32bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTPS2DQ m256/m32bcst, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTPS2DQ xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPS2DQ ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types: the operands are invalid.
    if p.len == 0 {
        panic("invalid operands for VCVTPS2DQ")
    }
    return p
}
 38246  
// VCVTPS2PD performs "Convert Packed Single-Precision FP Values to Packed Double-Precision FP Values".
//
// Mnemonic        : VCVTPS2PD
// Supported forms : (11 forms)
//
//    * VCVTPS2PD xmm, xmm                   [AVX]
//    * VCVTPS2PD m64, xmm                   [AVX]
//    * VCVTPS2PD xmm, ymm                   [AVX]
//    * VCVTPS2PD m128, ymm                  [AVX]
//    * VCVTPS2PD m256/m32bcst, zmm{k}{z}    [AVX512F]
//    * VCVTPS2PD {sae}, ymm, zmm{k}{z}      [AVX512F]
//    * VCVTPS2PD ymm, zmm{k}{z}             [AVX512F]
//    * VCVTPS2PD m64/m32bcst, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VCVTPS2PD m128/m32bcst, ymm{k}{z}    [AVX512VL]
//    * VCVTPS2PD xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VCVTPS2PD xmm, ymm{k}{z}             [AVX512VL]
//
func (self *Program) VCVTPS2PD(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional variadic slot carries the third operand used only by the
    // {sae} (suppress-all-exceptions) form; every other form takes two.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTPS2PD", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTPS2PD", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTPS2PD takes 2 or 3 operands")
    }
    // VCVTPS2PD xmm, xmm
    // VEX-encoded AVX form: two-byte VEX prefix, opcode 0x5a, register ModRM.
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[1]), v[0], 0)
            m.emit(0x5a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPS2PD m64, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x5a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTPS2PD xmm, ymm
    if len(vv) == 0 && isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[1]), v[0], 0)
            m.emit(0x5a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPS2PD m128, ymm
    if len(vv) == 0 && isM128(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x5a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTPS2PD m256/m32bcst, zmm{k}{z}
    // EVEX memory form: m.evex assembles the prefix (mask/zeroing/broadcast
    // bits); m.mrsd emits ModRM/SIB — the last argument is presumably the
    // disp8 compression scale for this operand size.
    if len(vv) == 0 && isM256M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x5a)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTPS2PD {sae}, ymm, zmm{k}{z}
    // EVEX register form with SAE: the 0x62 escape byte and EVEX payload
    // bytes are assembled by hand; 0x18 sets the EVEX.b bit for SAE.
    if len(vv) == 1 && isSAE(v0) && isEVEXYMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x18)
            m.emit(0x5a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTPS2PD ymm, zmm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x5a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPS2PD m64/m32bcst, xmm{k}{z}
    if len(vv) == 0 && isM64M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x5a)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VCVTPS2PD m128/m32bcst, ymm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x5a)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTPS2PD xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x5a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPS2PD xmm, ymm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x5a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types: the operands are invalid.
    if p.len == 0 {
        panic("invalid operands for VCVTPS2PD")
    }
    return p
}
 38398  
// VCVTPS2PH performs "Convert Single-Precision FP value to Half-Precision FP value".
//
// Mnemonic        : VCVTPS2PH
// Supported forms : (11 forms)
//
//    * VCVTPS2PH imm8, xmm, xmm                 [F16C]
//    * VCVTPS2PH imm8, ymm, xmm                 [F16C]
//    * VCVTPS2PH imm8, xmm, m64                 [F16C]
//    * VCVTPS2PH imm8, ymm, m128                [F16C]
//    * VCVTPS2PH imm8, zmm, m256{k}{z}          [AVX512F]
//    * VCVTPS2PH imm8, {sae}, zmm, ymm{k}{z}    [AVX512F]
//    * VCVTPS2PH imm8, zmm, ymm{k}{z}           [AVX512F]
//    * VCVTPS2PH imm8, xmm, xmm{k}{z}           [AVX512F,AVX512VL]
//    * VCVTPS2PH imm8, xmm, m64{k}{z}           [AVX512F,AVX512VL]
//    * VCVTPS2PH imm8, ymm, xmm{k}{z}           [AVX512F,AVX512VL]
//    * VCVTPS2PH imm8, ymm, m128{k}{z}          [AVX512F,AVX512VL]
//
func (self *Program) VCVTPS2PH(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Every form starts with an imm8 rounding selector; the optional fourth
    // operand is used only by the {sae} form.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTPS2PH", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VCVTPS2PH", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VCVTPS2PH takes 3 or 4 operands")
    }
    // VCVTPS2PH imm8, xmm, xmm
    // Three-byte VEX form: prefix bytes emitted by hand, opcode 0x1d, then
    // the register ModRM and the trailing imm8.
    if len(vv) == 0 && isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_F16C)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[1]) << 7) ^ (hcode(v[2]) << 5))
            m.emit(0x79)
            m.emit(0x1d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCVTPS2PH imm8, ymm, xmm
    if len(vv) == 0 && isImm8(v0) && isYMM(v1) && isXMM(v2) {
        self.require(ISA_F16C)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[1]) << 7) ^ (hcode(v[2]) << 5))
            m.emit(0x7d)
            m.emit(0x1d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCVTPS2PH imm8, xmm, m64
    // Memory destination: note this instruction stores FROM the register
    // operand v[1] INTO memory v[2].
    if len(vv) == 0 && isImm8(v0) && isXMM(v1) && isM64(v2) {
        self.require(ISA_F16C)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[1]), addr(v[2]), 0)
            m.emit(0x1d)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCVTPS2PH imm8, ymm, m128
    if len(vv) == 0 && isImm8(v0) && isYMM(v1) && isM128(v2) {
        self.require(ISA_F16C)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[1]), addr(v[2]), 0)
            m.emit(0x1d)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCVTPS2PH imm8, zmm, m256{k}{z}
    // EVEX memory form: m.evex assembles the prefix; m.mrsd emits ModRM/SIB —
    // the last argument is presumably the disp8 compression scale.
    if len(vv) == 0 && isImm8(v0) && isZMM(v1) && isM256kz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b10, ehcode(v[1]), addr(v[2]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x1d)
            m.mrsd(lcode(v[1]), addr(v[2]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCVTPS2PH imm8, {sae}, zmm, ymm{k}{z}
    // EVEX register form with SAE: 0x18 sets the EVEX.b bit.
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isZMM(v2) && isYMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[3]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[3]) << 7) | kcode(v[3]) | 0x18)
            m.emit(0x1d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[3]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCVTPS2PH imm8, zmm, ymm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isZMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)
            m.emit(0x1d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCVTPS2PH imm8, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x08)
            m.emit(0x1d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCVTPS2PH imm8, xmm, m64{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isM64kz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b00, ehcode(v[1]), addr(v[2]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x1d)
            m.mrsd(lcode(v[1]), addr(v[2]), 8)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCVTPS2PH imm8, ymm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXYMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x28)
            m.emit(0x1d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VCVTPS2PH imm8, ymm, m128{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXYMM(v1) && isM128kz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b01, ehcode(v[1]), addr(v[2]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x1d)
            m.mrsd(lcode(v[1]), addr(v[2]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operand types: the operands are invalid.
    if p.len == 0 {
        panic("invalid operands for VCVTPS2PH")
    }
    return p
}
 38565  
// VCVTPS2QQ performs "Convert Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values".
//
// Mnemonic        : VCVTPS2QQ
// Supported forms : (7 forms)
//
//    * VCVTPS2QQ m256/m32bcst, zmm{k}{z}    [AVX512DQ]
//    * VCVTPS2QQ {er}, ymm, zmm{k}{z}       [AVX512DQ]
//    * VCVTPS2QQ ymm, zmm{k}{z}             [AVX512DQ]
//    * VCVTPS2QQ m64/m32bcst, xmm{k}{z}     [AVX512DQ,AVX512VL]
//    * VCVTPS2QQ m128/m32bcst, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VCVTPS2QQ xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VCVTPS2QQ xmm, ymm{k}{z}             [AVX512DQ,AVX512VL]
//
func (self *Program) VCVTPS2QQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional variadic slot carries the third operand used only by the
    // {er} (rounding-control) form; every other form takes exactly two.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTPS2QQ", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTPS2QQ", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTPS2QQ takes 2 or 3 operands")
    }
    // VCVTPS2QQ m256/m32bcst, zmm{k}{z}
    // EVEX memory form: m.evex assembles the prefix (mask/zeroing/broadcast
    // bits); m.mrsd emits ModRM/SIB — the last argument is presumably the
    // disp8 compression scale for this operand size.
    if len(vv) == 0 && isM256M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7b)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTPS2QQ {er}, ymm, zmm{k}{z}
    // EVEX register form with embedded rounding: the 0x62 escape byte and
    // EVEX payload bytes are built by hand; vcode(v[0]) holds the RC bits.
    if len(vv) == 1 && isER(v0) && isEVEXYMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | (vcode(v[0]) << 5) | kcode(v[2]) | 0x18)
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTPS2QQ ymm, zmm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPS2QQ m64/m32bcst, xmm{k}{z}
    if len(vv) == 0 && isM64M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7b)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VCVTPS2QQ m128/m32bcst, ymm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7b)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTPS2QQ xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPS2QQ xmm, ymm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types: the operands are invalid.
    if p.len == 0 {
        panic("invalid operands for VCVTPS2QQ")
    }
    return p
}
 38673  
// VCVTPS2UDQ performs "Convert Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values".
//
// Mnemonic        : VCVTPS2UDQ
// Supported forms : (7 forms)
//
//    * VCVTPS2UDQ m512/m32bcst, zmm{k}{z}    [AVX512F]
//    * VCVTPS2UDQ {er}, zmm, zmm{k}{z}       [AVX512F]
//    * VCVTPS2UDQ zmm, zmm{k}{z}             [AVX512F]
//    * VCVTPS2UDQ m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTPS2UDQ m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTPS2UDQ xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VCVTPS2UDQ ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VCVTPS2UDQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional variadic slot carries the third operand used only by the
    // {er} (rounding-control) form; every other form takes exactly two.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTPS2UDQ", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTPS2UDQ", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTPS2UDQ takes 2 or 3 operands")
    }
    // VCVTPS2UDQ m512/m32bcst, zmm{k}{z}
    // EVEX memory form: m.evex assembles the prefix (mask/zeroing/broadcast
    // bits); m.mrsd emits ModRM/SIB — the last argument is presumably the
    // disp8 compression scale for this operand size.
    if len(vv) == 0 && isM512M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VCVTPS2UDQ {er}, zmm, zmm{k}{z}
    // EVEX register form with embedded rounding: the 0x62 escape byte and
    // EVEX payload bytes are built by hand; vcode(v[0]) holds the RC bits.
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[2]) << 7) | (vcode(v[0]) << 5) | kcode(v[2]) | 0x18)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTPS2UDQ zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPS2UDQ m128/m32bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTPS2UDQ m256/m32bcst, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTPS2UDQ xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPS2UDQ ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types: the operands are invalid.
    if p.len == 0 {
        panic("invalid operands for VCVTPS2UDQ")
    }
    return p
}
 38781  
// VCVTPS2UQQ performs "Convert Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values".
//
// Mnemonic        : VCVTPS2UQQ
// Supported forms : (7 forms)
//
//    * VCVTPS2UQQ m256/m32bcst, zmm{k}{z}    [AVX512DQ]
//    * VCVTPS2UQQ {er}, ymm, zmm{k}{z}       [AVX512DQ]
//    * VCVTPS2UQQ ymm, zmm{k}{z}             [AVX512DQ]
//    * VCVTPS2UQQ m64/m32bcst, xmm{k}{z}     [AVX512DQ,AVX512VL]
//    * VCVTPS2UQQ m128/m32bcst, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VCVTPS2UQQ xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VCVTPS2UQQ xmm, ymm{k}{z}             [AVX512DQ,AVX512VL]
//
func (self *Program) VCVTPS2UQQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional variadic operand selects between the plain 2-operand
    // forms and the 3-operand {er} (embedded-rounding) form.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTPS2UQQ", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTPS2UQQ", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTPS2UQQ takes 2 or 3 operands")
    }
    // Each matching operand pattern below registers one candidate encoder
    // via p.add; at least one pattern must match or we panic at the end.
    // VCVTPS2UQQ m256/m32bcst, zmm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: EVEX prefix built by the helper; the final mrsd
            // argument is the disp8 compression scale (memory operand width).
            m.evex(0b01, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTPS2UQQ {er}, ymm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXYMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form with embedded rounding: hand-rolled 4-byte EVEX
            // prefix (0x62 escape + three payload bytes), opcode, then ModRM.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | (vcode(v[0]) << 5) | kcode(v[2]) | 0x18)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTPS2UQQ ymm, zmm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPS2UQQ m64/m32bcst, xmm{k}{z}
    if len(vv) == 0 && isM64M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VCVTPS2UQQ m128/m32bcst, ymm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTPS2UQQ xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTPS2UQQ xmm, ymm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No supported operand form matched.
    if p.len == 0 {
        panic("invalid operands for VCVTPS2UQQ")
    }
    return p
}
 38889  
// VCVTQQ2PD performs "Convert Packed Quadword Integers to Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VCVTQQ2PD
// Supported forms : (7 forms)
//
//    * VCVTQQ2PD m512/m64bcst, zmm{k}{z}    [AVX512DQ]
//    * VCVTQQ2PD {er}, zmm, zmm{k}{z}       [AVX512DQ]
//    * VCVTQQ2PD zmm, zmm{k}{z}             [AVX512DQ]
//    * VCVTQQ2PD m128/m64bcst, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VCVTQQ2PD m256/m64bcst, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VCVTQQ2PD xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VCVTQQ2PD ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
//
func (self *Program) VCVTQQ2PD(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional variadic operand selects between the plain 2-operand
    // forms and the 3-operand {er} (embedded-rounding) form.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTQQ2PD", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTQQ2PD", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTQQ2PD takes 2 or 3 operands")
    }
    // Each matching operand pattern registers one candidate encoder; at
    // least one must match or we panic at the end.
    // VCVTQQ2PD m512/m64bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: EVEX prefix via helper; the final mrsd argument is
            // the disp8 compression scale (memory operand width in bytes).
            m.evex(0b01, 0x86, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VCVTQQ2PD {er}, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form with embedded rounding: hand-rolled 4-byte EVEX
            // prefix (0x62 escape + three payload bytes), opcode, then ModRM.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfe)
            m.emit((zcode(v[2]) << 7) | (vcode(v[0]) << 5) | kcode(v[2]) | 0x18)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTQQ2PD zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTQQ2PD m128/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x86, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTQQ2PD m256/m64bcst, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x86, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTQQ2PD xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTQQ2PD ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No supported operand form matched.
    if p.len == 0 {
        panic("invalid operands for VCVTQQ2PD")
    }
    return p
}
 38997  
// VCVTQQ2PS performs "Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VCVTQQ2PS
// Supported forms : (7 forms)
//
//    * VCVTQQ2PS m512/m64bcst, ymm{k}{z}    [AVX512DQ]
//    * VCVTQQ2PS {er}, zmm, ymm{k}{z}       [AVX512DQ]
//    * VCVTQQ2PS zmm, ymm{k}{z}             [AVX512DQ]
//    * VCVTQQ2PS m128/m64bcst, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VCVTQQ2PS m256/m64bcst, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VCVTQQ2PS xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VCVTQQ2PS ymm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//
func (self *Program) VCVTQQ2PS(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional variadic operand selects between the plain 2-operand
    // forms and the 3-operand {er} (embedded-rounding) form.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTQQ2PS", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTQQ2PS", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTQQ2PS takes 2 or 3 operands")
    }
    // Each matching operand pattern registers one candidate encoder; at
    // least one must match or we panic at the end.
    // VCVTQQ2PS m512/m64bcst, ymm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: EVEX prefix via helper; the final mrsd argument is
            // the disp8 compression scale (memory operand width in bytes).
            m.evex(0b01, 0x84, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VCVTQQ2PS {er}, zmm, ymm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isYMMkz(vv[0]) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form with embedded rounding: hand-rolled 4-byte EVEX
            // prefix (0x62 escape + three payload bytes), opcode, then ModRM.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfc)
            m.emit((zcode(v[2]) << 7) | (vcode(v[0]) << 5) | kcode(v[2]) | 0x18)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTQQ2PS zmm, ymm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfc)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTQQ2PS m128/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x84, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTQQ2PS m256/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x84, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTQQ2PS xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfc)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTQQ2PS ymm, xmm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfc)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No supported operand form matched.
    if p.len == 0 {
        panic("invalid operands for VCVTQQ2PS")
    }
    return p
}
 39105  
// VCVTSD2SI performs "Convert Scalar Double-Precision FP Value to Integer".
//
// Mnemonic        : VCVTSD2SI
// Supported forms : (10 forms)
//
//    * VCVTSD2SI xmm, r32          [AVX]
//    * VCVTSD2SI m64, r32          [AVX]
//    * VCVTSD2SI xmm, r64          [AVX]
//    * VCVTSD2SI m64, r64          [AVX]
//    * VCVTSD2SI m64, r32          [AVX512F]
//    * VCVTSD2SI m64, r64          [AVX512F]
//    * VCVTSD2SI {er}, xmm, r32    [AVX512F]
//    * VCVTSD2SI {er}, xmm, r64    [AVX512F]
//    * VCVTSD2SI xmm, r32          [AVX512F]
//    * VCVTSD2SI xmm, r64          [AVX512F]
//
func (self *Program) VCVTSD2SI(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional variadic operand selects between the plain 2-operand
    // forms and the 3-operand {er} (embedded-rounding) form.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTSD2SI", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTSD2SI", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTSD2SI takes 2 or 3 operands")
    }
    // NOTE: several operand patterns have both a VEX (AVX) and an EVEX
    // (AVX-512F) encoder; when both match, both candidates are registered
    // and the encoder selection happens downstream.
    // VCVTSD2SI xmm, r32
    if len(vv) == 0 && isXMM(v0) && isReg32(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX-encoded AVX form (2-byte VEX prefix), opcode, then ModRM.
            m.vex2(3, hcode(v[1]), v[0], 0)
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTSD2SI m64, r32
    if len(vv) == 0 && isM64(v0) && isReg32(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x2d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTSD2SI xmm, r64
    if len(vv) == 0 && isXMM(v0) && isReg64(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix (0xc4 escape) for the REX.W form.
            m.emit(0xc4)
            m.emit(0xe1 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfb)
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTSD2SI m64, r64
    if len(vv) == 0 && isM64(v0) && isReg64(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b1, 0x83, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x2d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTSD2SI m64, r32
    if len(vv) == 0 && isM64(v0) && isReg32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; the final mrsd argument is the disp8
            // compression scale (8 bytes for an m64 operand).
            m.evex(0b01, 0x07, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x2d)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VCVTSD2SI m64, r64
    if len(vv) == 0 && isM64(v0) && isReg64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x2d)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VCVTSD2SI {er}, xmm, r32
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isReg32(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form with embedded rounding: hand-rolled 4-byte EVEX
            // prefix (0x62 escape + three payload bytes), opcode, then ModRM.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7f)
            m.emit((vcode(v[0]) << 5) | 0x18)
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTSD2SI {er}, xmm, r64
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isReg64(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xff)
            m.emit((vcode(v[0]) << 5) | 0x18)
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTSD2SI xmm, r32
    if len(vv) == 0 && isEVEXXMM(v0) && isReg32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7f)
            m.emit(0x48)
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTSD2SI xmm, r64
    if len(vv) == 0 && isEVEXXMM(v0) && isReg64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xff)
            m.emit(0x48)
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No supported operand form matched.
    if p.len == 0 {
        panic("invalid operands for VCVTSD2SI")
    }
    return p
}
 39248  
// VCVTSD2SS performs "Convert Scalar Double-Precision FP Value to Scalar Single-Precision FP Value".
//
// Mnemonic        : VCVTSD2SS
// Supported forms : (5 forms)
//
//    * VCVTSD2SS xmm, xmm, xmm                [AVX]
//    * VCVTSD2SS m64, xmm, xmm                [AVX]
//    * VCVTSD2SS m64, xmm, xmm{k}{z}          [AVX512F]
//    * VCVTSD2SS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VCVTSD2SS xmm, xmm, xmm{k}{z}          [AVX512F]
//
func (self *Program) VCVTSD2SS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional variadic operand selects between the plain 3-operand
    // forms and the 4-operand {er} (embedded-rounding) form.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTSD2SS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VCVTSD2SS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VCVTSD2SS takes 3 or 4 operands")
    }
    // Each matching operand pattern registers one candidate encoder; at
    // least one must match or we panic at the end.
    // VCVTSD2SS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX-encoded AVX form (2-byte VEX prefix), opcode, then ModRM.
            m.vex2(3, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VCVTSD2SS m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5a)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VCVTSD2SS m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; the final mrsd argument is the disp8
            // compression scale (8 bytes for an m64 operand).
            m.evex(0b01, 0x87, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x5a)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VCVTSD2SS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form with embedded rounding: hand-rolled 4-byte EVEX
            // prefix (0x62 escape + three payload bytes), opcode, then ModRM.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xff ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x5a)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VCVTSD2SS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xff ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x5a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No supported operand form matched.
    if p.len == 0 {
        panic("invalid operands for VCVTSD2SS")
    }
    return p
}
 39328  
// VCVTSD2USI performs "Convert Scalar Double-Precision Floating-Point Value to Unsigned Doubleword Integer".
//
// Mnemonic        : VCVTSD2USI
// Supported forms : (6 forms)
//
//    * VCVTSD2USI m64, r32          [AVX512F]
//    * VCVTSD2USI m64, r64          [AVX512F]
//    * VCVTSD2USI {er}, xmm, r32    [AVX512F]
//    * VCVTSD2USI {er}, xmm, r64    [AVX512F]
//    * VCVTSD2USI xmm, r32          [AVX512F]
//    * VCVTSD2USI xmm, r64          [AVX512F]
//
func (self *Program) VCVTSD2USI(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional variadic operand selects between the plain 2-operand
    // forms and the 3-operand {er} (embedded-rounding) form.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTSD2USI", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTSD2USI", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTSD2USI takes 2 or 3 operands")
    }
    // Each matching operand pattern registers one candidate encoder; at
    // least one must match or we panic at the end.
    // VCVTSD2USI m64, r32
    if len(vv) == 0 && isM64(v0) && isReg32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; the final mrsd argument is the disp8
            // compression scale (8 bytes for an m64 operand).
            m.evex(0b01, 0x07, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VCVTSD2USI m64, r64
    if len(vv) == 0 && isM64(v0) && isReg64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VCVTSD2USI {er}, xmm, r32
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isReg32(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form with embedded rounding: hand-rolled 4-byte EVEX
            // prefix (0x62 escape + three payload bytes), opcode, then ModRM.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7f)
            m.emit((vcode(v[0]) << 5) | 0x18)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTSD2USI {er}, xmm, r64
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isReg64(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xff)
            m.emit((vcode(v[0]) << 5) | 0x18)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTSD2USI xmm, r32
    if len(vv) == 0 && isEVEXXMM(v0) && isReg32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7f)
            m.emit(0x48)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTSD2USI xmm, r64
    if len(vv) == 0 && isEVEXXMM(v0) && isReg64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xff)
            m.emit(0x48)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No supported operand form matched.
    if p.len == 0 {
        panic("invalid operands for VCVTSD2USI")
    }
    return p
}
 39425  
// VCVTSI2SD performs "Convert Dword Integer to Scalar Double-Precision FP Value".
//
// Mnemonic        : VCVTSI2SD
// Supported forms : (9 forms)
//
//    * VCVTSI2SD r32, xmm, xmm          [AVX]
//    * VCVTSI2SD r64, xmm, xmm          [AVX]
//    * VCVTSI2SD m32, xmm, xmm          [AVX]
//    * VCVTSI2SD m64, xmm, xmm          [AVX]
//    * VCVTSI2SD r32, xmm, xmm          [AVX512F]
//    * VCVTSI2SD m32, xmm, xmm          [AVX512F]
//    * VCVTSI2SD m64, xmm, xmm          [AVX512F]
//    * VCVTSI2SD {er}, r64, xmm, xmm    [AVX512F]
//    * VCVTSI2SD r64, xmm, xmm          [AVX512F]
//
// Operands are listed source-first: the integer source comes first and the
// destination XMM register last. The variadic vv carries the optional 4th
// operand of the {er} form, in which v0 selects the embedded rounding mode.
func (self *Program) VCVTSI2SD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate with 3 or 4 operands depending on whether the optional
    // rounding-control operand was supplied.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTSI2SD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VCVTSI2SD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VCVTSI2SD takes 3 or 4 operands")
    }
    // Each section below matches one supported form and registers an encoder
    // closure for it; more than one form may match and the final encoding is
    // chosen later by the assembler core (presumably the shortest — confirm
    // against p.add's selection logic).
    // VCVTSI2SD r32, xmm, xmm
    if len(vv) == 0 && isReg32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x2a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VCVTSI2SD r64, xmm, xmm
    if len(vv) == 0 && isReg64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix emitted byte-by-byte so that VEX.W (bit 7 of
            // the base byte 0xfb) is set for the 64-bit integer source.
            m.emit(0xc4)
            m.emit(0xe1 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfb ^ (hlcode(v[1]) << 3))
            m.emit(0x2a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VCVTSI2SD m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x2a)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VCVTSI2SD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b1, 0x83, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x2a)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VCVTSI2SD r32, xmm, xmm
    if len(vv) == 0 && isReg32(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix: leading 0x62 then three payload bytes; the
            // XORs against the base bytes store the register-extension bits
            // in complemented form, as the EVEX format requires.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7f ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | 0x00)
            m.emit(0x2a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VCVTSI2SD m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x07, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), 0, 0, 0)
            m.emit(0x2a)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)   // EVEX compressed disp8, scale N = 4 (32-bit element)
        })
    }
    // VCVTSI2SD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), 0, 0, 0)
            m.emit(0x2a)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)   // EVEX compressed disp8, scale N = 8 (64-bit element)
        })
    }
    // VCVTSI2SD {er}, r64, xmm, xmm
    if len(vv) == 1 && isER(v0) && isReg64(v1) && isEVEXXMM(v2) && isEVEXXMM(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xff ^ (hlcode(v[2]) << 3))
            // The rounding mode (vcode(v[0])) occupies the L'L field, which is
            // repurposed as RC because EVEX.b (0x10) is set for this form.
            m.emit((vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | 0x10)
            m.emit(0x2a)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VCVTSI2SD r64, xmm, xmm
    if len(vv) == 0 && isReg64(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xff ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | 0x40)
            m.emit(0x2a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VCVTSI2SD")
    }
    return p
}
 39554  
// VCVTSI2SS performs "Convert Dword Integer to Scalar Single-Precision FP Value".
//
// Mnemonic        : VCVTSI2SS
// Supported forms : (10 forms)
//
//    * VCVTSI2SS r32, xmm, xmm          [AVX]
//    * VCVTSI2SS r64, xmm, xmm          [AVX]
//    * VCVTSI2SS m32, xmm, xmm          [AVX]
//    * VCVTSI2SS m64, xmm, xmm          [AVX]
//    * VCVTSI2SS m32, xmm, xmm          [AVX512F]
//    * VCVTSI2SS m64, xmm, xmm          [AVX512F]
//    * VCVTSI2SS {er}, r32, xmm, xmm    [AVX512F]
//    * VCVTSI2SS {er}, r64, xmm, xmm    [AVX512F]
//    * VCVTSI2SS r32, xmm, xmm          [AVX512F]
//    * VCVTSI2SS r64, xmm, xmm          [AVX512F]
//
// Operands are listed source-first: the integer source comes first and the
// destination XMM register last. The variadic vv carries the optional 4th
// operand of the {er} forms, in which v0 selects the embedded rounding mode.
func (self *Program) VCVTSI2SS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate with 3 or 4 operands depending on whether the optional
    // rounding-control operand was supplied.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTSI2SS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VCVTSI2SS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VCVTSI2SS takes 3 or 4 operands")
    }
    // Each section below matches one supported form and registers the encoder
    // closure that emits its exact byte sequence.
    // VCVTSI2SS r32, xmm, xmm
    if len(vv) == 0 && isReg32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x2a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VCVTSI2SS r64, xmm, xmm
    if len(vv) == 0 && isReg64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix emitted byte-by-byte so that VEX.W (bit 7 of
            // the base byte 0xfa) is set for the 64-bit integer source.
            m.emit(0xc4)
            m.emit(0xe1 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfa ^ (hlcode(v[1]) << 3))
            m.emit(0x2a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VCVTSI2SS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x2a)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VCVTSI2SS m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b1, 0x82, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x2a)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VCVTSI2SS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), 0, 0, 0)
            m.emit(0x2a)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)   // EVEX compressed disp8, scale N = 4 (32-bit element)
        })
    }
    // VCVTSI2SS m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x86, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), 0, 0, 0)
            m.emit(0x2a)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)   // EVEX compressed disp8, scale N = 8 (64-bit element)
        })
    }
    // VCVTSI2SS {er}, r32, xmm, xmm
    if len(vv) == 1 && isER(v0) && isReg32(v1) && isEVEXXMM(v2) && isEVEXXMM(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7e ^ (hlcode(v[2]) << 3))
            // The rounding mode (vcode(v[0])) occupies the L'L field, which is
            // repurposed as RC because EVEX.b (0x10) is set for this form.
            m.emit((vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | 0x10)
            m.emit(0x2a)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VCVTSI2SS {er}, r64, xmm, xmm
    if len(vv) == 1 && isER(v0) && isReg64(v1) && isEVEXXMM(v2) && isEVEXXMM(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfe ^ (hlcode(v[2]) << 3))
            m.emit((vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | 0x10)
            m.emit(0x2a)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VCVTSI2SS r32, xmm, xmm
    if len(vv) == 0 && isReg32(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | 0x40)
            m.emit(0x2a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VCVTSI2SS r64, xmm, xmm
    if len(vv) == 0 && isReg64(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfe ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | 0x40)
            m.emit(0x2a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VCVTSI2SS")
    }
    return p
}
 39697  
// VCVTSS2SD performs "Convert Scalar Single-Precision FP Value to Scalar Double-Precision FP Value".
//
// Mnemonic        : VCVTSS2SD
// Supported forms : (5 forms)
//
//    * VCVTSS2SD xmm, xmm, xmm                 [AVX]
//    * VCVTSS2SD m32, xmm, xmm                 [AVX]
//    * VCVTSS2SD m32, xmm, xmm{k}{z}           [AVX512F]
//    * VCVTSS2SD {sae}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VCVTSS2SD xmm, xmm, xmm{k}{z}           [AVX512F]
//
// Operands are listed source-first with the destination last. The variadic vv
// carries the optional 4th operand of the {sae} (suppress-all-exceptions)
// form, in which v0 is the SAE marker.
func (self *Program) VCVTSS2SD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate with 3 or 4 operands depending on whether {sae} was supplied.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTSS2SD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VCVTSS2SD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VCVTSS2SD takes 3 or 4 operands")
    }
    // VCVTSS2SD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VCVTSS2SD m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5a)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VCVTSS2SD m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // kcode/zcode carry the opmask register and zeroing flag of the
            // destination's {k}{z} decoration into the EVEX prefix.
            m.evex(0b01, 0x06, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x5a)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)   // EVEX compressed disp8, scale N = 4 (32-bit element)
        })
    }
    // VCVTSS2SD {sae}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7e ^ (hlcode(v[2]) << 3))
            // 0x10 sets EVEX.b, which with a register operand signals SAE.
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x5a)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VCVTSS2SD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x5a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VCVTSS2SD")
    }
    return p
}
 39777  
// VCVTSS2SI performs "Convert Scalar Single-Precision FP Value to Dword Integer".
//
// Mnemonic        : VCVTSS2SI
// Supported forms : (10 forms)
//
//    * VCVTSS2SI xmm, r32          [AVX]
//    * VCVTSS2SI m32, r32          [AVX]
//    * VCVTSS2SI xmm, r64          [AVX]
//    * VCVTSS2SI m32, r64          [AVX]
//    * VCVTSS2SI m32, r32          [AVX512F]
//    * VCVTSS2SI m32, r64          [AVX512F]
//    * VCVTSS2SI {er}, xmm, r32    [AVX512F]
//    * VCVTSS2SI {er}, xmm, r64    [AVX512F]
//    * VCVTSS2SI xmm, r32          [AVX512F]
//    * VCVTSS2SI xmm, r64          [AVX512F]
//
// Operands are listed source-first with the destination register last. The
// variadic vv carries the 3rd operand of the {er} forms, in which v0 selects
// the embedded rounding mode.
func (self *Program) VCVTSS2SI(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate with 2 or 3 operands depending on whether {er} was supplied.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTSS2SI", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTSS2SI", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTSS2SI takes 2 or 3 operands")
    }
    // VCVTSS2SI xmm, r32
    if len(vv) == 0 && isXMM(v0) && isReg32(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[1]), v[0], 0)
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTSS2SI m32, r32
    if len(vv) == 0 && isM32(v0) && isReg32(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x2d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTSS2SI xmm, r64
    if len(vv) == 0 && isXMM(v0) && isReg64(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix emitted byte-by-byte so that VEX.W (bit 7 of
            // the base byte 0xfa) is set for the 64-bit destination.
            m.emit(0xc4)
            m.emit(0xe1 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfa)
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTSS2SI m32, r64
    if len(vv) == 0 && isM32(v0) && isReg64(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b1, 0x82, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x2d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTSS2SI m32, r32
    if len(vv) == 0 && isM32(v0) && isReg32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x2d)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)   // EVEX compressed disp8, scale N = 4 (32-bit source element)
        })
    }
    // VCVTSS2SI m32, r64
    if len(vv) == 0 && isM32(v0) && isReg64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x86, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x2d)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VCVTSS2SI {er}, xmm, r32
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isReg32(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e)
            // The rounding mode (vcode(v[0])) occupies the L'L field; 0x18
            // includes EVEX.b, which repurposes L'L as RC for this form.
            m.emit((vcode(v[0]) << 5) | 0x18)
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTSS2SI {er}, xmm, r64
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isReg64(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfe)
            m.emit((vcode(v[0]) << 5) | 0x18)
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTSS2SI xmm, r32
    if len(vv) == 0 && isEVEXXMM(v0) && isReg32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit(0x48)
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTSS2SI xmm, r64
    if len(vv) == 0 && isEVEXXMM(v0) && isReg64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit(0x48)
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VCVTSS2SI")
    }
    return p
}
 39920  
// VCVTSS2USI performs "Convert Scalar Single-Precision Floating-Point Value to Unsigned Doubleword Integer".
//
// Mnemonic        : VCVTSS2USI
// Supported forms : (6 forms)
//
//    * VCVTSS2USI m32, r32          [AVX512F]
//    * VCVTSS2USI m32, r64          [AVX512F]
//    * VCVTSS2USI {er}, xmm, r32    [AVX512F]
//    * VCVTSS2USI {er}, xmm, r64    [AVX512F]
//    * VCVTSS2USI xmm, r32          [AVX512F]
//    * VCVTSS2USI xmm, r64          [AVX512F]
//
// Operands are listed source-first with the destination register last. The
// variadic vv carries the 3rd operand of the {er} forms, in which v0 selects
// the embedded rounding mode. All forms require AVX-512F (opcode 0x79).
func (self *Program) VCVTSS2USI(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate with 2 or 3 operands depending on whether {er} was supplied.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTSS2USI", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTSS2USI", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTSS2USI takes 2 or 3 operands")
    }
    // VCVTSS2USI m32, r32
    if len(vv) == 0 && isM32(v0) && isReg32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)   // EVEX compressed disp8, scale N = 4 (32-bit source element)
        })
    }
    // VCVTSS2USI m32, r64
    if len(vv) == 0 && isM32(v0) && isReg64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x86, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VCVTSS2USI {er}, xmm, r32
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isReg32(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e)
            // The rounding mode (vcode(v[0])) occupies the L'L field; 0x18
            // includes EVEX.b, which repurposes L'L as RC for this form.
            m.emit((vcode(v[0]) << 5) | 0x18)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTSS2USI {er}, xmm, r64
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isReg64(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfe)
            m.emit((vcode(v[0]) << 5) | 0x18)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTSS2USI xmm, r32
    if len(vv) == 0 && isEVEXXMM(v0) && isReg32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit(0x48)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTSS2USI xmm, r64
    if len(vv) == 0 && isEVEXXMM(v0) && isReg64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit(0x48)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VCVTSS2USI")
    }
    return p
}
 40017  
// VCVTTPD2DQ performs "Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers".
//
// Mnemonic        : VCVTTPD2DQ
// Supported forms : (11 forms)
//
//    * VCVTTPD2DQ xmm, xmm                   [AVX]
//    * VCVTTPD2DQ ymm, xmm                   [AVX]
//    * VCVTTPD2DQ m128, xmm                  [AVX]
//    * VCVTTPD2DQ m256, xmm                  [AVX]
//    * VCVTTPD2DQ m512/m64bcst, ymm{k}{z}    [AVX512F]
//    * VCVTTPD2DQ {sae}, zmm, ymm{k}{z}      [AVX512F]
//    * VCVTTPD2DQ zmm, ymm{k}{z}             [AVX512F]
//    * VCVTTPD2DQ m128/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTTPD2DQ m256/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTTPD2DQ xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VCVTTPD2DQ ymm, xmm{k}{z}             [AVX512F,AVX512VL]
//
// Operands are listed source-first with the destination last. The variadic vv
// carries the 3rd operand of the {sae} (suppress-all-exceptions) form, in
// which v0 is the SAE marker.
func (self *Program) VCVTTPD2DQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate with 2 or 3 operands depending on whether {sae} was supplied.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTTPD2DQ", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTTPD2DQ", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTTPD2DQ takes 2 or 3 operands")
    }
    // VCVTTPD2DQ xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[1]), v[0], 0)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTPD2DQ ymm, xmm
    if len(vv) == 0 && isYMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // vex2 first argument 5 (vs. 1 for the xmm form) presumably sets
            // VEX.L for the 256-bit source — confirm against vex2's contract.
            m.vex2(5, hcode(v[1]), v[0], 0)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTPD2DQ m128, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[1]), addr(v[0]), 0)
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTTPD2DQ m256, xmm
    if len(vv) == 0 && isM256(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[1]), addr(v[0]), 0)
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTTPD2DQ m512/m64bcst, ymm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // bcode(v[0]) carries the broadcast bit of the m64bcst operand;
            // kcode/zcode carry the destination's {k}{z} decoration.
            m.evex(0b01, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)   // EVEX compressed disp8, scale N = 64 (full 512-bit vector)
        })
    }
    // VCVTTPD2DQ {sae}, zmm, ymm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isZMM(v1) && isYMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            // 0x18 includes EVEX.b, which with a register operand signals SAE.
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x18)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTTPD2DQ zmm, ymm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTPD2DQ m128/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)   // EVEX compressed disp8, scale N = 16 (128-bit vector)
        })
    }
    // VCVTTPD2DQ m256/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xe6)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)   // EVEX compressed disp8, scale N = 32 (256-bit vector)
        })
    }
    // VCVTTPD2DQ xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTPD2DQ ymm, xmm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0xe6)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VCVTTPD2DQ")
    }
    return p
}
 40169  
// VCVTTPD2QQ performs "Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Quadword Integers".
//
// Mnemonic        : VCVTTPD2QQ
// Supported forms : (7 forms)
//
//    * VCVTTPD2QQ m512/m64bcst, zmm{k}{z}    [AVX512DQ]
//    * VCVTTPD2QQ {sae}, zmm, zmm{k}{z}      [AVX512DQ]
//    * VCVTTPD2QQ zmm, zmm{k}{z}             [AVX512DQ]
//    * VCVTTPD2QQ m128/m64bcst, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VCVTTPD2QQ m256/m64bcst, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VCVTTPD2QQ xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VCVTTPD2QQ ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
//
func (self *Program) VCVTTPD2QQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // An optional third operand (vv[0]) is the destination of the "{sae}, zmm, zmm{k}{z}" form only.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTTPD2QQ", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTTPD2QQ", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTTPD2QQ takes 2 or 3 operands")
    }
    // VCVTTPD2QQ m512/m64bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7a)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VCVTTPD2QQ {sae}, zmm, zmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix emitted byte-by-byte for the register-only {sae} form;
            // 0x62 is the EVEX escape byte and 0x18 sets the SAE bit in byte 3.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x18)
            m.emit(0x7a)
            // ModRM: register-direct (mod=11), reg=destination, rm=source.
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTTPD2QQ zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTPD2QQ m128/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7a)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTTPD2QQ m256/m64bcst, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7a)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTTPD2QQ xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTPD2QQ ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered: the operand combination matches no supported form.
    if p.len == 0 {
        panic("invalid operands for VCVTTPD2QQ")
    }
    return p
}
 40277  
// VCVTTPD2UDQ performs "Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers".
//
// Mnemonic        : VCVTTPD2UDQ
// Supported forms : (7 forms)
//
//    * VCVTTPD2UDQ m512/m64bcst, ymm{k}{z}    [AVX512F]
//    * VCVTTPD2UDQ {sae}, zmm, ymm{k}{z}      [AVX512F]
//    * VCVTTPD2UDQ zmm, ymm{k}{z}             [AVX512F]
//    * VCVTTPD2UDQ m128/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTTPD2UDQ m256/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTTPD2UDQ xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VCVTTPD2UDQ ymm, xmm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VCVTTPD2UDQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // An optional third operand (vv[0]) is the destination of the "{sae}, zmm, ymm{k}{z}" form only.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTTPD2UDQ", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTTPD2UDQ", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTTPD2UDQ takes 2 or 3 operands")
    }
    // VCVTTPD2UDQ m512/m64bcst, ymm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x84, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x78)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VCVTTPD2UDQ {sae}, zmm, ymm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isZMM(v1) && isYMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix emitted byte-by-byte for the register-only {sae} form;
            // 0x62 is the EVEX escape byte and 0x18 sets the SAE bit in byte 3.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfc)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x18)
            m.emit(0x78)
            // ModRM: register-direct (mod=11), reg=destination, rm=source.
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTTPD2UDQ zmm, ymm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfc)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTPD2UDQ m128/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x84, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x78)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTTPD2UDQ m256/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x84, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x78)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTTPD2UDQ xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfc)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTPD2UDQ ymm, xmm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfc)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered: the operand combination matches no supported form.
    if p.len == 0 {
        panic("invalid operands for VCVTTPD2UDQ")
    }
    return p
}
 40385  
// VCVTTPD2UQQ performs "Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers".
//
// Mnemonic        : VCVTTPD2UQQ
// Supported forms : (7 forms)
//
//    * VCVTTPD2UQQ m512/m64bcst, zmm{k}{z}    [AVX512DQ]
//    * VCVTTPD2UQQ {sae}, zmm, zmm{k}{z}      [AVX512DQ]
//    * VCVTTPD2UQQ zmm, zmm{k}{z}             [AVX512DQ]
//    * VCVTTPD2UQQ m128/m64bcst, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VCVTTPD2UQQ m256/m64bcst, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VCVTTPD2UQQ xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VCVTTPD2UQQ ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
//
func (self *Program) VCVTTPD2UQQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // An optional third operand (vv[0]) is the destination of the "{sae}, zmm, zmm{k}{z}" form only.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTTPD2UQQ", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTTPD2UQQ", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTTPD2UQQ takes 2 or 3 operands")
    }
    // VCVTTPD2UQQ m512/m64bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x78)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VCVTTPD2UQQ {sae}, zmm, zmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix emitted byte-by-byte for the register-only {sae} form;
            // 0x62 is the EVEX escape byte and 0x18 sets the SAE bit in byte 3.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x18)
            m.emit(0x78)
            // ModRM: register-direct (mod=11), reg=destination, rm=source.
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTTPD2UQQ zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTPD2UQQ m128/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x78)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTTPD2UQQ m256/m64bcst, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x78)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTTPD2UQQ xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTPD2UQQ ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered: the operand combination matches no supported form.
    if p.len == 0 {
        panic("invalid operands for VCVTTPD2UQQ")
    }
    return p
}
 40493  
// VCVTTPS2DQ performs "Convert with Truncation Packed Single-Precision FP Values to Packed Dword Integers".
//
// Mnemonic        : VCVTTPS2DQ
// Supported forms : (11 forms)
//
//    * VCVTTPS2DQ xmm, xmm                   [AVX]
//    * VCVTTPS2DQ m128, xmm                  [AVX]
//    * VCVTTPS2DQ ymm, ymm                   [AVX]
//    * VCVTTPS2DQ m256, ymm                  [AVX]
//    * VCVTTPS2DQ m512/m32bcst, zmm{k}{z}    [AVX512F]
//    * VCVTTPS2DQ {sae}, zmm, zmm{k}{z}      [AVX512F]
//    * VCVTTPS2DQ zmm, zmm{k}{z}             [AVX512F]
//    * VCVTTPS2DQ m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTTPS2DQ m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTTPS2DQ xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VCVTTPS2DQ ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VCVTTPS2DQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // An optional third operand (vv[0]) is the destination of the "{sae}, zmm, zmm{k}{z}" form only.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTTPS2DQ", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTTPS2DQ", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTTPS2DQ takes 2 or 3 operands")
    }
    // VCVTTPS2DQ xmm, xmm
    // The first four forms are plain VEX-encoded AVX (no masking/zeroing support).
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[1]), v[0], 0)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTPS2DQ m128, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTTPS2DQ ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(6, hcode(v[1]), v[0], 0)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTPS2DQ m256, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(6, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTTPS2DQ m512/m32bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VCVTTPS2DQ {sae}, zmm, zmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix emitted byte-by-byte for the register-only {sae} form;
            // 0x62 is the EVEX escape byte and 0x18 sets the SAE bit in byte 3.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x18)
            m.emit(0x5b)
            // ModRM: register-direct (mod=11), reg=destination, rm=source.
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTTPS2DQ zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTPS2DQ m128/m32bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTTPS2DQ m256/m32bcst, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x5b)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTTPS2DQ xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTPS2DQ ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x5b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered: the operand combination matches no supported form.
    if p.len == 0 {
        panic("invalid operands for VCVTTPS2DQ")
    }
    return p
}
 40645  
// VCVTTPS2QQ performs "Convert with Truncation Packed Single Precision Floating-Point Values to Packed Signed Quadword Integer Values".
//
// Mnemonic        : VCVTTPS2QQ
// Supported forms : (7 forms)
//
//    * VCVTTPS2QQ m256/m32bcst, zmm{k}{z}    [AVX512DQ]
//    * VCVTTPS2QQ {sae}, ymm, zmm{k}{z}      [AVX512DQ]
//    * VCVTTPS2QQ ymm, zmm{k}{z}             [AVX512DQ]
//    * VCVTTPS2QQ m64/m32bcst, xmm{k}{z}     [AVX512DQ,AVX512VL]
//    * VCVTTPS2QQ m128/m32bcst, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VCVTTPS2QQ xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VCVTTPS2QQ xmm, ymm{k}{z}             [AVX512DQ,AVX512VL]
//
func (self *Program) VCVTTPS2QQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // An optional third operand (vv[0]) is the destination of the "{sae}, ymm, zmm{k}{z}" form only.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTTPS2QQ", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTTPS2QQ", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTTPS2QQ takes 2 or 3 operands")
    }
    // VCVTTPS2QQ m256/m32bcst, zmm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7a)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTTPS2QQ {sae}, ymm, zmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isEVEXYMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix emitted byte-by-byte for the register-only {sae} form;
            // 0x62 is the EVEX escape byte and 0x18 sets the SAE bit in byte 3.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x18)
            m.emit(0x7a)
            // ModRM: register-direct (mod=11), reg=destination, rm=source.
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTTPS2QQ ymm, zmm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTPS2QQ m64/m32bcst, xmm{k}{z}
    if len(vv) == 0 && isM64M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7a)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VCVTTPS2QQ m128/m32bcst, ymm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7a)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTTPS2QQ xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTPS2QQ xmm, ymm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered: the operand combination matches no supported form.
    if p.len == 0 {
        panic("invalid operands for VCVTTPS2QQ")
    }
    return p
}
 40753  
// VCVTTPS2UDQ performs "Convert with Truncation Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values".
//
// Mnemonic        : VCVTTPS2UDQ
// Supported forms : (7 forms)
//
//    * VCVTTPS2UDQ m512/m32bcst, zmm{k}{z}    [AVX512F]
//    * VCVTTPS2UDQ {sae}, zmm, zmm{k}{z}      [AVX512F]
//    * VCVTTPS2UDQ zmm, zmm{k}{z}             [AVX512F]
//    * VCVTTPS2UDQ m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTTPS2UDQ m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTTPS2UDQ xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VCVTTPS2UDQ ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VCVTTPS2UDQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional third operand is only used by the "{sae}, zmm, zmm{k}{z}" form.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTTPS2UDQ", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTTPS2UDQ", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTTPS2UDQ takes 2 or 3 operands")
    }
    // Each form that matches the operands appends one candidate encoding via
    // p.add; if no form matched, p.len stays 0 and the panic at the end fires.
    // VCVTTPS2UDQ m512/m32bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x78)                            // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 64)     // ModRM + mem operand, disp8 compression scale 64
        })
    }
    // VCVTTPS2UDQ {sae}, zmm, zmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))  // P0: inverted register-extension bits
            m.emit(0x7c)                                                                    // P1: W/vvvv/pp
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x18)                                 // P2: z + mask; 0x18 sets EVEX.b (SAE)
            m.emit(0x78)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))                                   // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VCVTTPS2UDQ zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)  // 0x48: 512-bit vector length (L'L=10)
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTPS2UDQ m128/m32bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x78)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)     // disp8 compression scale 16 (128-bit)
        })
    }
    // VCVTTPS2UDQ m256/m32bcst, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x78)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)     // disp8 compression scale 32 (256-bit)
        })
    }
    // VCVTTPS2UDQ xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)  // 0x08: 128-bit vector length
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTPS2UDQ ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)  // 0x28: 256-bit vector length
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VCVTTPS2UDQ")
    }
    return p
}
 40861  
// VCVTTPS2UQQ performs "Convert with Truncation Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values".
//
// Mnemonic        : VCVTTPS2UQQ
// Supported forms : (7 forms)
//
//    * VCVTTPS2UQQ m256/m32bcst, zmm{k}{z}    [AVX512DQ]
//    * VCVTTPS2UQQ {sae}, ymm, zmm{k}{z}      [AVX512DQ]
//    * VCVTTPS2UQQ ymm, zmm{k}{z}             [AVX512DQ]
//    * VCVTTPS2UQQ m64/m32bcst, xmm{k}{z}     [AVX512DQ,AVX512VL]
//    * VCVTTPS2UQQ m128/m32bcst, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VCVTTPS2UQQ xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VCVTTPS2UQQ xmm, ymm{k}{z}             [AVX512DQ,AVX512VL]
//
func (self *Program) VCVTTPS2UQQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional third operand is only used by the "{sae}, ymm, zmm{k}{z}" form.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTTPS2UQQ", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTTPS2UQQ", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTTPS2UQQ takes 2 or 3 operands")
    }
    // Each form that matches the operands appends one candidate encoding via
    // p.add; if no form matched, p.len stays 0 and the panic at the end fires.
    // Note the source element count is half the destination's, so the memory
    // source is half the destination vector width (e.g. m256 -> zmm).
    // VCVTTPS2UQQ m256/m32bcst, zmm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x78)                            // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 32)     // ModRM + mem operand, disp8 compression scale 32
        })
    }
    // VCVTTPS2UQQ {sae}, ymm, zmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isEVEXYMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))  // P0: inverted register-extension bits
            m.emit(0x7d)                                                                    // P1: W/vvvv/pp
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x18)                                 // P2: z + mask; 0x18 sets EVEX.b (SAE)
            m.emit(0x78)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))                                   // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VCVTTPS2UQQ ymm, zmm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)  // 0x48: 512-bit vector length (L'L=10)
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTPS2UQQ m64/m32bcst, xmm{k}{z}
    if len(vv) == 0 && isM64M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x78)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)      // disp8 compression scale 8 (64-bit source)
        })
    }
    // VCVTTPS2UQQ m128/m32bcst, ymm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x78)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)     // disp8 compression scale 16 (128-bit source)
        })
    }
    // VCVTTPS2UQQ xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)  // 0x08: 128-bit vector length
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTPS2UQQ xmm, ymm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)  // 0x28: 256-bit vector length
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VCVTTPS2UQQ")
    }
    return p
}
 40969  
// VCVTTSD2SI performs "Convert with Truncation Scalar Double-Precision FP Value to Signed Integer".
//
// Mnemonic        : VCVTTSD2SI
// Supported forms : (10 forms)
//
//    * VCVTTSD2SI xmm, r32           [AVX]
//    * VCVTTSD2SI m64, r32           [AVX]
//    * VCVTTSD2SI xmm, r64           [AVX]
//    * VCVTTSD2SI m64, r64           [AVX]
//    * VCVTTSD2SI m64, r32           [AVX512F]
//    * VCVTTSD2SI m64, r64           [AVX512F]
//    * VCVTTSD2SI {sae}, xmm, r32    [AVX512F]
//    * VCVTTSD2SI {sae}, xmm, r64    [AVX512F]
//    * VCVTTSD2SI xmm, r32           [AVX512F]
//    * VCVTTSD2SI xmm, r64           [AVX512F]
//
func (self *Program) VCVTTSD2SI(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional third operand is only used by the "{sae}, xmm, r32/r64" forms.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTTSD2SI", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTTSD2SI", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTTSD2SI takes 2 or 3 operands")
    }
    // Both VEX (AVX) and EVEX (AVX-512F) encodings exist for the overlapping
    // forms; each matching form appends its candidate encoding via p.add.
    // If no form matched, p.len stays 0 and the panic at the end fires.
    // VCVTTSD2SI xmm, r32
    if len(vv) == 0 && isXMM(v0) && isReg32(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[1]), v[0], 0)                  // two-byte VEX prefix
            m.emit(0x2c)                                     // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VCVTTSD2SI m64, r32
    if len(vv) == 0 && isM64(v0) && isReg32(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x2c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // VEX: no disp8 compression (scale 1)
        })
    }
    // VCVTTSD2SI xmm, r64
    if len(vv) == 0 && isXMM(v0) && isReg64(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                             // three-byte VEX escape
            m.emit(0xe1 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))   // VEX byte 1: inverted R/X/B
            m.emit(0xfb)                                             // VEX byte 2: W=1 (64-bit dst), pp=F2
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTSD2SI m64, r64
    if len(vv) == 0 && isM64(v0) && isReg64(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b1, 0x83, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x2c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTTSD2SI m64, r32
    if len(vv) == 0 && isM64(v0) && isReg32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x07, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x2c)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)               // EVEX: disp8 compression scale 8
        })
    }
    // VCVTTSD2SI m64, r64
    if len(vv) == 0 && isM64(v0) && isReg64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x2c)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VCVTTSD2SI {sae}, xmm, r32
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isReg32(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))  // P0: inverted register-extension bits
            m.emit(0x7f)                                                                    // P1: W=0, pp=F2
            m.emit(0x18)                                                                    // P2: EVEX.b set (SAE)
            m.emit(0x2c)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))                                   // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VCVTTSD2SI {sae}, xmm, r64
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isReg64(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xff)                                     // P1: W=1 (64-bit dst), pp=F2
            m.emit(0x18)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTTSD2SI xmm, r32
    if len(vv) == 0 && isEVEXXMM(v0) && isReg32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7f)
            m.emit(0x48)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTSD2SI xmm, r64
    if len(vv) == 0 && isEVEXXMM(v0) && isReg64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xff)
            m.emit(0x48)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VCVTTSD2SI")
    }
    return p
}
 41112  
// VCVTTSD2USI performs "Convert with Truncation Scalar Double-Precision Floating-Point Value to Unsigned Integer".
//
// Mnemonic        : VCVTTSD2USI
// Supported forms : (6 forms)
//
//    * VCVTTSD2USI m64, r32           [AVX512F]
//    * VCVTTSD2USI m64, r64           [AVX512F]
//    * VCVTTSD2USI {sae}, xmm, r32    [AVX512F]
//    * VCVTTSD2USI {sae}, xmm, r64    [AVX512F]
//    * VCVTTSD2USI xmm, r32           [AVX512F]
//    * VCVTTSD2USI xmm, r64           [AVX512F]
//
func (self *Program) VCVTTSD2USI(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional third operand is only used by the "{sae}, xmm, r32/r64" forms.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTTSD2USI", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTTSD2USI", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTTSD2USI takes 2 or 3 operands")
    }
    // Unsigned-destination variant of VCVTTSD2SI: EVEX-only (AVX-512F), with
    // opcode byte 0x78 instead of 0x2C. Each matching form appends one
    // candidate encoding via p.add; if none matched, the panic below fires.
    // VCVTTSD2USI m64, r32
    if len(vv) == 0 && isM64(v0) && isReg32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x07, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x78)                            // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 8)      // ModRM + mem operand, disp8 compression scale 8
        })
    }
    // VCVTTSD2USI m64, r64
    if len(vv) == 0 && isM64(v0) && isReg64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x78)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VCVTTSD2USI {sae}, xmm, r32
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isReg32(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))  // P0: inverted register-extension bits
            m.emit(0x7f)                                                                    // P1: W=0, pp=F2
            m.emit(0x18)                                                                    // P2: EVEX.b set (SAE)
            m.emit(0x78)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))                                   // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VCVTTSD2USI {sae}, xmm, r64
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isReg64(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xff)                            // P1: W=1 (64-bit dst), pp=F2
            m.emit(0x18)
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTTSD2USI xmm, r32
    if len(vv) == 0 && isEVEXXMM(v0) && isReg32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7f)
            m.emit(0x48)
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTSD2USI xmm, r64
    if len(vv) == 0 && isEVEXXMM(v0) && isReg64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xff)
            m.emit(0x48)
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VCVTTSD2USI")
    }
    return p
}
 41209  
// VCVTTSS2SI performs "Convert with Truncation Scalar Single-Precision FP Value to Dword Integer".
//
// Mnemonic        : VCVTTSS2SI
// Supported forms : (10 forms)
//
//    * VCVTTSS2SI xmm, r32           [AVX]
//    * VCVTTSS2SI m32, r32           [AVX]
//    * VCVTTSS2SI xmm, r64           [AVX]
//    * VCVTTSS2SI m32, r64           [AVX]
//    * VCVTTSS2SI m32, r32           [AVX512F]
//    * VCVTTSS2SI m32, r64           [AVX512F]
//    * VCVTTSS2SI {sae}, xmm, r32    [AVX512F]
//    * VCVTTSS2SI {sae}, xmm, r64    [AVX512F]
//    * VCVTTSS2SI xmm, r32           [AVX512F]
//    * VCVTTSS2SI xmm, r64           [AVX512F]
//
func (self *Program) VCVTTSS2SI(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional third operand is only used by the "{sae}, xmm, r32/r64" forms.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTTSS2SI", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTTSS2SI", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTTSS2SI takes 2 or 3 operands")
    }
    // Single-precision counterpart of VCVTTSD2SI: same opcode (0x2C) with the
    // F3 mandatory prefix instead of F2, and a 32-bit memory source. Both VEX
    // (AVX) and EVEX (AVX-512F) encodings exist for the overlapping forms;
    // each matching form appends one candidate encoding via p.add.
    // VCVTTSS2SI xmm, r32
    if len(vv) == 0 && isXMM(v0) && isReg32(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[1]), v[0], 0)                  // two-byte VEX prefix
            m.emit(0x2c)                                     // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))    // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VCVTTSS2SI m32, r32
    if len(vv) == 0 && isM32(v0) && isReg32(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x2c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)               // VEX: no disp8 compression (scale 1)
        })
    }
    // VCVTTSS2SI xmm, r64
    if len(vv) == 0 && isXMM(v0) && isReg64(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                             // three-byte VEX escape
            m.emit(0xe1 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))   // VEX byte 1: inverted R/X/B
            m.emit(0xfa)                                             // VEX byte 2: W=1 (64-bit dst), pp=F3
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTSS2SI m32, r64
    if len(vv) == 0 && isM32(v0) && isReg64(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b1, 0x82, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x2c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VCVTTSS2SI m32, r32
    if len(vv) == 0 && isM32(v0) && isReg32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x2c)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)               // EVEX: disp8 compression scale 4
        })
    }
    // VCVTTSS2SI m32, r64
    if len(vv) == 0 && isM32(v0) && isReg64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x86, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x2c)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VCVTTSS2SI {sae}, xmm, r32
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isReg32(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))  // P0: inverted register-extension bits
            m.emit(0x7e)                                                                    // P1: W=0, pp=F3
            m.emit(0x18)                                                                    // P2: EVEX.b set (SAE)
            m.emit(0x2c)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))                                   // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VCVTTSS2SI {sae}, xmm, r64
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isReg64(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfe)                                     // P1: W=1 (64-bit dst), pp=F3
            m.emit(0x18)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTTSS2SI xmm, r32
    if len(vv) == 0 && isEVEXXMM(v0) && isReg32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit(0x48)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTSS2SI xmm, r64
    if len(vv) == 0 && isEVEXXMM(v0) && isReg64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit(0x48)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VCVTTSS2SI")
    }
    return p
}
 41352  
// VCVTTSS2USI performs "Convert with Truncation Scalar Single-Precision Floating-Point Value to Unsigned Integer".
//
// Mnemonic        : VCVTTSS2USI
// Supported forms : (6 forms)
//
//    * VCVTTSS2USI m32, r32           [AVX512F]
//    * VCVTTSS2USI m32, r64           [AVX512F]
//    * VCVTTSS2USI {sae}, xmm, r32    [AVX512F]
//    * VCVTTSS2USI {sae}, xmm, r64    [AVX512F]
//    * VCVTTSS2USI xmm, r32           [AVX512F]
//    * VCVTTSS2USI xmm, r64           [AVX512F]
//
func (self *Program) VCVTTSS2USI(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional third operand is only used by the "{sae}, xmm, r32/r64" forms.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTTSS2USI", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTTSS2USI", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTTSS2USI takes 2 or 3 operands")
    }
    // Unsigned-destination variant of VCVTTSS2SI: EVEX-only (AVX-512F), with
    // opcode byte 0x78 instead of 0x2C. Each matching form appends one
    // candidate encoding via p.add; if none matched, the panic below fires.
    // VCVTTSS2USI m32, r32
    if len(vv) == 0 && isM32(v0) && isReg32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x78)                            // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 4)      // ModRM + mem operand, disp8 compression scale 4
        })
    }
    // VCVTTSS2USI m32, r64
    if len(vv) == 0 && isM32(v0) && isReg64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x86, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x78)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VCVTTSS2USI {sae}, xmm, r32
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isReg32(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))  // P0: inverted register-extension bits
            m.emit(0x7e)                                                                    // P1: W=0, pp=F3
            m.emit(0x18)                                                                    // P2: EVEX.b set (SAE)
            m.emit(0x78)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))                                   // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VCVTTSS2USI {sae}, xmm, r64
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isReg64(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfe)                            // P1: W=1 (64-bit dst), pp=F3
            m.emit(0x18)
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTTSS2USI xmm, r32
    if len(vv) == 0 && isEVEXXMM(v0) && isReg32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit(0x48)
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTTSS2USI xmm, r64
    if len(vv) == 0 && isEVEXXMM(v0) && isReg64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit(0x48)
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VCVTTSS2USI")
    }
    return p
}
 41449  
// VCVTUDQ2PD performs "Convert Packed Unsigned Doubleword Integers to Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VCVTUDQ2PD
// Supported forms : (6 forms)
//
//    * VCVTUDQ2PD m256/m32bcst, zmm{k}{z}    [AVX512F]
//    * VCVTUDQ2PD ymm, zmm{k}{z}             [AVX512F]
//    * VCVTUDQ2PD m64/m32bcst, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VCVTUDQ2PD m128/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTUDQ2PD xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VCVTUDQ2PD xmm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VCVTUDQ2PD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VCVTUDQ2PD", 2, Operands { v0, v1 })
    // Each operand-form check below registers an encoder for that form via
    // p.add. Register-source forms emit the 4-byte EVEX prefix inline
    // (starting with the 0x62 escape byte); memory-source forms delegate
    // prefix construction to m.evex and the ModRM/SIB/displacement encoding
    // to m.mrsd. The opcode byte for every form is 0x7a.
    // VCVTUDQ2PD m256/m32bcst, zmm{k}{z}
    if isM256M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7a)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTUDQ2PD ymm, zmm{k}{z}
    if isEVEXYMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x7a)
            // ModRM: register-direct (mod = 11), reg = destination, rm = source.
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTUDQ2PD m64/m32bcst, xmm{k}{z}
    if isM64M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7a)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VCVTUDQ2PD m128/m32bcst, ymm{k}{z}
    if isM128M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7a)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTUDQ2PD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTUDQ2PD xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No encodable form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VCVTUDQ2PD")
    }
    return p
}
 41538  
// VCVTUDQ2PS performs "Convert Packed Unsigned Doubleword Integers to Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VCVTUDQ2PS
// Supported forms : (7 forms)
//
//    * VCVTUDQ2PS m512/m32bcst, zmm{k}{z}    [AVX512F]
//    * VCVTUDQ2PS {er}, zmm, zmm{k}{z}       [AVX512F]
//    * VCVTUDQ2PS zmm, zmm{k}{z}             [AVX512F]
//    * VCVTUDQ2PS m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTUDQ2PS m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VCVTUDQ2PS xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VCVTUDQ2PS ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VCVTUDQ2PS(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional extra operand supports the 3-operand {er} (embedded
    // rounding) form; all other forms take exactly 2 operands.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTUDQ2PS", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTUDQ2PS", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTUDQ2PS takes 2 or 3 operands")
    }
    // Each operand-form check below registers an encoder for that form via
    // p.add. Register-source forms emit the 4-byte EVEX prefix inline
    // (starting with the 0x62 escape byte); memory-source forms delegate
    // prefix construction to m.evex and the ModRM/SIB/displacement encoding
    // to m.mrsd. The opcode byte for every form is 0x7a.
    // VCVTUDQ2PS m512/m32bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x07, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7a)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VCVTUDQ2PS {er}, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7f)
            // vcode(v[0]) carries the embedded-rounding mode selected by {er}.
            m.emit((zcode(v[2]) << 7) | (vcode(v[0]) << 5) | kcode(v[2]) | 0x18)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTUDQ2PS zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7f)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTUDQ2PS m128/m32bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x07, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7a)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTUDQ2PS m256/m32bcst, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x07, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7a)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTUDQ2PS xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7f)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTUDQ2PS ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7f)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No encodable form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VCVTUDQ2PS")
    }
    return p
}
 41646  
// VCVTUQQ2PD performs "Convert Packed Unsigned Quadword Integers to Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VCVTUQQ2PD
// Supported forms : (7 forms)
//
//    * VCVTUQQ2PD m512/m64bcst, zmm{k}{z}    [AVX512DQ]
//    * VCVTUQQ2PD {er}, zmm, zmm{k}{z}       [AVX512DQ]
//    * VCVTUQQ2PD zmm, zmm{k}{z}             [AVX512DQ]
//    * VCVTUQQ2PD m128/m64bcst, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VCVTUQQ2PD m256/m64bcst, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VCVTUQQ2PD xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VCVTUQQ2PD ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
//
func (self *Program) VCVTUQQ2PD(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional extra operand supports the 3-operand {er} (embedded
    // rounding) form; all other forms take exactly 2 operands.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTUQQ2PD", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTUQQ2PD", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTUQQ2PD takes 2 or 3 operands")
    }
    // Each operand-form check below registers an encoder for that form via
    // p.add. Register-source forms emit the 4-byte EVEX prefix inline
    // (starting with the 0x62 escape byte); memory-source forms delegate
    // prefix construction to m.evex and the ModRM/SIB/displacement encoding
    // to m.mrsd. The opcode byte for every form is 0x7a.
    // VCVTUQQ2PD m512/m64bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x86, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7a)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VCVTUQQ2PD {er}, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfe)
            // vcode(v[0]) carries the embedded-rounding mode selected by {er}.
            m.emit((zcode(v[2]) << 7) | (vcode(v[0]) << 5) | kcode(v[2]) | 0x18)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTUQQ2PD zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTUQQ2PD m128/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x86, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7a)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTUQQ2PD m256/m64bcst, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x86, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7a)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTUQQ2PD xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTUQQ2PD ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No encodable form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VCVTUQQ2PD")
    }
    return p
}
 41754  
// VCVTUQQ2PS performs "Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VCVTUQQ2PS
// Supported forms : (7 forms)
//
//    * VCVTUQQ2PS m512/m64bcst, ymm{k}{z}    [AVX512DQ]
//    * VCVTUQQ2PS {er}, zmm, ymm{k}{z}       [AVX512DQ]
//    * VCVTUQQ2PS zmm, ymm{k}{z}             [AVX512DQ]
//    * VCVTUQQ2PS m128/m64bcst, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VCVTUQQ2PS m256/m64bcst, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VCVTUQQ2PS xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VCVTUQQ2PS ymm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//
func (self *Program) VCVTUQQ2PS(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional extra operand supports the 3-operand {er} (embedded
    // rounding) form; all other forms take exactly 2 operands.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTUQQ2PS", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VCVTUQQ2PS", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VCVTUQQ2PS takes 2 or 3 operands")
    }
    // Each operand-form check below registers an encoder for that form via
    // p.add. Register-source forms emit the 4-byte EVEX prefix inline
    // (starting with the 0x62 escape byte); memory-source forms delegate
    // prefix construction to m.evex and the ModRM/SIB/displacement encoding
    // to m.mrsd. The opcode byte for every form is 0x7a.
    // VCVTUQQ2PS m512/m64bcst, ymm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7a)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VCVTUQQ2PS {er}, zmm, ymm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isYMMkz(vv[0]) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xff)
            // vcode(v[0]) carries the embedded-rounding mode selected by {er}.
            m.emit((zcode(v[2]) << 7) | (vcode(v[0]) << 5) | kcode(v[2]) | 0x18)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VCVTUQQ2PS zmm, ymm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xff)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTUQQ2PS m128/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7a)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VCVTUQQ2PS m256/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x7a)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VCVTUQQ2PS xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xff)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VCVTUQQ2PS ymm, xmm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xff)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No encodable form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VCVTUQQ2PS")
    }
    return p
}
 41862  
// VCVTUSI2SD performs "Convert Unsigned Integer to Scalar Double-Precision Floating-Point Value".
//
// Mnemonic        : VCVTUSI2SD
// Supported forms : (5 forms)
//
//    * VCVTUSI2SD r32, xmm, xmm          [AVX512F]
//    * VCVTUSI2SD m32, xmm, xmm          [AVX512F]
//    * VCVTUSI2SD m64, xmm, xmm          [AVX512F]
//    * VCVTUSI2SD {er}, r64, xmm, xmm    [AVX512F]
//    * VCVTUSI2SD r64, xmm, xmm          [AVX512F]
//
func (self *Program) VCVTUSI2SD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional fourth operand supports the {er} (embedded rounding)
    // form; all other forms take exactly 3 operands.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTUSI2SD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VCVTUSI2SD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VCVTUSI2SD takes 3 or 4 operands")
    }
    // Each operand-form check below registers an encoder for that form via
    // p.add. Register-source forms emit the 4-byte EVEX prefix inline
    // (starting with the 0x62 escape byte); memory-source forms delegate
    // prefix construction to m.evex and the ModRM/SIB/displacement encoding
    // to m.mrsd. The opcode byte for every form is 0x7b.
    // VCVTUSI2SD r32, xmm, xmm
    if len(vv) == 0 && isReg32(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            // hlcode(v[1]) fills the EVEX.vvvv field with the first source register.
            m.emit(0x7f ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | 0x00)
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VCVTUSI2SD m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x07, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), 0, 0, 0)
            m.emit(0x7b)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VCVTUSI2SD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), 0, 0, 0)
            m.emit(0x7b)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VCVTUSI2SD {er}, r64, xmm, xmm
    if len(vv) == 1 && isER(v0) && isReg64(v1) && isEVEXXMM(v2) && isEVEXXMM(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xff ^ (hlcode(v[2]) << 3))
            // vcode(v[0]) carries the embedded-rounding mode selected by {er}.
            m.emit((vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | 0x10)
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VCVTUSI2SD r64, xmm, xmm
    if len(vv) == 0 && isReg64(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xff ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | 0x40)
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encodable form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VCVTUSI2SD")
    }
    return p
}
 41945  
// VCVTUSI2SS performs "Convert Unsigned Integer to Scalar Single-Precision Floating-Point Value".
//
// Mnemonic        : VCVTUSI2SS
// Supported forms : (6 forms)
//
//    * VCVTUSI2SS m32, xmm, xmm          [AVX512F]
//    * VCVTUSI2SS m64, xmm, xmm          [AVX512F]
//    * VCVTUSI2SS {er}, r32, xmm, xmm    [AVX512F]
//    * VCVTUSI2SS {er}, r64, xmm, xmm    [AVX512F]
//    * VCVTUSI2SS r32, xmm, xmm          [AVX512F]
//    * VCVTUSI2SS r64, xmm, xmm          [AVX512F]
//
func (self *Program) VCVTUSI2SS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional fourth operand supports the {er} (embedded rounding)
    // forms; all other forms take exactly 3 operands.
    switch len(vv) {
        case 0  : p = self.alloc("VCVTUSI2SS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VCVTUSI2SS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VCVTUSI2SS takes 3 or 4 operands")
    }
    // Each operand-form check below registers an encoder for that form via
    // p.add. Register-source forms emit the 4-byte EVEX prefix inline
    // (starting with the 0x62 escape byte); memory-source forms delegate
    // prefix construction to m.evex and the ModRM/SIB/displacement encoding
    // to m.mrsd. The opcode byte for every form is 0x7b.
    // VCVTUSI2SS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), 0, 0, 0)
            m.emit(0x7b)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VCVTUSI2SS m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x86, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), 0, 0, 0)
            m.emit(0x7b)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VCVTUSI2SS {er}, r32, xmm, xmm
    if len(vv) == 1 && isER(v0) && isReg32(v1) && isEVEXXMM(v2) && isEVEXXMM(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            // hlcode(v[2]) fills the EVEX.vvvv field with the first source register.
            m.emit(0x7e ^ (hlcode(v[2]) << 3))
            // vcode(v[0]) carries the embedded-rounding mode selected by {er}.
            m.emit((vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | 0x10)
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VCVTUSI2SS {er}, r64, xmm, xmm
    if len(vv) == 1 && isER(v0) && isReg64(v1) && isEVEXXMM(v2) && isEVEXXMM(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfe ^ (hlcode(v[2]) << 3))
            m.emit((vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | 0x10)
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VCVTUSI2SS r32, xmm, xmm
    if len(vv) == 0 && isReg32(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | 0x40)
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VCVTUSI2SS r64, xmm, xmm
    if len(vv) == 0 && isReg64(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfe ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | 0x40)
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encodable form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VCVTUSI2SS")
    }
    return p
}
 42042  
// VDBPSADBW performs "Double Block Packed Sum-Absolute-Differences on Unsigned Bytes".
//
// Mnemonic        : VDBPSADBW
// Supported forms : (6 forms)
//
//    * VDBPSADBW imm8, zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VDBPSADBW imm8, m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VDBPSADBW imm8, xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VDBPSADBW imm8, m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VDBPSADBW imm8, ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VDBPSADBW imm8, m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
func (self *Program) VDBPSADBW(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VDBPSADBW", 4, Operands { v0, v1, v2, v3 })
    // Each operand-form check below registers an encoder for that form via
    // p.add. Register-source forms emit the 4-byte EVEX prefix inline
    // (starting with the 0x62 escape byte); memory-source forms delegate
    // prefix construction to m.evex and the ModRM/SIB/displacement encoding
    // to m.mrsd. The opcode byte for every form is 0x42, and the imm8
    // operand (v0) is always emitted last via m.imm1.
    // VDBPSADBW imm8, zmm, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            // hlcode(v[2]) fills the EVEX.vvvv field with the first source register.
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VDBPSADBW imm8, m512, zmm, zmm{k}{z}
    if isImm8(v0) && isM512(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x42)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VDBPSADBW imm8, xmm, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VDBPSADBW imm8, m128, xmm, xmm{k}{z}
    if isImm8(v0) && isM128(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x42)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VDBPSADBW imm8, ymm, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VDBPSADBW imm8, m256, ymm, ymm{k}{z}
    if isImm8(v0) && isM256(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x42)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encodable form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VDBPSADBW")
    }
    return p
}
 42137  
// VDIVPD performs "Divide Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VDIVPD
// Supported forms : (11 forms)
//
//    * VDIVPD xmm, xmm, xmm                   [AVX]
//    * VDIVPD m128, xmm, xmm                  [AVX]
//    * VDIVPD ymm, ymm, ymm                   [AVX]
//    * VDIVPD m256, ymm, ymm                  [AVX]
//    * VDIVPD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VDIVPD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VDIVPD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VDIVPD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VDIVPD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VDIVPD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VDIVPD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VDIVPD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional trailing operand selects the 4-operand {er} (embedded
    // rounding) form; any other arity is rejected up front.
    switch len(vv) {
        case 0  : p = self.alloc("VDIVPD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VDIVPD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VDIVPD takes 3 or 4 operands")
    }
    // Every form that matches the operand types registers a candidate encoder
    // via p.add; the opcode byte is 0x5e for all forms. Register-register
    // EVEX forms emit the prefix byte-by-byte (escape 0x62), memory forms go
    // through the m.evex/m.mrsd helpers.
    // VDIVPD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VDIVPD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5e)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VDIVPD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VDIVPD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5e)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VDIVPD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5e)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VDIVPD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VDIVPD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VDIVPD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5e)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VDIVPD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VDIVPD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5e)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VDIVPD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VDIVPD")
    }
    return p
}
 42289  
// VDIVPS performs "Divide Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VDIVPS
// Supported forms : (11 forms)
//
//    * VDIVPS xmm, xmm, xmm                   [AVX]
//    * VDIVPS m128, xmm, xmm                  [AVX]
//    * VDIVPS ymm, ymm, ymm                   [AVX]
//    * VDIVPS m256, ymm, ymm                  [AVX]
//    * VDIVPS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VDIVPS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VDIVPS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VDIVPS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VDIVPS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VDIVPS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VDIVPS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VDIVPS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional trailing operand selects the 4-operand {er} (embedded
    // rounding) form; any other arity is rejected up front.
    switch len(vv) {
        case 0  : p = self.alloc("VDIVPS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VDIVPS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VDIVPS takes 3 or 4 operands")
    }
    // Every matching form registers a candidate encoder via p.add; opcode
    // byte is 0x5e for all forms. Single-precision variants differ from
    // VDIVPD only in the prefix/mandatory-prefix bytes (no 0x66/W1).
    // VDIVPS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VDIVPS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5e)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VDIVPS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VDIVPS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5e)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VDIVPS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5e)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VDIVPS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7c ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VDIVPS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VDIVPS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5e)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VDIVPS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VDIVPS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5e)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VDIVPS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VDIVPS")
    }
    return p
}
 42441  
// VDIVSD performs "Divide Scalar Double-Precision Floating-Point Values".
//
// Mnemonic        : VDIVSD
// Supported forms : (5 forms)
//
//    * VDIVSD xmm, xmm, xmm                [AVX]
//    * VDIVSD m64, xmm, xmm                [AVX]
//    * VDIVSD m64, xmm, xmm{k}{z}          [AVX512F]
//    * VDIVSD {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VDIVSD xmm, xmm, xmm{k}{z}          [AVX512F]
//
func (self *Program) VDIVSD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional trailing operand selects the 4-operand {er} (embedded
    // rounding) form; any other arity is rejected up front.
    switch len(vv) {
        case 0  : p = self.alloc("VDIVSD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VDIVSD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VDIVSD takes 3 or 4 operands")
    }
    // Every matching form registers a candidate encoder via p.add; opcode
    // byte is 0x5e for all forms.
    // VDIVSD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VDIVSD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5e)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VDIVSD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x5e)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VDIVSD {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xff ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VDIVSD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xff ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VDIVSD")
    }
    return p
}
 42521  
// VDIVSS performs "Divide Scalar Single-Precision Floating-Point Values".
//
// Mnemonic        : VDIVSS
// Supported forms : (5 forms)
//
//    * VDIVSS xmm, xmm, xmm                [AVX]
//    * VDIVSS m32, xmm, xmm                [AVX]
//    * VDIVSS m32, xmm, xmm{k}{z}          [AVX512F]
//    * VDIVSS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VDIVSS xmm, xmm, xmm{k}{z}          [AVX512F]
//
func (self *Program) VDIVSS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional trailing operand selects the 4-operand {er} (embedded
    // rounding) form; any other arity is rejected up front.
    switch len(vv) {
        case 0  : p = self.alloc("VDIVSS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VDIVSS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VDIVSS takes 3 or 4 operands")
    }
    // Every matching form registers a candidate encoder via p.add; opcode
    // byte is 0x5e for all forms. Differs from VDIVSD only in the
    // prefix/mandatory-prefix bytes and the 4-byte memory operand.
    // VDIVSS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VDIVSS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5e)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VDIVSS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x5e)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VDIVSS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7e ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VDIVSS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VDIVSS")
    }
    return p
}
 42601  
// VDPPD performs "Dot Product of Packed Double Precision Floating-Point Values".
//
// Mnemonic        : VDPPD
// Supported forms : (2 forms)
//
//    * VDPPD imm8, xmm, xmm, xmm     [AVX]
//    * VDPPD imm8, m128, xmm, xmm    [AVX]
//
func (self *Program) VDPPD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VDPPD", 4, Operands { v0, v1, v2, v3 })
    // VDPPD imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix emitted byte-by-byte (escape 0xc4, cf. the
            // m.vex3 helper used for the memory form), opcode 0x41, ModRM,
            // then the trailing imm8 control byte.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x41)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VDPPD imm8, m128, xmm, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x41)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VDPPD")
    }
    return p
}
 42641  
// VDPPS performs "Dot Product of Packed Single Precision Floating-Point Values".
//
// Mnemonic        : VDPPS
// Supported forms : (4 forms)
//
//    * VDPPS imm8, xmm, xmm, xmm     [AVX]
//    * VDPPS imm8, m128, xmm, xmm    [AVX]
//    * VDPPS imm8, ymm, ymm, ymm     [AVX]
//    * VDPPS imm8, m256, ymm, ymm    [AVX]
//
func (self *Program) VDPPS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VDPPS", 4, Operands { v0, v1, v2, v3 })
    // All forms share opcode 0x40 and end with the trailing imm8 control
    // byte; register forms emit the 3-byte VEX prefix byte-by-byte, memory
    // forms go through the m.vex3 helper.
    // VDPPS imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x40)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VDPPS imm8, m128, xmm, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x40)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VDPPS imm8, ymm, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit(0x40)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VDPPS imm8, m256, ymm, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x40)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VDPPS")
    }
    return p
}
 42707  
// VEXP2PD performs "Approximation to the Exponential 2^x of Packed Double-Precision Floating-Point Values with Less Than 2^-23 Relative Error".
//
// Mnemonic        : VEXP2PD
// Supported forms : (3 forms)
//
//    * VEXP2PD m512/m64bcst, zmm{k}{z}    [AVX512ER]
//    * VEXP2PD {sae}, zmm, zmm{k}{z}      [AVX512ER]
//    * VEXP2PD zmm, zmm{k}{z}             [AVX512ER]
//
func (self *Program) VEXP2PD(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional trailing operand selects the 3-operand {sae}
    // (suppress-all-exceptions) form; any other arity is rejected up front.
    switch len(vv) {
        case 0  : p = self.alloc("VEXP2PD", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VEXP2PD", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VEXP2PD takes 2 or 3 operands")
    }
    // Every matching form registers a candidate encoder via p.add; opcode
    // byte is 0xc8 for all forms.
    // VEXP2PD m512/m64bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xc8)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VEXP2PD {sae}, zmm, zmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x18)
            m.emit(0xc8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VEXP2PD zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0xc8)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VEXP2PD")
    }
    return p
}
 42765  
// VEXP2PS performs "Approximation to the Exponential 2^x of Packed Single-Precision Floating-Point Values with Less Than 2^-23 Relative Error".
//
// Mnemonic        : VEXP2PS
// Supported forms : (3 forms)
//
//    * VEXP2PS m512/m32bcst, zmm{k}{z}    [AVX512ER]
//    * VEXP2PS {sae}, zmm, zmm{k}{z}      [AVX512ER]
//    * VEXP2PS zmm, zmm{k}{z}             [AVX512ER]
//
func (self *Program) VEXP2PS(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional trailing operand selects the 3-operand {sae}
    // (suppress-all-exceptions) form; any other arity is rejected up front.
    switch len(vv) {
        case 0  : p = self.alloc("VEXP2PS", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VEXP2PS", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VEXP2PS takes 2 or 3 operands")
    }
    // Every matching form registers a candidate encoder via p.add; opcode
    // byte is 0xc8 for all forms. Differs from VEXP2PD only in the
    // single-precision prefix bytes (0x05/0x7d vs 0x85/0xfd).
    // VEXP2PS m512/m32bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xc8)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VEXP2PS {sae}, zmm, zmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x18)
            m.emit(0xc8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VEXP2PS zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0xc8)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VEXP2PS")
    }
    return p
}
 42823  
// VEXPANDPD performs "Load Sparse Packed Double-Precision Floating-Point Values from Dense Memory".
//
// Mnemonic        : VEXPANDPD
// Supported forms : (6 forms)
//
//    * VEXPANDPD zmm, zmm{k}{z}     [AVX512F]
//    * VEXPANDPD m512, zmm{k}{z}    [AVX512F]
//    * VEXPANDPD xmm, xmm{k}{z}     [AVX512VL]
//    * VEXPANDPD ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VEXPANDPD m128, xmm{k}{z}    [AVX512VL]
//    * VEXPANDPD m256, ymm{k}{z}    [AVX512F,AVX512VL]
//
func (self *Program) VEXPANDPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VEXPANDPD", 2, Operands { v0, v1 })
    // Each operand-pattern match below registers one candidate encoder via p.add.
    // VEXPANDPD zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix (0x62 escape + three payload bytes);
            // hcode/ehcode/ecode fold the high register-index bits into it.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x88)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VEXPANDPD m512, zmm{k}{z}
    if isM512(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x88)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VEXPANDPD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x88)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VEXPANDPD ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x88)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VEXPANDPD m128, xmm{k}{z}
    if isM128(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x88)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VEXPANDPD m256, ymm{k}{z}
    if isM256(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x88)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VEXPANDPD")
    }
    return p
}
 42912  
// VEXPANDPS performs "Load Sparse Packed Single-Precision Floating-Point Values from Dense Memory".
//
// Mnemonic        : VEXPANDPS
// Supported forms : (6 forms)
//
//    * VEXPANDPS zmm, zmm{k}{z}     [AVX512F]
//    * VEXPANDPS m512, zmm{k}{z}    [AVX512F]
//    * VEXPANDPS xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VEXPANDPS ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VEXPANDPS m128, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VEXPANDPS m256, ymm{k}{z}    [AVX512F,AVX512VL]
//
func (self *Program) VEXPANDPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VEXPANDPS", 2, Operands { v0, v1 })
    // Each operand-pattern match below registers one candidate encoder via p.add.
    // VEXPANDPS zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix (0x62 escape + three payload bytes);
            // hcode/ehcode/ecode fold the high register-index bits into it.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x88)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VEXPANDPS m512, zmm{k}{z}
    if isM512(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x88)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VEXPANDPS xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x88)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VEXPANDPS ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x88)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VEXPANDPS m128, xmm{k}{z}
    if isM128(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x88)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VEXPANDPS m256, ymm{k}{z}
    if isM256(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x88)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VEXPANDPS")
    }
    return p
}
 43001  
// VEXTRACTF128 performs "Extract Packed Floating-Point Values".
//
// Mnemonic        : VEXTRACTF128
// Supported forms : (2 forms)
//
//    * VEXTRACTF128 imm8, ymm, xmm     [AVX]
//    * VEXTRACTF128 imm8, ymm, m128    [AVX]
//
func (self *Program) VEXTRACTF128(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VEXTRACTF128", 3, Operands { v0, v1, v2 })
    // Each operand-pattern match below registers one candidate encoder via p.add.
    // VEXTRACTF128 imm8, ymm, xmm
    if isImm8(v0) && isYMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix (0xc4 escape); hcode folds the
            // high register-index bits into the second prefix byte.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[1]) << 7) ^ (hcode(v[2]) << 5))
            m.emit(0x7d)
            m.emit(0x19)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VEXTRACTF128 imm8, ymm, m128
    if isImm8(v0) && isYMM(v1) && isM128(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[1]), addr(v[2]), 0)
            m.emit(0x19)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VEXTRACTF128")
    }
    return p
}
 43041  
// VEXTRACTF32X4 performs "Extract 128 Bits of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VEXTRACTF32X4
// Supported forms : (4 forms)
//
//    * VEXTRACTF32X4 imm8, zmm, xmm{k}{z}     [AVX512F]
//    * VEXTRACTF32X4 imm8, zmm, m128{k}{z}    [AVX512F]
//    * VEXTRACTF32X4 imm8, ymm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VEXTRACTF32X4 imm8, ymm, m128{k}{z}    [AVX512F,AVX512VL]
//
func (self *Program) VEXTRACTF32X4(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VEXTRACTF32X4", 3, Operands { v0, v1, v2 })
    // Each operand-pattern match below registers one candidate encoder via p.add.
    // VEXTRACTF32X4 imm8, zmm, xmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix (0x62 escape + three payload bytes);
            // hcode/ehcode/ecode fold the high register-index bits into it.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)
            m.emit(0x19)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VEXTRACTF32X4 imm8, zmm, m128{k}{z}
    if isImm8(v0) && isZMM(v1) && isM128kz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b10, ehcode(v[1]), addr(v[2]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x19)
            m.mrsd(lcode(v[1]), addr(v[2]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VEXTRACTF32X4 imm8, ymm, xmm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x28)
            m.emit(0x19)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VEXTRACTF32X4 imm8, ymm, m128{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isM128kz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b01, ehcode(v[1]), addr(v[2]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x19)
            m.mrsd(lcode(v[1]), addr(v[2]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VEXTRACTF32X4")
    }
    return p
}
 43109  
// VEXTRACTF32X8 performs "Extract 256 Bits of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VEXTRACTF32X8
// Supported forms : (2 forms)
//
//    * VEXTRACTF32X8 imm8, zmm, ymm{k}{z}     [AVX512DQ]
//    * VEXTRACTF32X8 imm8, zmm, m256{k}{z}    [AVX512DQ]
//
func (self *Program) VEXTRACTF32X8(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VEXTRACTF32X8", 3, Operands { v0, v1, v2 })
    // Each operand-pattern match below registers one candidate encoder via p.add.
    // VEXTRACTF32X8 imm8, zmm, ymm{k}{z}
    if isImm8(v0) && isZMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix (0x62 escape + three payload bytes);
            // hcode/ehcode/ecode fold the high register-index bits into it.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)
            m.emit(0x1b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VEXTRACTF32X8 imm8, zmm, m256{k}{z}
    if isImm8(v0) && isZMM(v1) && isM256kz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b10, ehcode(v[1]), addr(v[2]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x1b)
            m.mrsd(lcode(v[1]), addr(v[2]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VEXTRACTF32X8")
    }
    return p
}
 43150  
// VEXTRACTF64X2 performs "Extract 128 Bits of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VEXTRACTF64X2
// Supported forms : (4 forms)
//
//    * VEXTRACTF64X2 imm8, zmm, xmm{k}{z}     [AVX512DQ]
//    * VEXTRACTF64X2 imm8, zmm, m128{k}{z}    [AVX512DQ]
//    * VEXTRACTF64X2 imm8, ymm, xmm{k}{z}     [AVX512DQ,AVX512VL]
//    * VEXTRACTF64X2 imm8, ymm, m128{k}{z}    [AVX512DQ,AVX512VL]
//
func (self *Program) VEXTRACTF64X2(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VEXTRACTF64X2", 3, Operands { v0, v1, v2 })
    // Each operand-pattern match below registers one candidate encoder via p.add.
    // VEXTRACTF64X2 imm8, zmm, xmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix (0x62 escape + three payload bytes);
            // hcode/ehcode/ecode fold the high register-index bits into it.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)
            m.emit(0x19)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VEXTRACTF64X2 imm8, zmm, m128{k}{z}
    if isImm8(v0) && isZMM(v1) && isM128kz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b10, ehcode(v[1]), addr(v[2]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x19)
            m.mrsd(lcode(v[1]), addr(v[2]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VEXTRACTF64X2 imm8, ymm, xmm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x28)
            m.emit(0x19)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VEXTRACTF64X2 imm8, ymm, m128{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isM128kz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b01, ehcode(v[1]), addr(v[2]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x19)
            m.mrsd(lcode(v[1]), addr(v[2]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VEXTRACTF64X2")
    }
    return p
}
 43218  
// VEXTRACTF64X4 performs "Extract 256 Bits of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VEXTRACTF64X4
// Supported forms : (2 forms)
//
//    * VEXTRACTF64X4 imm8, zmm, ymm{k}{z}     [AVX512F]
//    * VEXTRACTF64X4 imm8, zmm, m256{k}{z}    [AVX512F]
//
func (self *Program) VEXTRACTF64X4(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VEXTRACTF64X4", 3, Operands { v0, v1, v2 })
    // Each operand-pattern match below registers one candidate encoder via p.add.
    // VEXTRACTF64X4 imm8, zmm, ymm{k}{z}
    if isImm8(v0) && isZMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix (0x62 escape + three payload bytes);
            // hcode/ehcode/ecode fold the high register-index bits into it.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)
            m.emit(0x1b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VEXTRACTF64X4 imm8, zmm, m256{k}{z}
    if isImm8(v0) && isZMM(v1) && isM256kz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b10, ehcode(v[1]), addr(v[2]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x1b)
            m.mrsd(lcode(v[1]), addr(v[2]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VEXTRACTF64X4")
    }
    return p
}
 43259  
// VEXTRACTI128 performs "Extract Packed Integer Values".
//
// Mnemonic        : VEXTRACTI128
// Supported forms : (2 forms)
//
//    * VEXTRACTI128 imm8, ymm, xmm     [AVX2]
//    * VEXTRACTI128 imm8, ymm, m128    [AVX2]
//
func (self *Program) VEXTRACTI128(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VEXTRACTI128", 3, Operands { v0, v1, v2 })
    // Each operand-pattern match below registers one candidate encoder via p.add.
    // VEXTRACTI128 imm8, ymm, xmm
    if isImm8(v0) && isYMM(v1) && isXMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix (0xc4 escape); hcode folds the
            // high register-index bits into the second prefix byte.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[1]) << 7) ^ (hcode(v[2]) << 5))
            m.emit(0x7d)
            m.emit(0x39)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VEXTRACTI128 imm8, ymm, m128
    if isImm8(v0) && isYMM(v1) && isM128(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[1]), addr(v[2]), 0)
            m.emit(0x39)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VEXTRACTI128")
    }
    return p
}
 43299  
// VEXTRACTI32X4 performs "Extract 128 Bits of Packed Doubleword Integer Values".
//
// Mnemonic        : VEXTRACTI32X4
// Supported forms : (4 forms)
//
//    * VEXTRACTI32X4 imm8, zmm, xmm{k}{z}     [AVX512F]
//    * VEXTRACTI32X4 imm8, zmm, m128{k}{z}    [AVX512F]
//    * VEXTRACTI32X4 imm8, ymm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VEXTRACTI32X4 imm8, ymm, m128{k}{z}    [AVX512F,AVX512VL]
//
func (self *Program) VEXTRACTI32X4(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VEXTRACTI32X4", 3, Operands { v0, v1, v2 })
    // Each operand-pattern match below registers one candidate encoder via p.add.
    // VEXTRACTI32X4 imm8, zmm, xmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix (0x62 escape + three payload bytes);
            // hcode/ehcode/ecode fold the high register-index bits into it.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)
            m.emit(0x39)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VEXTRACTI32X4 imm8, zmm, m128{k}{z}
    if isImm8(v0) && isZMM(v1) && isM128kz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b10, ehcode(v[1]), addr(v[2]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x39)
            m.mrsd(lcode(v[1]), addr(v[2]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VEXTRACTI32X4 imm8, ymm, xmm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x28)
            m.emit(0x39)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VEXTRACTI32X4 imm8, ymm, m128{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isM128kz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b01, ehcode(v[1]), addr(v[2]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x39)
            m.mrsd(lcode(v[1]), addr(v[2]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VEXTRACTI32X4")
    }
    return p
}
 43367  
// VEXTRACTI32X8 performs "Extract 256 Bits of Packed Doubleword Integer Values".
//
// Mnemonic        : VEXTRACTI32X8
// Supported forms : (2 forms)
//
//    * VEXTRACTI32X8 imm8, zmm, ymm{k}{z}     [AVX512DQ]
//    * VEXTRACTI32X8 imm8, zmm, m256{k}{z}    [AVX512DQ]
//
func (self *Program) VEXTRACTI32X8(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VEXTRACTI32X8", 3, Operands { v0, v1, v2 })
    // Each operand-pattern match below registers one candidate encoder via p.add.
    // VEXTRACTI32X8 imm8, zmm, ymm{k}{z}
    if isImm8(v0) && isZMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix (0x62 escape + three payload bytes);
            // hcode/ehcode/ecode fold the high register-index bits into it.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)
            m.emit(0x3b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VEXTRACTI32X8 imm8, zmm, m256{k}{z}
    if isImm8(v0) && isZMM(v1) && isM256kz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b10, ehcode(v[1]), addr(v[2]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x3b)
            m.mrsd(lcode(v[1]), addr(v[2]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VEXTRACTI32X8")
    }
    return p
}
 43408  
// VEXTRACTI64X2 performs "Extract 128 Bits of Packed Quadword Integer Values".
//
// Mnemonic        : VEXTRACTI64X2
// Supported forms : (4 forms)
//
//    * VEXTRACTI64X2 imm8, zmm, xmm{k}{z}     [AVX512DQ]
//    * VEXTRACTI64X2 imm8, zmm, m128{k}{z}    [AVX512DQ]
//    * VEXTRACTI64X2 imm8, ymm, xmm{k}{z}     [AVX512DQ,AVX512VL]
//    * VEXTRACTI64X2 imm8, ymm, m128{k}{z}    [AVX512DQ,AVX512VL]
//
func (self *Program) VEXTRACTI64X2(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VEXTRACTI64X2", 3, Operands { v0, v1, v2 })
    // Each operand-pattern match below registers one candidate encoder via p.add.
    // VEXTRACTI64X2 imm8, zmm, xmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix (0x62 escape + three payload bytes);
            // hcode/ehcode/ecode fold the high register-index bits into it.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)
            m.emit(0x39)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VEXTRACTI64X2 imm8, zmm, m128{k}{z}
    if isImm8(v0) && isZMM(v1) && isM128kz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b10, ehcode(v[1]), addr(v[2]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x39)
            m.mrsd(lcode(v[1]), addr(v[2]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VEXTRACTI64X2 imm8, ymm, xmm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x28)
            m.emit(0x39)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VEXTRACTI64X2 imm8, ymm, m128{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isM128kz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b01, ehcode(v[1]), addr(v[2]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x39)
            m.mrsd(lcode(v[1]), addr(v[2]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VEXTRACTI64X2")
    }
    return p
}
 43476  
// VEXTRACTI64X4 performs "Extract 256 Bits of Packed Quadword Integer Values".
//
// Mnemonic        : VEXTRACTI64X4
// Supported forms : (2 forms)
//
//    * VEXTRACTI64X4 imm8, zmm, ymm{k}{z}     [AVX512F]
//    * VEXTRACTI64X4 imm8, zmm, m256{k}{z}    [AVX512F]
//
func (self *Program) VEXTRACTI64X4(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VEXTRACTI64X4", 3, Operands { v0, v1, v2 })
    // Each operand-pattern match below registers one candidate encoder via p.add.
    // VEXTRACTI64X4 imm8, zmm, ymm{k}{z}
    if isImm8(v0) && isZMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix (0x62 escape + three payload bytes);
            // hcode/ehcode/ecode fold the high register-index bits into it.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)
            m.emit(0x3b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VEXTRACTI64X4 imm8, zmm, m256{k}{z}
    if isImm8(v0) && isZMM(v1) && isM256kz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b10, ehcode(v[1]), addr(v[2]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x3b)
            m.mrsd(lcode(v[1]), addr(v[2]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VEXTRACTI64X4")
    }
    return p
}
 43517  
// VEXTRACTPS performs "Extract Packed Single Precision Floating-Point Value".
//
// Mnemonic        : VEXTRACTPS
// Supported forms : (4 forms)
//
//    * VEXTRACTPS imm8, xmm, r32    [AVX]
//    * VEXTRACTPS imm8, xmm, m32    [AVX]
//    * VEXTRACTPS imm8, xmm, r32    [AVX512F]
//    * VEXTRACTPS imm8, xmm, m32    [AVX512F]
//
func (self *Program) VEXTRACTPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Both a VEX-encoded (AVX) and an EVEX-encoded (AVX512F) variant exist
    // for each destination kind; which one is selected depends on whether
    // the xmm operand matched isXMM (VEX range) or isEVEXXMM (extended
    // register range). Matching forms register encoder closures via p.add.
    p := self.alloc("VEXTRACTPS", 3, Operands { v0, v1, v2 })
    // VEXTRACTPS imm8, xmm, r32
    if isImm8(v0) && isXMM(v1) && isReg32(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built 3-byte VEX prefix (0xc4 escape), opcode 0x17,
            // ModRM, trailing imm8 lane selector.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[1]) << 7) ^ (hcode(v[2]) << 5))
            m.emit(0x79)
            m.emit(0x17)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VEXTRACTPS imm8, xmm, m32
    if isImm8(v0) && isXMM(v1) && isM32(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX memory form: prefix via m.vex3, ModRM/SIB/disp via m.mrsd.
            m.vex3(0xc4, 0b11, 0x01, hcode(v[1]), addr(v[2]), 0)
            m.emit(0x17)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VEXTRACTPS imm8, xmm, r32
    if isImm8(v0) && isEVEXXMM(v1) && isReg32(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form: hand-built 0x62-escape prefix, then
            // opcode, ModRM, imm8. No masking bits — this form takes none.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit(0x08)
            m.emit(0x17)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VEXTRACTPS imm8, xmm, m32
    if isImm8(v0) && isEVEXXMM(v1) && isM32(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form: prefix via m.evex, ModRM/SIB/disp via m.mrsd
            // with scale 4 (32-bit element).
            m.evex(0b11, 0x05, 0b00, ehcode(v[1]), addr(v[2]), 0, 0, 0, 0)
            m.emit(0x17)
            m.mrsd(lcode(v[1]), addr(v[2]), 4)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No supported form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VEXTRACTPS")
    }
    return p
}
 43584  
// VFIXUPIMMPD performs "Fix Up Special Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VFIXUPIMMPD
// Supported forms : (7 forms)
//
//    * VFIXUPIMMPD imm8, m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFIXUPIMMPD imm8, {sae}, zmm, zmm, zmm{k}{z}      [AVX512F]
//    * VFIXUPIMMPD imm8, zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFIXUPIMMPD imm8, m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFIXUPIMMPD imm8, xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFIXUPIMMPD imm8, m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFIXUPIMMPD imm8, ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VFIXUPIMMPD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}, vv ...interface{}) *Instruction {
    // The instruction takes 4 operands, or 5 when the optional {sae}
    // suppress-all-exceptions operand is supplied; the vv variadic carries
    // the fifth. Each form check below also gates on len(vv) so the 4- and
    // 5-operand forms cannot cross-match.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VFIXUPIMMPD", 4, Operands { v0, v1, v2, v3 })
        case 1  : p = self.alloc("VFIXUPIMMPD", 5, Operands { v0, v1, v2, v3, vv[0] })
        default : panic("instruction VFIXUPIMMPD takes 4 or 5 operands")
    }
    // VFIXUPIMMPD imm8, m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM512M64bcst(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: EVEX prefix via m.evex (bcode carries the
            // broadcast flag), ModRM/SIB/disp via m.mrsd with scale 64.
            m.evex(0b11, 0x85, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x54)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFIXUPIMMPD imm8, {sae}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isZMM(v2) && isZMM(v3) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // {sae} form: hand-built EVEX prefix; the 0x10 in the fourth
            // byte presumably sets EVEX.b to request SAE — NOTE(review):
            // confirm against the Intel SDM EVEX encoding tables.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[4]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[4]) << 4)))
            m.emit(0xfd ^ (hlcode(v[3]) << 3))
            m.emit((zcode(v[4]) << 7) | (0x08 ^ (ecode(v[3]) << 3)) | kcode(v[4]) | 0x10)
            m.emit(0x54)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFIXUPIMMPD imm8, zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form, 512-bit (the 0x40 vector-length bits).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x54)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFIXUPIMMPD imm8, m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM128M64bcst(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit memory form: disp scale 16.
            m.evex(0b11, 0x85, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x54)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFIXUPIMMPD imm8, xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form, 128-bit (vector-length bits 0x00).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0x54)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFIXUPIMMPD imm8, m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM256M64bcst(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form: disp scale 32.
            m.evex(0b11, 0x85, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x54)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFIXUPIMMPD imm8, ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form, 256-bit (vector-length bits 0x20).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x54)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No supported form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VFIXUPIMMPD")
    }
    return p
}
 43699  
// VFIXUPIMMPS performs "Fix Up Special Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VFIXUPIMMPS
// Supported forms : (7 forms)
//
//    * VFIXUPIMMPS imm8, m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFIXUPIMMPS imm8, {sae}, zmm, zmm, zmm{k}{z}      [AVX512F]
//    * VFIXUPIMMPS imm8, zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFIXUPIMMPS imm8, m128/m32bcst, xmm, xmm{k}{z}    [AVX512VL]
//    * VFIXUPIMMPS imm8, xmm, xmm, xmm{k}{z}             [AVX512VL]
//    * VFIXUPIMMPS imm8, m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFIXUPIMMPS imm8, ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VFIXUPIMMPS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}, vv ...interface{}) *Instruction {
    // Single-precision counterpart of VFIXUPIMMPD: same dispatch shape
    // (4 operands, or 5 with {sae} in vv), but the EVEX prefix selects the
    // 32-bit element variant (0x05/0x7d prefix constants instead of
    // 0x85/0xfd). Each matching form registers an encoder via p.add.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VFIXUPIMMPS", 4, Operands { v0, v1, v2, v3 })
        case 1  : p = self.alloc("VFIXUPIMMPS", 5, Operands { v0, v1, v2, v3, vv[0] })
        default : panic("instruction VFIXUPIMMPS takes 4 or 5 operands")
    }
    // VFIXUPIMMPS imm8, m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM512M32bcst(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 512-bit memory form: disp scale 64; bcode carries broadcast.
            m.evex(0b11, 0x05, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x54)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFIXUPIMMPS imm8, {sae}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isZMM(v2) && isZMM(v3) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // {sae} form: hand-built EVEX prefix; 0x10 presumably sets
            // EVEX.b for SAE — NOTE(review): confirm against the SDM.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[4]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[4]) << 4)))
            m.emit(0x7d ^ (hlcode(v[3]) << 3))
            m.emit((zcode(v[4]) << 7) | (0x08 ^ (ecode(v[3]) << 3)) | kcode(v[4]) | 0x10)
            m.emit(0x54)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFIXUPIMMPS imm8, zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form, 512-bit (vector-length bits 0x40).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x54)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFIXUPIMMPS imm8, m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM128M32bcst(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit memory form: disp scale 16.
            m.evex(0b11, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x54)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFIXUPIMMPS imm8, xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form, 128-bit (vector-length bits 0x00).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0x54)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFIXUPIMMPS imm8, m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM256M32bcst(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form: disp scale 32.
            m.evex(0b11, 0x05, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x54)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFIXUPIMMPS imm8, ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form, 256-bit (vector-length bits 0x20).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x54)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No supported form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VFIXUPIMMPS")
    }
    return p
}
 43814  
// VFIXUPIMMSD performs "Fix Up Special Scalar Double-Precision Floating-Point Value".
//
// Mnemonic        : VFIXUPIMMSD
// Supported forms : (3 forms)
//
//    * VFIXUPIMMSD imm8, m64, xmm, xmm{k}{z}           [AVX512F]
//    * VFIXUPIMMSD imm8, {sae}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VFIXUPIMMSD imm8, xmm, xmm, xmm{k}{z}           [AVX512F]
//
func (self *Program) VFIXUPIMMSD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}, vv ...interface{}) *Instruction {
    // Scalar variant: 4 operands, or 5 when the optional {sae} operand is
    // present in vv. Matching forms register encoder closures via p.add;
    // len(vv) in each check keeps the two arities from cross-matching.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VFIXUPIMMSD", 4, Operands { v0, v1, v2, v3 })
        case 1  : p = self.alloc("VFIXUPIMMSD", 5, Operands { v0, v1, v2, v3, vv[0] })
        default : panic("instruction VFIXUPIMMSD takes 4 or 5 operands")
    }
    // VFIXUPIMMSD imm8, m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM64(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: EVEX prefix via m.evex, ModRM/SIB/disp via
            // m.mrsd with scale 8 (64-bit scalar operand).
            m.evex(0b11, 0x85, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x55)
            m.mrsd(lcode(v[3]), addr(v[1]), 8)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFIXUPIMMSD imm8, {sae}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // {sae} form: hand-built EVEX prefix; 0x10 presumably sets
            // EVEX.b to request SAE — NOTE(review): confirm against the SDM.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[4]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[4]) << 4)))
            m.emit(0xfd ^ (hlcode(v[3]) << 3))
            m.emit((zcode(v[4]) << 7) | (0x08 ^ (ecode(v[3]) << 3)) | kcode(v[4]) | 0x10)
            m.emit(0x55)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFIXUPIMMSD imm8, xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-built EVEX prefix, opcode 0x55, ModRM, imm8.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x55)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No supported form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VFIXUPIMMSD")
    }
    return p
}
 43875  
// VFIXUPIMMSS performs "Fix Up Special Scalar Single-Precision Floating-Point Value".
//
// Mnemonic        : VFIXUPIMMSS
// Supported forms : (3 forms)
//
//    * VFIXUPIMMSS imm8, m32, xmm, xmm{k}{z}           [AVX512F]
//    * VFIXUPIMMSS imm8, {sae}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VFIXUPIMMSS imm8, xmm, xmm, xmm{k}{z}           [AVX512F]
//
func (self *Program) VFIXUPIMMSS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}, vv ...interface{}) *Instruction {
    // Single-precision scalar counterpart of VFIXUPIMMSD: same dispatch
    // (4 operands, or 5 with {sae} in vv) but 32-bit element encoding
    // (0x05/0x7d prefix constants, disp scale 4).
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VFIXUPIMMSS", 4, Operands { v0, v1, v2, v3 })
        case 1  : p = self.alloc("VFIXUPIMMSS", 5, Operands { v0, v1, v2, v3, vv[0] })
        default : panic("instruction VFIXUPIMMSS takes 4 or 5 operands")
    }
    // VFIXUPIMMSS imm8, m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM32(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: EVEX prefix via m.evex, ModRM/SIB/disp via
            // m.mrsd with scale 4 (32-bit scalar operand).
            m.evex(0b11, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x55)
            m.mrsd(lcode(v[3]), addr(v[1]), 4)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFIXUPIMMSS imm8, {sae}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // {sae} form: hand-built EVEX prefix; 0x10 presumably sets
            // EVEX.b to request SAE — NOTE(review): confirm against the SDM.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[4]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[4]) << 4)))
            m.emit(0x7d ^ (hlcode(v[3]) << 3))
            m.emit((zcode(v[4]) << 7) | (0x08 ^ (ecode(v[3]) << 3)) | kcode(v[4]) | 0x10)
            m.emit(0x55)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFIXUPIMMSS imm8, xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-built EVEX prefix, opcode 0x55, ModRM, imm8.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x55)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No supported form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VFIXUPIMMSS")
    }
    return p
}
 43936  
// VFMADD132PD performs "Fused Multiply-Add of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VFMADD132PD
// Supported forms : (11 forms)
//
//    * VFMADD132PD xmm, xmm, xmm                   [FMA3]
//    * VFMADD132PD m128, xmm, xmm                  [FMA3]
//    * VFMADD132PD ymm, ymm, ymm                   [FMA3]
//    * VFMADD132PD m256, ymm, ymm                  [FMA3]
//    * VFMADD132PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFMADD132PD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFMADD132PD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFMADD132PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFMADD132PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFMADD132PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFMADD132PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VFMADD132PD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // 3 operands normally, or 4 when the optional {er} embedded-rounding
    // operand is supplied in vv. Legacy VEX forms (FMA3) and EVEX forms
    // (AVX512F[+VL]) are both checked; every matching form registers an
    // encoder closure via p.add, and p.len == 0 at the end means nothing
    // matched.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VFMADD132PD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMADD132PD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMADD132PD takes 3 or 4 operands")
    }
    // VFMADD132PD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX 128-bit register form: hand-built 3-byte VEX prefix
            // (0xc4 escape), opcode 0x98, ModRM.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0x98)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD132PD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX 128-bit memory form: prefix via m.vex3, then ModRM/SIB/disp.
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x98)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADD132PD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX 256-bit register form (0xfd selects the 256-bit variant).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit(0x98)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD132PD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX 256-bit memory form.
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x98)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADD132PD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 512-bit memory form: bcode carries the broadcast flag;
            // disp scale 64.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x98)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFMADD132PD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // {er} form: vcode(v[0]) << 5 folds the rounding-mode bits into
            // the hand-built EVEX prefix; 0x10 presumably sets EVEX.b to
            // enable embedded rounding — NOTE(review): confirm vs. the SDM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x98)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMADD132PD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form, 512-bit (vector-length bits 0x40).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x98)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD132PD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 128-bit memory form: disp scale 16.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x98)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFMADD132PD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form, 128-bit (vector-length bits 0x00).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x98)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD132PD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 256-bit memory form: disp scale 32.
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x98)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFMADD132PD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form, 256-bit (vector-length bits 0x20).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x98)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No supported form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VFMADD132PD")
    }
    return p
}
 44092  
 44093  // VFMADD132PS performs "Fused Multiply-Add of Packed Single-Precision Floating-Point Values".
 44094  //
 44095  // Mnemonic        : VFMADD132PS
 44096  // Supported forms : (11 forms)
 44097  //
 44098  //    * VFMADD132PS xmm, xmm, xmm                   [FMA3]
 44099  //    * VFMADD132PS m128, xmm, xmm                  [FMA3]
 44100  //    * VFMADD132PS ymm, ymm, ymm                   [FMA3]
 44101  //    * VFMADD132PS m256, ymm, ymm                  [FMA3]
 44102  //    * VFMADD132PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
 44103  //    * VFMADD132PS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 44104  //    * VFMADD132PS zmm, zmm, zmm{k}{z}             [AVX512F]
 44105  //    * VFMADD132PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 44106  //    * VFMADD132PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 44107  //    * VFMADD132PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 44108  //    * VFMADD132PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 44109  //
func (self *Program) VFMADD132PS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Only the {er} (embedded-rounding) form takes a 4th operand; it arrives
    // through the variadic tail. Any other arity is rejected up front.
    switch len(vv) {
        case 0  : p = self.alloc("VFMADD132PS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMADD132PS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMADD132PS takes 3 or 4 operands")
    }
    // VFMADD132PS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // VEX byte 1: high bits of reg/rm + opcode map
            m.emit(0x79 ^ (hlcode(v[1]) << 3))                      // VEX byte 2: vvvv encodes v[1]
            m.emit(0x98)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: register-direct, reg = v[2], rm = v[0]
        })
    }
    // VFMADD132PS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x98)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)  // ModRM/SIB/displacement for the memory operand
        })
    }
    // VFMADD132PS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))  // 0x7d vs 0x79: VEX.L set for the 256-bit form
            m.emit(0x98)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD132PS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x98)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADD132PS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix; bcode(v[0]) carries the broadcast bit for m32bcst.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x98)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)  // 64-byte displacement scale for the 512-bit memory form
        })
    }
    // VFMADD132PS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)  // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)  // vcode(v[0]) = rounding-mode bits from the {er} operand
            m.emit(0x98)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMADD132PS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)  // 0x40: 512-bit vector length
            m.emit(0x98)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD132PS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x98)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFMADD132PS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)  // 0x00: 128-bit vector length
            m.emit(0x98)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD132PS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x98)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFMADD132PS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)  // 0x20: 256-bit vector length
            m.emit(0x98)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No candidate encoding matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VFMADD132PS")
    }
    return p
}
 44248  
 44249  // VFMADD132SD performs "Fused Multiply-Add of Scalar Double-Precision Floating-Point Values".
 44250  //
 44251  // Mnemonic        : VFMADD132SD
 44252  // Supported forms : (5 forms)
 44253  //
 44254  //    * VFMADD132SD xmm, xmm, xmm                [FMA3]
 44255  //    * VFMADD132SD m64, xmm, xmm                [FMA3]
 44256  //    * VFMADD132SD m64, xmm, xmm{k}{z}          [AVX512F]
 44257  //    * VFMADD132SD {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
 44258  //    * VFMADD132SD xmm, xmm, xmm{k}{z}          [AVX512F]
 44259  //
func (self *Program) VFMADD132SD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Only the {er} (embedded-rounding) form takes a 4th operand; it arrives
    // through the variadic tail.
    switch len(vv) {
        case 0  : p = self.alloc("VFMADD132SD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMADD132SD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMADD132SD takes 3 or 4 operands")
    }
    // VFMADD132SD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // VEX byte 1: high bits of reg/rm + opcode map
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))                      // VEX byte 2: vvvv = v[1]; W set for the double-precision form
            m.emit(0x99)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: register-direct, reg = v[2], rm = v[0]
        })
    }
    // VFMADD132SD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x99)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)  // ModRM/SIB/displacement for the memory operand
        })
    }
    // VFMADD132SD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Final argument is 0: the scalar form has no broadcast bit.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x99)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)  // 8-byte displacement scale for the m64 operand
        })
    }
    // VFMADD132SD {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)  // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)  // vcode(v[0]) = rounding-mode bits from the {er} operand
            m.emit(0x99)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMADD132SD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x99)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No candidate encoding matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VFMADD132SD")
    }
    return p
}
 44330  
 44331  // VFMADD132SS performs "Fused Multiply-Add of Scalar Single-Precision Floating-Point Values".
 44332  //
 44333  // Mnemonic        : VFMADD132SS
 44334  // Supported forms : (5 forms)
 44335  //
 44336  //    * VFMADD132SS xmm, xmm, xmm                [FMA3]
 44337  //    * VFMADD132SS m32, xmm, xmm                [FMA3]
 44338  //    * VFMADD132SS m32, xmm, xmm{k}{z}          [AVX512F]
 44339  //    * VFMADD132SS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
 44340  //    * VFMADD132SS xmm, xmm, xmm{k}{z}          [AVX512F]
 44341  //
func (self *Program) VFMADD132SS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Only the {er} (embedded-rounding) form takes a 4th operand; it arrives
    // through the variadic tail.
    switch len(vv) {
        case 0  : p = self.alloc("VFMADD132SS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMADD132SS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMADD132SS takes 3 or 4 operands")
    }
    // VFMADD132SS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // VEX byte 1: high bits of reg/rm + opcode map
            m.emit(0x79 ^ (hlcode(v[1]) << 3))                      // VEX byte 2: vvvv encodes v[1]
            m.emit(0x99)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: register-direct, reg = v[2], rm = v[0]
        })
    }
    // VFMADD132SS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x99)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)  // ModRM/SIB/displacement for the memory operand
        })
    }
    // VFMADD132SS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Final argument is 0: the scalar form has no broadcast bit.
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x99)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)  // 4-byte displacement scale for the m32 operand
        })
    }
    // VFMADD132SS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)  // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)  // vcode(v[0]) = rounding-mode bits from the {er} operand
            m.emit(0x99)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMADD132SS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x99)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No candidate encoding matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VFMADD132SS")
    }
    return p
}
 44412  
 44413  // VFMADD213PD performs "Fused Multiply-Add of Packed Double-Precision Floating-Point Values".
 44414  //
 44415  // Mnemonic        : VFMADD213PD
 44416  // Supported forms : (11 forms)
 44417  //
 44418  //    * VFMADD213PD xmm, xmm, xmm                   [FMA3]
 44419  //    * VFMADD213PD m128, xmm, xmm                  [FMA3]
 44420  //    * VFMADD213PD ymm, ymm, ymm                   [FMA3]
 44421  //    * VFMADD213PD m256, ymm, ymm                  [FMA3]
 44422  //    * VFMADD213PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
 44423  //    * VFMADD213PD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 44424  //    * VFMADD213PD zmm, zmm, zmm{k}{z}             [AVX512F]
 44425  //    * VFMADD213PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 44426  //    * VFMADD213PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 44427  //    * VFMADD213PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 44428  //    * VFMADD213PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 44429  //
func (self *Program) VFMADD213PD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Only the {er} (embedded-rounding) form takes a 4th operand; it arrives
    // through the variadic tail.
    switch len(vv) {
        case 0  : p = self.alloc("VFMADD213PD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMADD213PD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMADD213PD takes 3 or 4 operands")
    }
    // VFMADD213PD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // VEX byte 1: high bits of reg/rm + opcode map
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))                      // VEX byte 2: vvvv = v[1]; W set for the double-precision form
            m.emit(0xa8)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: register-direct, reg = v[2], rm = v[0]
        })
    }
    // VFMADD213PD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xa8)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)  // ModRM/SIB/displacement for the memory operand
        })
    }
    // VFMADD213PD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))  // 0xfd vs 0xf9: VEX.L set for the 256-bit form
            m.emit(0xa8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD213PD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xa8)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADD213PD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix; bcode(v[0]) carries the broadcast bit for m64bcst.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xa8)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)  // 64-byte displacement scale for the 512-bit memory form
        })
    }
    // VFMADD213PD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)  // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)  // vcode(v[0]) = rounding-mode bits from the {er} operand
            m.emit(0xa8)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMADD213PD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)  // 0x40: 512-bit vector length
            m.emit(0xa8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD213PD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xa8)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFMADD213PD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)  // 0x00: 128-bit vector length
            m.emit(0xa8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD213PD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xa8)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFMADD213PD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)  // 0x20: 256-bit vector length
            m.emit(0xa8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No candidate encoding matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VFMADD213PD")
    }
    return p
}
 44568  
 44569  // VFMADD213PS performs "Fused Multiply-Add of Packed Single-Precision Floating-Point Values".
 44570  //
 44571  // Mnemonic        : VFMADD213PS
 44572  // Supported forms : (11 forms)
 44573  //
 44574  //    * VFMADD213PS xmm, xmm, xmm                   [FMA3]
 44575  //    * VFMADD213PS m128, xmm, xmm                  [FMA3]
 44576  //    * VFMADD213PS ymm, ymm, ymm                   [FMA3]
 44577  //    * VFMADD213PS m256, ymm, ymm                  [FMA3]
 44578  //    * VFMADD213PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
 44579  //    * VFMADD213PS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 44580  //    * VFMADD213PS zmm, zmm, zmm{k}{z}             [AVX512F]
 44581  //    * VFMADD213PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 44582  //    * VFMADD213PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 44583  //    * VFMADD213PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 44584  //    * VFMADD213PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 44585  //
func (self *Program) VFMADD213PS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Only the {er} (embedded-rounding) form takes a 4th operand; it arrives
    // through the variadic tail.
    switch len(vv) {
        case 0  : p = self.alloc("VFMADD213PS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMADD213PS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMADD213PS takes 3 or 4 operands")
    }
    // VFMADD213PS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // VEX byte 1: high bits of reg/rm + opcode map
            m.emit(0x79 ^ (hlcode(v[1]) << 3))                      // VEX byte 2: vvvv encodes v[1]
            m.emit(0xa8)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: register-direct, reg = v[2], rm = v[0]
        })
    }
    // VFMADD213PS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xa8)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)  // ModRM/SIB/displacement for the memory operand
        })
    }
    // VFMADD213PS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))  // 0x7d vs 0x79: VEX.L set for the 256-bit form
            m.emit(0xa8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD213PS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xa8)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADD213PS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix; bcode(v[0]) carries the broadcast bit for m32bcst.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xa8)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)  // 64-byte displacement scale for the 512-bit memory form
        })
    }
    // VFMADD213PS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)  // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)  // vcode(v[0]) = rounding-mode bits from the {er} operand
            m.emit(0xa8)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMADD213PS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)  // 0x40: 512-bit vector length
            m.emit(0xa8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD213PS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xa8)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFMADD213PS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)  // 0x00: 128-bit vector length
            m.emit(0xa8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADD213PS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xa8)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFMADD213PS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)  // 0x20: 256-bit vector length
            m.emit(0xa8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No candidate encoding matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VFMADD213PS")
    }
    return p
}
 44724  
 44725  // VFMADD213SD performs "Fused Multiply-Add of Scalar Double-Precision Floating-Point Values".
 44726  //
 44727  // Mnemonic        : VFMADD213SD
 44728  // Supported forms : (5 forms)
 44729  //
 44730  //    * VFMADD213SD xmm, xmm, xmm                [FMA3]
 44731  //    * VFMADD213SD m64, xmm, xmm                [FMA3]
 44732  //    * VFMADD213SD m64, xmm, xmm{k}{z}          [AVX512F]
 44733  //    * VFMADD213SD {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
 44734  //    * VFMADD213SD xmm, xmm, xmm{k}{z}          [AVX512F]
 44735  //
func (self *Program) VFMADD213SD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Only the {er} (embedded-rounding) form takes a 4th operand; it arrives
    // through the variadic tail.
    switch len(vv) {
        case 0  : p = self.alloc("VFMADD213SD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMADD213SD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMADD213SD takes 3 or 4 operands")
    }
    // VFMADD213SD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // VEX byte 1: high bits of reg/rm + opcode map
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))                      // VEX byte 2: vvvv = v[1]; W set for the double-precision form
            m.emit(0xa9)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: register-direct, reg = v[2], rm = v[0]
        })
    }
    // VFMADD213SD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xa9)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)  // ModRM/SIB/displacement for the memory operand
        })
    }
    // VFMADD213SD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Final argument is 0: the scalar form has no broadcast bit.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xa9)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)  // 8-byte displacement scale for the m64 operand
        })
    }
    // VFMADD213SD {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)  // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)  // vcode(v[0]) = rounding-mode bits from the {er} operand
            m.emit(0xa9)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMADD213SD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xa9)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No candidate encoding matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VFMADD213SD")
    }
    return p
}
 44806  
 44807  // VFMADD213SS performs "Fused Multiply-Add of Scalar Single-Precision Floating-Point Values".
 44808  //
 44809  // Mnemonic        : VFMADD213SS
 44810  // Supported forms : (5 forms)
 44811  //
 44812  //    * VFMADD213SS xmm, xmm, xmm                [FMA3]
 44813  //    * VFMADD213SS m32, xmm, xmm                [FMA3]
 44814  //    * VFMADD213SS m32, xmm, xmm{k}{z}          [AVX512F]
 44815  //    * VFMADD213SS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
 44816  //    * VFMADD213SS xmm, xmm, xmm{k}{z}          [AVX512F]
 44817  //
func (self *Program) VFMADD213SS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Three operands for the regular forms; a fourth appears only when the
    // leading {er} (embedded-rounding) operand is supplied for AVX-512.
    switch len(vv) {
        case 0  : p = self.alloc("VFMADD213SS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMADD213SS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMADD213SS takes 3 or 4 operands")
    }
    // Single-precision variant: differs from VFMADD213SD only in VEX/EVEX W=0
    // (0x79/0x7d instead of 0xf9/0xfd) and a 4-byte disp8 element size.
    // Registration order of the forms below is significant; do not reorder.
    // VFMADD213SS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // VEX byte 1: R/B from high register bits
            m.emit(0x79 ^ (hlcode(v[1]) << 3))                      // VEX byte 2: W=0, vvvv = v1
            m.emit(0xa9)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: register-direct
        })
    }
    // VFMADD213SS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))  // VEX prefix for memory operand
            m.emit(0xa9)                                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                               // ModRM/SIB/disp, no disp8 scaling
        })
    }
    // VFMADD213SS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)  // EVEX prefix with opmask/zeroing bits
            m.emit(0xa9)                                                                                  // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 4)                                                            // disp8 compressed by 4-byte element
        })
    }
    // VFMADD213SS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                                  // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))                // P0: register extension bits
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                                                            // P1: W=0, vvvv = v2
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)  // P2: z-bit, rounding control from {er}, opmask
            m.emit(0xa9)                                                                                  // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                                 // ModRM: register-direct
        })
    }
    // VFMADD213SS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: register extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // P1: W=0, vvvv = v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // P2: z-bit, opmask
            m.emit(0xa9)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: register-direct
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFMADD213SS")
    }
    return p
}
 44888  
 44889  // VFMADD231PD performs "Fused Multiply-Add of Packed Double-Precision Floating-Point Values".
 44890  //
 44891  // Mnemonic        : VFMADD231PD
 44892  // Supported forms : (11 forms)
 44893  //
 44894  //    * VFMADD231PD xmm, xmm, xmm                   [FMA3]
 44895  //    * VFMADD231PD m128, xmm, xmm                  [FMA3]
 44896  //    * VFMADD231PD ymm, ymm, ymm                   [FMA3]
 44897  //    * VFMADD231PD m256, ymm, ymm                  [FMA3]
 44898  //    * VFMADD231PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
 44899  //    * VFMADD231PD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 44900  //    * VFMADD231PD zmm, zmm, zmm{k}{z}             [AVX512F]
 44901  //    * VFMADD231PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 44902  //    * VFMADD231PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 44903  //    * VFMADD231PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 44904  //    * VFMADD231PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 44905  //
func (self *Program) VFMADD231PD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Three operands for the regular forms; a fourth appears only when the
    // leading {er} (embedded-rounding) operand is supplied for AVX-512.
    switch len(vv) {
        case 0  : p = self.alloc("VFMADD231PD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMADD231PD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMADD231PD takes 3 or 4 operands")
    }
    // Packed double-precision variant (opcode 0xb8). Each matching form
    // registers one candidate encoding; registration order matters for
    // encoding selection, so the checks must not be reordered.
    // VFMADD231PD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // VEX byte 1: R/B from high register bits
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))                      // VEX byte 2: W=1, L=128, vvvv = v1
            m.emit(0xb8)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: register-direct
        })
    }
    // VFMADD231PD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))  // VEX prefix for memory operand
            m.emit(0xb8)                                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                               // ModRM/SIB/disp, no disp8 scaling
        })
    }
    // VFMADD231PD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // VEX byte 1: R/B from high register bits
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                      // VEX byte 2: W=1, L=256, vvvv = v1
            m.emit(0xb8)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: register-direct
        })
    }
    // VFMADD231PD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))  // VEX prefix for memory operand, L=256
            m.emit(0xb8)                                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                               // ModRM/SIB/disp, no disp8 scaling
        })
    }
    // VFMADD231PD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))  // EVEX prefix: 512-bit, opmask/zeroing/broadcast bits
            m.emit(0xb8)                                                                                            // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)                                                                     // disp8 compressed by 64-byte vector
        })
    }
    // VFMADD231PD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                                  // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))                // P0: register extension bits
            m.emit(0xfd ^ (hlcode(v[2]) << 3))                                                            // P1: W=1, vvvv = v2
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)  // P2: z-bit, rounding control from {er}, opmask
            m.emit(0xb8)                                                                                  // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                                 // ModRM: register-direct
        })
    }
    // VFMADD231PD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: register extension bits
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                              // P1: W=1, vvvv = v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // P2: z-bit, opmask, L'L=512
            m.emit(0xb8)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: register-direct
        })
    }
    // VFMADD231PD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))  // EVEX prefix: 128-bit, opmask/zeroing/broadcast bits
            m.emit(0xb8)                                                                                            // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 16)                                                                     // disp8 compressed by 16-byte vector
        })
    }
    // VFMADD231PD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: register extension bits
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                              // P1: W=1, vvvv = v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // P2: z-bit, opmask, L'L=128
            m.emit(0xb8)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: register-direct
        })
    }
    // VFMADD231PD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))  // EVEX prefix: 256-bit, opmask/zeroing/broadcast bits
            m.emit(0xb8)                                                                                            // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 32)                                                                     // disp8 compressed by 32-byte vector
        })
    }
    // VFMADD231PD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: register extension bits
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                              // P1: W=1, vvvv = v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // P2: z-bit, opmask, L'L=256
            m.emit(0xb8)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: register-direct
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFMADD231PD")
    }
    return p
}
 45044  
 45045  // VFMADD231PS performs "Fused Multiply-Add of Packed Single-Precision Floating-Point Values".
 45046  //
 45047  // Mnemonic        : VFMADD231PS
 45048  // Supported forms : (11 forms)
 45049  //
 45050  //    * VFMADD231PS xmm, xmm, xmm                   [FMA3]
 45051  //    * VFMADD231PS m128, xmm, xmm                  [FMA3]
 45052  //    * VFMADD231PS ymm, ymm, ymm                   [FMA3]
 45053  //    * VFMADD231PS m256, ymm, ymm                  [FMA3]
 45054  //    * VFMADD231PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
 45055  //    * VFMADD231PS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 45056  //    * VFMADD231PS zmm, zmm, zmm{k}{z}             [AVX512F]
 45057  //    * VFMADD231PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 45058  //    * VFMADD231PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 45059  //    * VFMADD231PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 45060  //    * VFMADD231PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 45061  //
func (self *Program) VFMADD231PS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Three operands for the regular forms; a fourth appears only when the
    // leading {er} (embedded-rounding) operand is supplied for AVX-512.
    switch len(vv) {
        case 0  : p = self.alloc("VFMADD231PS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMADD231PS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMADD231PS takes 3 or 4 operands")
    }
    // Packed single-precision variant: differs from VFMADD231PD only in
    // VEX/EVEX W=0 (0x79/0x7d and 0x05 instead of 0xf9/0xfd and 0x85) and in
    // 32-bit broadcast element size. Registration order of the forms below is
    // significant for encoding selection; do not reorder.
    // VFMADD231PS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // VEX byte 1: R/B from high register bits
            m.emit(0x79 ^ (hlcode(v[1]) << 3))                      // VEX byte 2: W=0, L=128, vvvv = v1
            m.emit(0xb8)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: register-direct
        })
    }
    // VFMADD231PS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))  // VEX prefix for memory operand
            m.emit(0xb8)                                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                               // ModRM/SIB/disp, no disp8 scaling
        })
    }
    // VFMADD231PS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // VEX byte 1: R/B from high register bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                      // VEX byte 2: W=0, L=256, vvvv = v1
            m.emit(0xb8)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: register-direct
        })
    }
    // VFMADD231PS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))  // VEX prefix for memory operand, L=256
            m.emit(0xb8)                                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                               // ModRM/SIB/disp, no disp8 scaling
        })
    }
    // VFMADD231PS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))  // EVEX prefix: 512-bit, opmask/zeroing/broadcast bits
            m.emit(0xb8)                                                                                            // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)                                                                     // disp8 compressed by 64-byte vector
        })
    }
    // VFMADD231PS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                                  // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))                // P0: register extension bits
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                                                            // P1: W=0, vvvv = v2
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)  // P2: z-bit, rounding control from {er}, opmask
            m.emit(0xb8)                                                                                  // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                                 // ModRM: register-direct
        })
    }
    // VFMADD231PS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: register extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // P1: W=0, vvvv = v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // P2: z-bit, opmask, L'L=512
            m.emit(0xb8)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: register-direct
        })
    }
    // VFMADD231PS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))  // EVEX prefix: 128-bit, opmask/zeroing/broadcast bits
            m.emit(0xb8)                                                                                            // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 16)                                                                     // disp8 compressed by 16-byte vector
        })
    }
    // VFMADD231PS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: register extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // P1: W=0, vvvv = v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // P2: z-bit, opmask, L'L=128
            m.emit(0xb8)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: register-direct
        })
    }
    // VFMADD231PS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))  // EVEX prefix: 256-bit, opmask/zeroing/broadcast bits
            m.emit(0xb8)                                                                                            // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 32)                                                                     // disp8 compressed by 32-byte vector
        })
    }
    // VFMADD231PS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: register extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // P1: W=0, vvvv = v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // P2: z-bit, opmask, L'L=256
            m.emit(0xb8)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: register-direct
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFMADD231PS")
    }
    return p
}
 45200  
 45201  // VFMADD231SD performs "Fused Multiply-Add of Scalar Double-Precision Floating-Point Values".
 45202  //
 45203  // Mnemonic        : VFMADD231SD
 45204  // Supported forms : (5 forms)
 45205  //
 45206  //    * VFMADD231SD xmm, xmm, xmm                [FMA3]
 45207  //    * VFMADD231SD m64, xmm, xmm                [FMA3]
 45208  //    * VFMADD231SD m64, xmm, xmm{k}{z}          [AVX512F]
 45209  //    * VFMADD231SD {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
 45210  //    * VFMADD231SD xmm, xmm, xmm{k}{z}          [AVX512F]
 45211  //
func (self *Program) VFMADD231SD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Three operands for the regular forms; a fourth appears only when the
    // leading {er} (embedded-rounding) operand is supplied for AVX-512.
    switch len(vv) {
        case 0  : p = self.alloc("VFMADD231SD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMADD231SD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMADD231SD takes 3 or 4 operands")
    }
    // Scalar double-precision 231 variant (opcode 0xb9). Registration order
    // of the forms below is significant for encoding selection; do not reorder.
    // VFMADD231SD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // VEX byte 1: R/B from high register bits
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))                      // VEX byte 2: W=1, vvvv = v1
            m.emit(0xb9)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: register-direct
        })
    }
    // VFMADD231SD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))  // VEX prefix for memory operand
            m.emit(0xb9)                                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                               // ModRM/SIB/disp, no disp8 scaling
        })
    }
    // VFMADD231SD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)  // EVEX prefix with opmask/zeroing bits
            m.emit(0xb9)                                                                                  // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 8)                                                            // disp8 compressed by 8-byte element
        })
    }
    // VFMADD231SD {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                                  // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))                // P0: register extension bits
            m.emit(0xfd ^ (hlcode(v[2]) << 3))                                                            // P1: W=1, vvvv = v2
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)  // P2: z-bit, rounding control from {er}, opmask
            m.emit(0xb9)                                                                                  // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                                 // ModRM: register-direct
        })
    }
    // VFMADD231SD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: register extension bits
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                              // P1: W=1, vvvv = v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // P2: z-bit, opmask
            m.emit(0xb9)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: register-direct
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFMADD231SD")
    }
    return p
}
 45282  
 45283  // VFMADD231SS performs "Fused Multiply-Add of Scalar Single-Precision Floating-Point Values".
 45284  //
 45285  // Mnemonic        : VFMADD231SS
 45286  // Supported forms : (5 forms)
 45287  //
 45288  //    * VFMADD231SS xmm, xmm, xmm                [FMA3]
 45289  //    * VFMADD231SS m32, xmm, xmm                [FMA3]
 45290  //    * VFMADD231SS m32, xmm, xmm{k}{z}          [AVX512F]
 45291  //    * VFMADD231SS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
 45292  //    * VFMADD231SS xmm, xmm, xmm{k}{z}          [AVX512F]
 45293  //
func (self *Program) VFMADD231SS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Three operands for the regular forms; a fourth appears only when the
    // leading {er} (embedded-rounding) operand is supplied for AVX-512.
    switch len(vv) {
        case 0  : p = self.alloc("VFMADD231SS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMADD231SS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMADD231SS takes 3 or 4 operands")
    }
    // Scalar single-precision 231 variant: differs from VFMADD231SD only in
    // VEX/EVEX W=0 (0x79/0x7d/0x05 instead of 0xf9/0xfd/0x85) and a 4-byte
    // disp8 element size. Registration order below matters; do not reorder.
    // VFMADD231SS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // VEX byte 1: R/B from high register bits
            m.emit(0x79 ^ (hlcode(v[1]) << 3))                      // VEX byte 2: W=0, vvvv = v1
            m.emit(0xb9)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: register-direct
        })
    }
    // VFMADD231SS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))  // VEX prefix for memory operand
            m.emit(0xb9)                                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                               // ModRM/SIB/disp, no disp8 scaling
        })
    }
    // VFMADD231SS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)  // EVEX prefix with opmask/zeroing bits
            m.emit(0xb9)                                                                                  // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 4)                                                            // disp8 compressed by 4-byte element
        })
    }
    // VFMADD231SS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                                  // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))                // P0: register extension bits
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                                                            // P1: W=0, vvvv = v2
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)  // P2: z-bit, rounding control from {er}, opmask
            m.emit(0xb9)                                                                                  // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                                 // ModRM: register-direct
        })
    }
    // VFMADD231SS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: register extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // P1: W=0, vvvv = v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // P2: z-bit, opmask
            m.emit(0xb9)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: register-direct
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFMADD231SS")
    }
    return p
}
 45364  
// VFMADDPD performs "Fused Multiply-Add of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VFMADDPD
// Supported forms : (6 forms)
//
//    * VFMADDPD xmm, xmm, xmm, xmm     [FMA4]
//    * VFMADDPD m128, xmm, xmm, xmm    [FMA4]
//    * VFMADDPD xmm, m128, xmm, xmm    [FMA4]
//    * VFMADDPD ymm, ymm, ymm, ymm     [FMA4]
//    * VFMADDPD m256, ymm, ymm, ymm    [FMA4]
//    * VFMADDPD ymm, m256, ymm, ymm    [FMA4]
//
// Each operand-form match below appends one or more candidate byte
// encodings to the instruction via p.add; if no form matches, the
// function panics. The trailing m.emit(hlcode(...) << 4) byte carries a
// register number in its high nibble — this appears to be the FMA4 /is4
// immediate, which is why the all-register forms admit two equivalent
// encodings with the two multiplicand operands swapped.
func (self *Program) VFMADDPD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VFMADDPD", 4, Operands { v0, v1, v2, v3 })
    // VFMADDPD xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // First encoding: v[0] in ModRM.rm, v[1] in the /is4 byte.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                               // 3-byte VEX prefix
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[2]) << 3))
            m.emit(0x69)                                               // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))              // ModRM: mod=11, reg=v[3], rm=v[0]
            m.emit(hlcode(v[1]) << 4)
        })
        // Alternate encoding: v[1] in ModRM.rm, v[0] in the /is4 byte.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x69)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMADDPD m128, xmm, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x69)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFMADDPD xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x69)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMADDPD ymm, ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Two equivalent encodings, as in the xmm form; 0xfd/0x7d vs.
        // 0xf9/0x79 in the second VEX byte differ in the bit that
        // selects 256-bit operation (VEX.L).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit(0x69)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit(0x69)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMADDPD m256, ymm, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x85, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x69)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFMADDPD ymm, m256, ymm, ymm
    if isYMM(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x69)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFMADDPD")
    }
    return p
}
 45470  
// VFMADDPS performs "Fused Multiply-Add of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VFMADDPS
// Supported forms : (6 forms)
//
//    * VFMADDPS xmm, xmm, xmm, xmm     [FMA4]
//    * VFMADDPS m128, xmm, xmm, xmm    [FMA4]
//    * VFMADDPS xmm, m128, xmm, xmm    [FMA4]
//    * VFMADDPS ymm, ymm, ymm, ymm     [FMA4]
//    * VFMADDPS m256, ymm, ymm, ymm    [FMA4]
//    * VFMADDPS ymm, m256, ymm, ymm    [FMA4]
//
// Structurally identical to VFMADDPD but with opcode byte 0x68 instead
// of 0x69. Each matching operand form appends candidate encodings via
// p.add; the all-register forms get two equivalent encodings (the
// trailing byte holds a register number in its high nibble, apparently
// the FMA4 /is4 immediate, allowing the multiplicands to be swapped).
func (self *Program) VFMADDPS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VFMADDPS", 4, Operands { v0, v1, v2, v3 })
    // VFMADDPS xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // First encoding: v[0] in ModRM.rm, v[1] in the trailing byte.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                               // 3-byte VEX prefix
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[2]) << 3))
            m.emit(0x68)                                               // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))              // ModRM: mod=11, reg=v[3], rm=v[0]
            m.emit(hlcode(v[1]) << 4)
        })
        // Alternate encoding with v[0] and v[1] swapped.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x68)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMADDPS m128, xmm, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x68)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFMADDPS xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x68)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMADDPS ymm, ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Two equivalent encodings, as in the xmm form (VEX.L set for
        // 256-bit operation: 0xfd/0x7d instead of 0xf9/0x79).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit(0x68)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit(0x68)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMADDPS m256, ymm, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x85, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x68)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFMADDPS ymm, m256, ymm, ymm
    if isYMM(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x68)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFMADDPS")
    }
    return p
}
 45576  
// VFMADDSD performs "Fused Multiply-Add of Scalar Double-Precision Floating-Point Values".
//
// Mnemonic        : VFMADDSD
// Supported forms : (3 forms)
//
//    * VFMADDSD xmm, xmm, xmm, xmm    [FMA4]
//    * VFMADDSD m64, xmm, xmm, xmm    [FMA4]
//    * VFMADDSD xmm, m64, xmm, xmm    [FMA4]
//
// Scalar variant of VFMADDPD: opcode byte 0x6b, memory operand is m64,
// and there are no 256-bit forms. Each matching operand form appends
// candidate encodings via p.add; the all-register form gets two
// equivalent encodings with the multiplicand operands swapped (the
// trailing byte's high nibble carries a register number, apparently the
// FMA4 /is4 immediate).
func (self *Program) VFMADDSD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VFMADDSD", 4, Operands { v0, v1, v2, v3 })
    // VFMADDSD xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // First encoding: v[0] in ModRM.rm, v[1] in the trailing byte.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                               // 3-byte VEX prefix
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[2]) << 3))
            m.emit(0x6b)                                               // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))              // ModRM: mod=11, reg=v[3], rm=v[0]
            m.emit(hlcode(v[1]) << 4)
        })
        // Alternate encoding with v[0] and v[1] swapped.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x6b)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMADDSD m64, xmm, xmm, xmm
    if isM64(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x6b)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFMADDSD xmm, m64, xmm, xmm
    if isXMM(v0) && isM64(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x6b)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFMADDSD")
    }
    return p
}
 45636  
// VFMADDSS performs "Fused Multiply-Add of Scalar Single-Precision Floating-Point Values".
//
// Mnemonic        : VFMADDSS
// Supported forms : (3 forms)
//
//    * VFMADDSS xmm, xmm, xmm, xmm    [FMA4]
//    * VFMADDSS m32, xmm, xmm, xmm    [FMA4]
//    * VFMADDSS xmm, m32, xmm, xmm    [FMA4]
//
// Scalar single-precision counterpart of VFMADDSD: opcode byte 0x6a and
// an m32 memory operand. Each matching operand form appends candidate
// encodings via p.add; the all-register form gets two equivalent
// encodings with the multiplicand operands swapped (the trailing byte's
// high nibble carries a register number, apparently the FMA4 /is4
// immediate).
func (self *Program) VFMADDSS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VFMADDSS", 4, Operands { v0, v1, v2, v3 })
    // VFMADDSS xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // First encoding: v[0] in ModRM.rm, v[1] in the trailing byte.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                               // 3-byte VEX prefix
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[2]) << 3))
            m.emit(0x6a)                                               // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))              // ModRM: mod=11, reg=v[3], rm=v[0]
            m.emit(hlcode(v[1]) << 4)
        })
        // Alternate encoding with v[0] and v[1] swapped.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x6a)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMADDSS m32, xmm, xmm, xmm
    if isM32(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x6a)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFMADDSS xmm, m32, xmm, xmm
    if isXMM(v0) && isM32(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x6a)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFMADDSS")
    }
    return p
}
 45696  
// VFMADDSUB132PD performs "Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VFMADDSUB132PD
// Supported forms : (11 forms)
//
//    * VFMADDSUB132PD xmm, xmm, xmm                   [FMA3]
//    * VFMADDSUB132PD m128, xmm, xmm                  [FMA3]
//    * VFMADDSUB132PD ymm, ymm, ymm                   [FMA3]
//    * VFMADDSUB132PD m256, ymm, ymm                  [FMA3]
//    * VFMADDSUB132PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFMADDSUB132PD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFMADDSUB132PD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFMADDSUB132PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFMADDSUB132PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFMADDSUB132PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFMADDSUB132PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// The instruction takes 3 operands for the regular forms and 4 when a
// leading rounding-mode operand ({er}) is supplied; the variadic vv
// carries the optional 4th operand. Opcode byte is 0x96 in every form;
// FMA3 forms use a VEX prefix, AVX-512 forms an EVEX prefix (the 0x62
// escape byte) with masking/zeroing/broadcast bits folded in via
// kcode/zcode/bcode.
func (self *Program) VFMADDSUB132PD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Dispatch on arity: 3 operands normally, 4 with the {er} form.
    switch len(vv) {
        case 0  : p = self.alloc("VFMADDSUB132PD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMADDSUB132PD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMADDSUB132PD takes 3 or 4 operands")
    }
    // VFMADDSUB132PD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                               // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0x96)                                               // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))              // ModRM: mod=11, reg=v[2], rm=v[0]
        })
    }
    // VFMADDSUB132PD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x96)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADDSUB132PD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                         // 0xfd vs 0xf9: VEX.L set for 256-bit
            m.emit(0x96)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADDSUB132PD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x96)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADDSUB132PD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded; last mrsd argument (64) is the memory
            // operand's displacement scale.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x96)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFMADDSUB132PD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built EVEX prefix; vcode(v[0]) folds the rounding
            // mode into the 4th prefix byte.
            m.emit(0x62)                                               // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x96)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMADDSUB132PD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // 0x40: 512-bit vector length
            m.emit(0x96)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADDSUB132PD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x96)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFMADDSUB132PD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // 0x00: 128-bit vector length
            m.emit(0x96)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADDSUB132PD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x96)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFMADDSUB132PD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // 0x20: 256-bit vector length
            m.emit(0x96)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFMADDSUB132PD")
    }
    return p
}
 45852  
// VFMADDSUB132PS performs "Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VFMADDSUB132PS
// Supported forms : (11 forms)
//
//    * VFMADDSUB132PS xmm, xmm, xmm                   [FMA3]
//    * VFMADDSUB132PS m128, xmm, xmm                  [FMA3]
//    * VFMADDSUB132PS ymm, ymm, ymm                   [FMA3]
//    * VFMADDSUB132PS m256, ymm, ymm                  [FMA3]
//    * VFMADDSUB132PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFMADDSUB132PS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFMADDSUB132PS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFMADDSUB132PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFMADDSUB132PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFMADDSUB132PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFMADDSUB132PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Single-precision counterpart of VFMADDSUB132PD: same opcode byte
// (0x96) but with the prefix bytes that differ in the width bit
// (0x79/0x7d and 0x05 here vs. 0xf9/0xfd and 0x85 there) and m32bcst
// broadcast memory forms. Three operands normally, four when a leading
// rounding-mode operand ({er}) is supplied via the variadic vv.
func (self *Program) VFMADDSUB132PS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Dispatch on arity: 3 operands normally, 4 with the {er} form.
    switch len(vv) {
        case 0  : p = self.alloc("VFMADDSUB132PS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMADDSUB132PS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMADDSUB132PS takes 3 or 4 operands")
    }
    // VFMADDSUB132PS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                               // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x96)                                               // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))              // ModRM: mod=11, reg=v[2], rm=v[0]
        })
    }
    // VFMADDSUB132PS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x96)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADDSUB132PS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                         // 0x7d vs 0x79: VEX.L set for 256-bit
            m.emit(0x96)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADDSUB132PS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x96)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADDSUB132PS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded; last mrsd argument (64) is the memory
            // operand's displacement scale.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x96)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFMADDSUB132PS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built EVEX prefix; vcode(v[0]) folds the rounding
            // mode into the 4th prefix byte.
            m.emit(0x62)                                               // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x96)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMADDSUB132PS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // 0x40: 512-bit vector length
            m.emit(0x96)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADDSUB132PS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x96)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFMADDSUB132PS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // 0x00: 128-bit vector length
            m.emit(0x96)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADDSUB132PS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x96)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFMADDSUB132PS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // 0x20: 256-bit vector length
            m.emit(0x96)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFMADDSUB132PS")
    }
    return p
}
 46008  
 46009  // VFMADDSUB213PD performs "Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values".
 46010  //
 46011  // Mnemonic        : VFMADDSUB213PD
 46012  // Supported forms : (11 forms)
 46013  //
 46014  //    * VFMADDSUB213PD xmm, xmm, xmm                   [FMA3]
 46015  //    * VFMADDSUB213PD m128, xmm, xmm                  [FMA3]
 46016  //    * VFMADDSUB213PD ymm, ymm, ymm                   [FMA3]
 46017  //    * VFMADDSUB213PD m256, ymm, ymm                  [FMA3]
 46018  //    * VFMADDSUB213PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
 46019  //    * VFMADDSUB213PD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 46020  //    * VFMADDSUB213PD zmm, zmm, zmm{k}{z}             [AVX512F]
 46021  //    * VFMADDSUB213PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 46022  //    * VFMADDSUB213PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 46023  //    * VFMADDSUB213PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 46024  //    * VFMADDSUB213PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 46025  //
func (self *Program) VFMADDSUB213PD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The instruction takes 3 operands, or 4 when an {er} embedded
    // rounding-control operand is prepended (see the {er} form below).
    switch len(vv) {
        case 0  : p = self.alloc("VFMADDSUB213PD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMADDSUB213PD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMADDSUB213PD takes 3 or 4 operands")
    }
    // Each guarded block below matches one supported operand form and
    // registers a candidate encoder for it. Emissions beginning with 0xc4
    // are VEX-encoded and those beginning with 0x62 are EVEX-encoded
    // (per the Intel SDM instruction-format conventions).
    // VFMADDSUB213PD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0xa6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADDSUB213PD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xa6)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADDSUB213PD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit(0xa6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADDSUB213PD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xa6)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADDSUB213PD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xa6)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFMADDSUB213PD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xa6)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMADDSUB213PD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xa6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADDSUB213PD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xa6)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFMADDSUB213PD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xa6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADDSUB213PD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xa6)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFMADDSUB213PD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xa6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // None of the matchers accepted the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VFMADDSUB213PD")
    }
    return p
}
 46164  
 46165  // VFMADDSUB213PS performs "Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values".
 46166  //
 46167  // Mnemonic        : VFMADDSUB213PS
 46168  // Supported forms : (11 forms)
 46169  //
 46170  //    * VFMADDSUB213PS xmm, xmm, xmm                   [FMA3]
 46171  //    * VFMADDSUB213PS m128, xmm, xmm                  [FMA3]
 46172  //    * VFMADDSUB213PS ymm, ymm, ymm                   [FMA3]
 46173  //    * VFMADDSUB213PS m256, ymm, ymm                  [FMA3]
 46174  //    * VFMADDSUB213PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
 46175  //    * VFMADDSUB213PS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 46176  //    * VFMADDSUB213PS zmm, zmm, zmm{k}{z}             [AVX512F]
 46177  //    * VFMADDSUB213PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 46178  //    * VFMADDSUB213PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 46179  //    * VFMADDSUB213PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 46180  //    * VFMADDSUB213PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 46181  //
func (self *Program) VFMADDSUB213PS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The instruction takes 3 operands, or 4 when an {er} embedded
    // rounding-control operand is prepended (see the {er} form below).
    switch len(vv) {
        case 0  : p = self.alloc("VFMADDSUB213PS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMADDSUB213PS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMADDSUB213PS takes 3 or 4 operands")
    }
    // Each guarded block below matches one supported operand form and
    // registers a candidate encoder for it. Emissions beginning with 0xc4
    // are VEX-encoded and those beginning with 0x62 are EVEX-encoded
    // (per the Intel SDM instruction-format conventions).
    // VFMADDSUB213PS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xa6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADDSUB213PS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xa6)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADDSUB213PS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0xa6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADDSUB213PS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xa6)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADDSUB213PS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xa6)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFMADDSUB213PS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xa6)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMADDSUB213PS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xa6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADDSUB213PS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xa6)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFMADDSUB213PS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xa6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADDSUB213PS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xa6)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFMADDSUB213PS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xa6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // None of the matchers accepted the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VFMADDSUB213PS")
    }
    return p
}
 46320  
 46321  // VFMADDSUB231PD performs "Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values".
 46322  //
 46323  // Mnemonic        : VFMADDSUB231PD
 46324  // Supported forms : (11 forms)
 46325  //
 46326  //    * VFMADDSUB231PD xmm, xmm, xmm                   [FMA3]
 46327  //    * VFMADDSUB231PD m128, xmm, xmm                  [FMA3]
 46328  //    * VFMADDSUB231PD ymm, ymm, ymm                   [FMA3]
 46329  //    * VFMADDSUB231PD m256, ymm, ymm                  [FMA3]
 46330  //    * VFMADDSUB231PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
 46331  //    * VFMADDSUB231PD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 46332  //    * VFMADDSUB231PD zmm, zmm, zmm{k}{z}             [AVX512F]
 46333  //    * VFMADDSUB231PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 46334  //    * VFMADDSUB231PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 46335  //    * VFMADDSUB231PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 46336  //    * VFMADDSUB231PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 46337  //
func (self *Program) VFMADDSUB231PD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The instruction takes 3 operands, or 4 when an {er} embedded
    // rounding-control operand is prepended (see the {er} form below).
    switch len(vv) {
        case 0  : p = self.alloc("VFMADDSUB231PD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMADDSUB231PD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMADDSUB231PD takes 3 or 4 operands")
    }
    // Each guarded block below matches one supported operand form and
    // registers a candidate encoder for it. Emissions beginning with 0xc4
    // are VEX-encoded and those beginning with 0x62 are EVEX-encoded
    // (per the Intel SDM instruction-format conventions).
    // VFMADDSUB231PD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0xb6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADDSUB231PD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xb6)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADDSUB231PD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit(0xb6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADDSUB231PD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xb6)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADDSUB231PD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xb6)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFMADDSUB231PD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xb6)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMADDSUB231PD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xb6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADDSUB231PD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xb6)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFMADDSUB231PD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xb6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADDSUB231PD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xb6)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFMADDSUB231PD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xb6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // None of the matchers accepted the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VFMADDSUB231PD")
    }
    return p
}
 46476  
 46477  // VFMADDSUB231PS performs "Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values".
 46478  //
 46479  // Mnemonic        : VFMADDSUB231PS
 46480  // Supported forms : (11 forms)
 46481  //
 46482  //    * VFMADDSUB231PS xmm, xmm, xmm                   [FMA3]
 46483  //    * VFMADDSUB231PS m128, xmm, xmm                  [FMA3]
 46484  //    * VFMADDSUB231PS ymm, ymm, ymm                   [FMA3]
 46485  //    * VFMADDSUB231PS m256, ymm, ymm                  [FMA3]
 46486  //    * VFMADDSUB231PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
 46487  //    * VFMADDSUB231PS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 46488  //    * VFMADDSUB231PS zmm, zmm, zmm{k}{z}             [AVX512F]
 46489  //    * VFMADDSUB231PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 46490  //    * VFMADDSUB231PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 46491  //    * VFMADDSUB231PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 46492  //    * VFMADDSUB231PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 46493  //
func (self *Program) VFMADDSUB231PS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The instruction takes 3 operands, or 4 when an {er} embedded
    // rounding-control operand is prepended (see the {er} form below).
    switch len(vv) {
        case 0  : p = self.alloc("VFMADDSUB231PS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMADDSUB231PS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMADDSUB231PS takes 3 or 4 operands")
    }
    // Each guarded block below matches one supported operand form and
    // registers a candidate encoder for it. Emissions beginning with 0xc4
    // are VEX-encoded and those beginning with 0x62 are EVEX-encoded
    // (per the Intel SDM instruction-format conventions).
    // VFMADDSUB231PS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xb6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADDSUB231PS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xb6)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADDSUB231PS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0xb6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADDSUB231PS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xb6)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMADDSUB231PS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xb6)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFMADDSUB231PS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xb6)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMADDSUB231PS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xb6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADDSUB231PS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xb6)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFMADDSUB231PS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xb6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMADDSUB231PS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xb6)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFMADDSUB231PS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xb6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // None of the matchers accepted the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VFMADDSUB231PS")
    }
    return p
}
 46632  
 46633  // VFMADDSUBPD performs "Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values".
 46634  //
 46635  // Mnemonic        : VFMADDSUBPD
 46636  // Supported forms : (6 forms)
 46637  //
 46638  //    * VFMADDSUBPD xmm, xmm, xmm, xmm     [FMA4]
 46639  //    * VFMADDSUBPD m128, xmm, xmm, xmm    [FMA4]
 46640  //    * VFMADDSUBPD xmm, m128, xmm, xmm    [FMA4]
 46641  //    * VFMADDSUBPD ymm, ymm, ymm, ymm     [FMA4]
 46642  //    * VFMADDSUBPD m256, ymm, ymm, ymm    [FMA4]
 46643  //    * VFMADDSUBPD ymm, m256, ymm, ymm    [FMA4]
 46644  //
func (self *Program) VFMADDSUBPD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Allocate the instruction and match (v0..v3) against each supported
    // operand form; every match registers one or more candidate encodings
    // via p.add. Helper semantics (defined outside this view): hcode/lcode
    // appear to be the high bit / low 3 bits of a register number and
    // hlcode the full 4-bit number — TODO confirm against the encoder helpers.
    p := self.alloc("VFMADDSUBPD", 4, Operands { v0, v1, v2, v3 })
    // VFMADDSUBPD xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // FMA4 register form, encoding #1: v0 in ModRM.rm, v1 in the
        // trailing "is4" immediate byte (VEX.W=1 selects this layout).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                           // 3-byte VEX escape
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5)) // VEX.R from v3, VEX.B from v0, opcode map 0F3A
            m.emit(0xf9 ^ (hlcode(v[2]) << 3))                     // VEX.W=1, vvvv = v2 (inverted), L=0 (128-bit), pp=66
            m.emit(0x5d)                                           // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))          // ModRM: mod=11, reg=v3, rm=v0
            m.emit(hlcode(v[1]) << 4)                              // is4 immediate selecting v1
        })
        // FMA4 register form, encoding #2: sources swapped (v1 in
        // ModRM.rm, v0 in is4) with VEX.W=0 — same semantics.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5)) // VEX.B now from v1
            m.emit(0x79 ^ (hlcode(v[2]) << 3))                     // VEX.W=0
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))          // rm=v1
            m.emit(hlcode(v[0]) << 4)                              // is4 selects v0
        })
    }
    // VFMADDSUBPD m128, xmm, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Memory multiplicand in rm (W=1 layout); vex3 builds the VEX
        // prefix, mrsd emits ModRM/SIB/displacement (scale 1 = byte disp units).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2])) // 0x81: W=1 + 66 prefix, L=0
            m.emit(0x5d)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)                                       // is4 selects v1
        })
    }
    // VFMADDSUBPD xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Memory operand in the v1 slot (W=0 layout), register v0 in is4.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2])) // 0x01: W=0 + 66 prefix, L=0
            m.emit(0x5d)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMADDSUBPD ymm, ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // 256-bit register form; identical to the xmm case but with
        // VEX.L=1 (0xfd / 0x7d instead of 0xf9 / 0x79).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))                     // VEX.W=1, L=1 (256-bit)
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                     // VEX.W=0, L=1
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMADDSUBPD m256, ymm, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x85, hcode(v[3]), addr(v[0]), hlcode(v[2])) // 0x85: W=1, L=1
            m.emit(0x5d)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFMADDSUBPD ymm, m256, ymm, ymm
    if isYMM(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2])) // 0x05: W=0, L=1
            m.emit(0x5d)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VFMADDSUBPD")
    }
    return p
}
 46738  
 46739  // VFMADDSUBPS performs "Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values".
 46740  //
 46741  // Mnemonic        : VFMADDSUBPS
 46742  // Supported forms : (6 forms)
 46743  //
 46744  //    * VFMADDSUBPS xmm, xmm, xmm, xmm     [FMA4]
 46745  //    * VFMADDSUBPS m128, xmm, xmm, xmm    [FMA4]
 46746  //    * VFMADDSUBPS xmm, m128, xmm, xmm    [FMA4]
 46747  //    * VFMADDSUBPS ymm, ymm, ymm, ymm     [FMA4]
 46748  //    * VFMADDSUBPS m256, ymm, ymm, ymm    [FMA4]
 46749  //    * VFMADDSUBPS ymm, m256, ymm, ymm    [FMA4]
 46750  //
func (self *Program) VFMADDSUBPS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Single-precision twin of VFMADDSUBPD: same FMA4 forms and prefix
    // layout, opcode 0x5c instead of 0x5d. Each matched form registers
    // candidate encodings via p.add.
    p := self.alloc("VFMADDSUBPS", 4, Operands { v0, v1, v2, v3 })
    // VFMADDSUBPS xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // FMA4 register form, encoding #1: v0 in ModRM.rm, v1 in the
        // trailing is4 immediate (VEX.W=1).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                           // 3-byte VEX escape
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5)) // VEX.R from v3, VEX.B from v0, map 0F3A
            m.emit(0xf9 ^ (hlcode(v[2]) << 3))                     // VEX.W=1, vvvv = v2, L=0, pp=66
            m.emit(0x5c)                                           // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))          // ModRM: mod=11, reg=v3, rm=v0
            m.emit(hlcode(v[1]) << 4)                              // is4 selects v1
        })
        // Encoding #2: sources swapped (v1 in rm, v0 in is4), VEX.W=0.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))                     // VEX.W=0
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMADDSUBPS m128, xmm, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Memory multiplicand in rm (W=1 layout); mrsd writes ModRM/SIB/disp.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2])) // 0x81: W=1, L=0
            m.emit(0x5c)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFMADDSUBPS xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Memory operand in the v1 slot (W=0 layout), v0 in is4.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2])) // 0x01: W=0, L=0
            m.emit(0x5c)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMADDSUBPS ymm, ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // 256-bit register form: same two encodings with VEX.L=1.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))                     // VEX.W=1, L=1
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                     // VEX.W=0, L=1
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMADDSUBPS m256, ymm, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x85, hcode(v[3]), addr(v[0]), hlcode(v[2])) // 0x85: W=1, L=1
            m.emit(0x5c)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFMADDSUBPS ymm, m256, ymm, ymm
    if isYMM(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2])) // 0x05: W=0, L=1
            m.emit(0x5c)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VFMADDSUBPS")
    }
    return p
}
 46844  
 46845  // VFMSUB132PD performs "Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values".
 46846  //
 46847  // Mnemonic        : VFMSUB132PD
 46848  // Supported forms : (11 forms)
 46849  //
 46850  //    * VFMSUB132PD xmm, xmm, xmm                   [FMA3]
 46851  //    * VFMSUB132PD m128, xmm, xmm                  [FMA3]
 46852  //    * VFMSUB132PD ymm, ymm, ymm                   [FMA3]
 46853  //    * VFMSUB132PD m256, ymm, ymm                  [FMA3]
 46854  //    * VFMSUB132PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
 46855  //    * VFMSUB132PD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 46856  //    * VFMSUB132PD zmm, zmm, zmm{k}{z}             [AVX512F]
 46857  //    * VFMSUB132PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 46858  //    * VFMSUB132PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 46859  //    * VFMSUB132PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 46860  //    * VFMSUB132PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 46861  //
func (self *Program) VFMSUB132PD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // FMA3/AVX-512 instruction: takes 3 operands, or 4 when a rounding
    // control ({er}) is supplied as the first operand. Opcode is 0x9a in
    // map 0F38; VEX/EVEX.W=1 marks the double-precision variant.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VFMSUB132PD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMSUB132PD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMSUB132PD takes 3 or 4 operands")
    }
    // VFMSUB132PD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // FMA3 128-bit register form, hand-built 3-byte VEX prefix.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                           // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5)) // VEX.R from v2, VEX.B from v0, map 0F38
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))                     // VEX.W=1, vvvv = v1, L=0, pp=66
            m.emit(0x9a)                                           // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))          // ModRM: mod=11, reg=v2, rm=v0
        })
    }
    // VFMSUB132PD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1])) // map 0F38, W=1, L=0
            m.emit(0x9a)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                              // ModRM/SIB/disp, no disp8 scaling
        })
    }
    // VFMSUB132PD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // Same as the xmm form with VEX.L=1 (256-bit).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                     // VEX.W=1, L=1
            m.emit(0x9a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUB132PD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1])) // W=1, L=1
            m.emit(0x9a)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUB132PD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX 512-bit memory form; bcode flags a broadcast operand and
        // the final 64 is the disp8*N compression factor for a full vector.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x9a)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFMSUB132PD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // Embedded-rounding form, hand-built 4-byte EVEX prefix:
        // vcode(v[0]) places the rounding mode in the L'L bits and
        // 0x10 sets EVEX.b to enable static rounding.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                      // EVEX escape
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))    // P0: R/B/R' from register high bits, map 0F38
            m.emit(0xfd ^ (hlcode(v[2]) << 3))                                                // P1: W=1, vvvv = v2, pp=66
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10) // P2: z, RC, V', aaa, b=1
            m.emit(0x9a)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                     // ModRM: reg=v3 (dest), rm=v1
        })
    }
    // VFMSUB132PD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX 512-bit register form; 0x40 in P2 sets L'L=10 (512-bit).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x9a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUB132PD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX 128-bit memory form (AVX512VL); disp8 compressed by 16.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x9a)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFMSUB132PD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX 128-bit register form; 0x00 in P2 keeps L'L=00 (128-bit).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x9a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUB132PD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX 256-bit memory form; disp8 compressed by 32.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x9a)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFMSUB132PD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX 256-bit register form; 0x20 in P2 sets L'L=01 (256-bit).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x9a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VFMSUB132PD")
    }
    return p
}
 47000  
 47001  // VFMSUB132PS performs "Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values".
 47002  //
 47003  // Mnemonic        : VFMSUB132PS
 47004  // Supported forms : (11 forms)
 47005  //
 47006  //    * VFMSUB132PS xmm, xmm, xmm                   [FMA3]
 47007  //    * VFMSUB132PS m128, xmm, xmm                  [FMA3]
 47008  //    * VFMSUB132PS ymm, ymm, ymm                   [FMA3]
 47009  //    * VFMSUB132PS m256, ymm, ymm                  [FMA3]
 47010  //    * VFMSUB132PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
 47011  //    * VFMSUB132PS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 47012  //    * VFMSUB132PS zmm, zmm, zmm{k}{z}             [AVX512F]
 47013  //    * VFMSUB132PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 47014  //    * VFMSUB132PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 47015  //    * VFMSUB132PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 47016  //    * VFMSUB132PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 47017  //
func (self *Program) VFMSUB132PS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // Single-precision twin of VFMSUB132PD: same opcode 0x9a in map 0F38,
    // but VEX/EVEX.W=0 (0x79/0x7d/0x05 prefix bytes instead of 0xf9/0xfd/0x85)
    // and m32bcst broadcast granularity. Takes 3 operands, or 4 with {er}.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VFMSUB132PS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMSUB132PS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMSUB132PS takes 3 or 4 operands")
    }
    // VFMSUB132PS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // FMA3 128-bit register form, hand-built 3-byte VEX prefix.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                           // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5)) // VEX.R from v2, VEX.B from v0, map 0F38
            m.emit(0x79 ^ (hlcode(v[1]) << 3))                     // VEX.W=0, vvvv = v1, L=0, pp=66
            m.emit(0x9a)                                           // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))          // ModRM: mod=11, reg=v2, rm=v0
        })
    }
    // VFMSUB132PS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1])) // map 0F38, W=0, L=0
            m.emit(0x9a)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUB132PS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // Same as the xmm form with VEX.L=1 (256-bit).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                     // VEX.W=0, L=1
            m.emit(0x9a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUB132PS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1])) // W=0, L=1
            m.emit(0x9a)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUB132PS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX 512-bit memory form; disp8 compressed by 64.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x9a)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFMSUB132PS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // Embedded-rounding form: vcode(v[0]) carries the rounding mode
        // in the L'L bits and 0x10 sets EVEX.b for static rounding.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                      // EVEX escape
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))    // P0: R/B/R', map 0F38
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                                                // P1: W=0, vvvv = v2, pp=66
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10) // P2: z, RC, V', aaa, b=1
            m.emit(0x9a)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMSUB132PS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX 512-bit register form; 0x40 in P2 sets L'L=10 (512-bit).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x9a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUB132PS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX 128-bit memory form (AVX512VL); disp8 compressed by 16.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x9a)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFMSUB132PS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX 128-bit register form (L'L=00).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x9a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUB132PS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX 256-bit memory form; disp8 compressed by 32.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x9a)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFMSUB132PS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX 256-bit register form; 0x20 in P2 sets L'L=01 (256-bit).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x9a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VFMSUB132PS")
    }
    return p
}
 47156  
 47157  // VFMSUB132SD performs "Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values".
 47158  //
 47159  // Mnemonic        : VFMSUB132SD
 47160  // Supported forms : (5 forms)
 47161  //
 47162  //    * VFMSUB132SD xmm, xmm, xmm                [FMA3]
 47163  //    * VFMSUB132SD m64, xmm, xmm                [FMA3]
 47164  //    * VFMSUB132SD m64, xmm, xmm{k}{z}          [AVX512F]
 47165  //    * VFMSUB132SD {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
 47166  //    * VFMSUB132SD xmm, xmm, xmm{k}{z}          [AVX512F]
 47167  //
func (self *Program) VFMSUB132SD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // Scalar double-precision variant: opcode 0x9b in map 0F38 with W=1.
    // Takes 3 operands, or 4 when an {er} rounding control leads.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VFMSUB132SD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMSUB132SD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMSUB132SD takes 3 or 4 operands")
    }
    // VFMSUB132SD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // FMA3 register form, hand-built 3-byte VEX prefix.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                           // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5)) // VEX.R from v2, VEX.B from v0, map 0F38
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))                     // VEX.W=1, vvvv = v1, pp=66
            m.emit(0x9b)                                           // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))          // ModRM: mod=11, reg=v2, rm=v0
        })
    }
    // VFMSUB132SD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1])) // map 0F38, W=1
            m.emit(0x9b)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                              // ModRM/SIB/disp, no disp8 scaling
        })
    }
    // VFMSUB132SD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX memory form; scalar, so no broadcast (final evex arg 0)
        // and disp8 compressed by the 8-byte element size.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x9b)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VFMSUB132SD {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // Embedded-rounding form: vcode(v[0]) carries the rounding mode
        // in the L'L bits and 0x10 sets EVEX.b for static rounding.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                      // EVEX escape
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))    // P0: R/B/R', map 0F38
            m.emit(0xfd ^ (hlcode(v[2]) << 3))                                                // P1: W=1, vvvv = v2, pp=66
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10) // P2: z, RC, V', aaa, b=1
            m.emit(0x9b)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                     // ModRM: reg=v3 (dest), rm=v1
        })
    }
    // VFMSUB132SD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX register form without rounding control (P2 carries 0x40).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x9b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VFMSUB132SD")
    }
    return p
}
 47238  
 47239  // VFMSUB132SS performs "Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values".
 47240  //
 47241  // Mnemonic        : VFMSUB132SS
 47242  // Supported forms : (5 forms)
 47243  //
 47244  //    * VFMSUB132SS xmm, xmm, xmm                [FMA3]
 47245  //    * VFMSUB132SS m32, xmm, xmm                [FMA3]
 47246  //    * VFMSUB132SS m32, xmm, xmm{k}{z}          [AVX512F]
 47247  //    * VFMSUB132SS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
 47248  //    * VFMSUB132SS xmm, xmm, xmm{k}{z}          [AVX512F]
 47249  //
func (self *Program) VFMSUB132SS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // Scalar single-precision twin of VFMSUB132SD: same opcode 0x9b but
    // VEX/EVEX.W=0 (0x79/0x7d/0x05 prefix bytes) and a 4-byte memory
    // element. Takes 3 operands, or 4 when an {er} rounding control leads.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VFMSUB132SS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMSUB132SS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMSUB132SS takes 3 or 4 operands")
    }
    // VFMSUB132SS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // FMA3 register form, hand-built 3-byte VEX prefix.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                           // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5)) // VEX.R from v2, VEX.B from v0, map 0F38
            m.emit(0x79 ^ (hlcode(v[1]) << 3))                     // VEX.W=0, vvvv = v1, pp=66
            m.emit(0x9b)                                           // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))          // ModRM: mod=11, reg=v2, rm=v0
        })
    }
    // VFMSUB132SS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1])) // map 0F38, W=0
            m.emit(0x9b)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUB132SS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX memory form; scalar, so no broadcast (final evex arg 0)
        // and disp8 compressed by the 4-byte element size.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x9b)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VFMSUB132SS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // Embedded-rounding form: vcode(v[0]) carries the rounding mode
        // in the L'L bits and 0x10 sets EVEX.b for static rounding.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                      // EVEX escape
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))    // P0: R/B/R', map 0F38
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                                                // P1: W=0, vvvv = v2, pp=66
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10) // P2: z, RC, V', aaa, b=1
            m.emit(0x9b)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                     // ModRM: reg=v3 (dest), rm=v1
        })
    }
    // VFMSUB132SS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX register form without rounding control (P2 carries 0x40).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x9b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VFMSUB132SS")
    }
    return p
}
 47320  
// VFMSUB213PD performs "Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VFMSUB213PD
// Supported forms : (11 forms)
//
//    * VFMSUB213PD xmm, xmm, xmm                   [FMA3]
//    * VFMSUB213PD m128, xmm, xmm                  [FMA3]
//    * VFMSUB213PD ymm, ymm, ymm                   [FMA3]
//    * VFMSUB213PD m256, ymm, ymm                  [FMA3]
//    * VFMSUB213PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFMSUB213PD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFMSUB213PD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFMSUB213PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFMSUB213PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFMSUB213PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFMSUB213PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VFMSUB213PD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Only the {er} (embedded-rounding) form carries a 4th operand.
    switch len(vv) {
        case 0  : p = self.alloc("VFMSUB213PD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMSUB213PD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMSUB213PD takes 3 or 4 operands")
    }
    // Each guarded section below registers one candidate encoding via p.add.
    // More than one guard can match the same operands (plain XMM registers
    // satisfy both the VEX and the EVEX register forms); the final encoding
    // is presumably selected among registered candidates at encode time —
    // NOTE(review): confirm the selection policy in p.add/encoder.
    // VFMSUB213PD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // Hand-rolled 3-byte VEX prefix (0xc4 escape + 2 payload bytes),
        // opcode 0xaa, then a register-register ModRM byte.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0xaa)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUB213PD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // Memory-operand VEX form: vex3/mrsd build the prefix and the
        // ModRM/SIB/displacement for addr(v[0]).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xaa)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUB213PD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit(0xaa)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUB213PD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xaa)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUB213PD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX memory form; the final mrsd argument (64) is the compressed
        // displacement scale for the 512-bit operand width.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xaa)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFMSUB213PD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // Hand-rolled EVEX prefix (0x62 + 3 payload bytes); vcode(v[0])
        // packs the rounding-control operand into the third payload byte.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xaa)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMSUB213PD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX register form; the trailing 0x40/0x20/0x00 constants in the
        // third payload byte differ only by vector length (512/256/128).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xaa)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUB213PD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xaa)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFMSUB213PD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xaa)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUB213PD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xaa)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFMSUB213PD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xaa)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No guard matched: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VFMSUB213PD")
    }
    return p
}
 47476  
// VFMSUB213PS performs "Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VFMSUB213PS
// Supported forms : (11 forms)
//
//    * VFMSUB213PS xmm, xmm, xmm                   [FMA3]
//    * VFMSUB213PS m128, xmm, xmm                  [FMA3]
//    * VFMSUB213PS ymm, ymm, ymm                   [FMA3]
//    * VFMSUB213PS m256, ymm, ymm                  [FMA3]
//    * VFMSUB213PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFMSUB213PS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFMSUB213PS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFMSUB213PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFMSUB213PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFMSUB213PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFMSUB213PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VFMSUB213PS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Only the {er} (embedded-rounding) form carries a 4th operand.
    switch len(vv) {
        case 0  : p = self.alloc("VFMSUB213PS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMSUB213PS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMSUB213PS takes 3 or 4 operands")
    }
    // Each guarded section registers one candidate encoding via p.add; the
    // PS variants differ from the PD ones only in the prefix payload bytes
    // (0x79/0x7d vs 0xf9/0xfd: no operand-size / EVEX.W bit) — the opcode
    // byte (0xaa) is identical.
    // VFMSUB213PS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // Hand-rolled 3-byte VEX prefix (0xc4 escape + 2 payload bytes),
        // opcode, then a register-register ModRM byte.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xaa)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUB213PS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // Memory-operand VEX form: vex3/mrsd build the prefix and the
        // ModRM/SIB/displacement for addr(v[0]).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xaa)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUB213PS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0xaa)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUB213PS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xaa)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUB213PS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX memory form; the final mrsd argument (64) is the compressed
        // displacement scale for the 512-bit operand width.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xaa)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFMSUB213PS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // Hand-rolled EVEX prefix (0x62 + 3 payload bytes); vcode(v[0])
        // packs the rounding-control operand into the third payload byte.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xaa)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMSUB213PS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX register form; the trailing 0x40/0x20/0x00 constants in the
        // third payload byte differ only by vector length (512/256/128).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xaa)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUB213PS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xaa)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFMSUB213PS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xaa)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUB213PS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xaa)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFMSUB213PS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xaa)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No guard matched: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VFMSUB213PS")
    }
    return p
}
 47632  
 47633  // VFMSUB213SD performs "Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values".
 47634  //
 47635  // Mnemonic        : VFMSUB213SD
 47636  // Supported forms : (5 forms)
 47637  //
 47638  //    * VFMSUB213SD xmm, xmm, xmm                [FMA3]
 47639  //    * VFMSUB213SD m64, xmm, xmm                [FMA3]
 47640  //    * VFMSUB213SD m64, xmm, xmm{k}{z}          [AVX512F]
 47641  //    * VFMSUB213SD {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
 47642  //    * VFMSUB213SD xmm, xmm, xmm{k}{z}          [AVX512F]
 47643  //
 47644  func (self *Program) VFMSUB213SD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
 47645      var p *Instruction
 47646      switch len(vv) {
 47647          case 0  : p = self.alloc("VFMSUB213SD", 3, Operands { v0, v1, v2 })
 47648          case 1  : p = self.alloc("VFMSUB213SD", 4, Operands { v0, v1, v2, vv[0] })
 47649          default : panic("instruction VFMSUB213SD takes 3 or 4 operands")
 47650      }
 47651      // VFMSUB213SD xmm, xmm, xmm
 47652      if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
 47653          self.require(ISA_FMA3)
 47654          p.domain = DomainFMA
 47655          p.add(0, func(m *_Encoding, v []interface{}) {
 47656              m.emit(0xc4)
 47657              m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
 47658              m.emit(0xf9 ^ (hlcode(v[1]) << 3))
 47659              m.emit(0xab)
 47660              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 47661          })
 47662      }
 47663      // VFMSUB213SD m64, xmm, xmm
 47664      if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
 47665          self.require(ISA_FMA3)
 47666          p.domain = DomainFMA
 47667          p.add(0, func(m *_Encoding, v []interface{}) {
 47668              m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 47669              m.emit(0xab)
 47670              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 47671          })
 47672      }
 47673      // VFMSUB213SD m64, xmm, xmm{k}{z}
 47674      if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
 47675          self.require(ISA_AVX512F)
 47676          p.domain = DomainFMA
 47677          p.add(0, func(m *_Encoding, v []interface{}) {
 47678              m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
 47679              m.emit(0xab)
 47680              m.mrsd(lcode(v[2]), addr(v[0]), 8)
 47681          })
 47682      }
 47683      // VFMSUB213SD {er}, xmm, xmm, xmm{k}{z}
 47684      if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
 47685          self.require(ISA_AVX512F)
 47686          p.domain = DomainFMA
 47687          p.add(0, func(m *_Encoding, v []interface{}) {
 47688              m.emit(0x62)
 47689              m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
 47690              m.emit(0xfd ^ (hlcode(v[2]) << 3))
 47691              m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
 47692              m.emit(0xab)
 47693              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
 47694          })
 47695      }
 47696      // VFMSUB213SD xmm, xmm, xmm{k}{z}
 47697      if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
 47698          self.require(ISA_AVX512F)
 47699          p.domain = DomainFMA
 47700          p.add(0, func(m *_Encoding, v []interface{}) {
 47701              m.emit(0x62)
 47702              m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 47703              m.emit(0xfd ^ (hlcode(v[1]) << 3))
 47704              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
 47705              m.emit(0xab)
 47706              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 47707          })
 47708      }
 47709      if p.len == 0 {
 47710          panic("invalid operands for VFMSUB213SD")
 47711      }
 47712      return p
 47713  }
 47714  
 47715  // VFMSUB213SS performs "Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values".
 47716  //
 47717  // Mnemonic        : VFMSUB213SS
 47718  // Supported forms : (5 forms)
 47719  //
 47720  //    * VFMSUB213SS xmm, xmm, xmm                [FMA3]
 47721  //    * VFMSUB213SS m32, xmm, xmm                [FMA3]
 47722  //    * VFMSUB213SS m32, xmm, xmm{k}{z}          [AVX512F]
 47723  //    * VFMSUB213SS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
 47724  //    * VFMSUB213SS xmm, xmm, xmm{k}{z}          [AVX512F]
 47725  //
 47726  func (self *Program) VFMSUB213SS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
 47727      var p *Instruction
 47728      switch len(vv) {
 47729          case 0  : p = self.alloc("VFMSUB213SS", 3, Operands { v0, v1, v2 })
 47730          case 1  : p = self.alloc("VFMSUB213SS", 4, Operands { v0, v1, v2, vv[0] })
 47731          default : panic("instruction VFMSUB213SS takes 3 or 4 operands")
 47732      }
 47733      // VFMSUB213SS xmm, xmm, xmm
 47734      if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
 47735          self.require(ISA_FMA3)
 47736          p.domain = DomainFMA
 47737          p.add(0, func(m *_Encoding, v []interface{}) {
 47738              m.emit(0xc4)
 47739              m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
 47740              m.emit(0x79 ^ (hlcode(v[1]) << 3))
 47741              m.emit(0xab)
 47742              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 47743          })
 47744      }
 47745      // VFMSUB213SS m32, xmm, xmm
 47746      if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
 47747          self.require(ISA_FMA3)
 47748          p.domain = DomainFMA
 47749          p.add(0, func(m *_Encoding, v []interface{}) {
 47750              m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 47751              m.emit(0xab)
 47752              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 47753          })
 47754      }
 47755      // VFMSUB213SS m32, xmm, xmm{k}{z}
 47756      if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
 47757          self.require(ISA_AVX512F)
 47758          p.domain = DomainFMA
 47759          p.add(0, func(m *_Encoding, v []interface{}) {
 47760              m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
 47761              m.emit(0xab)
 47762              m.mrsd(lcode(v[2]), addr(v[0]), 4)
 47763          })
 47764      }
 47765      // VFMSUB213SS {er}, xmm, xmm, xmm{k}{z}
 47766      if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
 47767          self.require(ISA_AVX512F)
 47768          p.domain = DomainFMA
 47769          p.add(0, func(m *_Encoding, v []interface{}) {
 47770              m.emit(0x62)
 47771              m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
 47772              m.emit(0x7d ^ (hlcode(v[2]) << 3))
 47773              m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
 47774              m.emit(0xab)
 47775              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
 47776          })
 47777      }
 47778      // VFMSUB213SS xmm, xmm, xmm{k}{z}
 47779      if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
 47780          self.require(ISA_AVX512F)
 47781          p.domain = DomainFMA
 47782          p.add(0, func(m *_Encoding, v []interface{}) {
 47783              m.emit(0x62)
 47784              m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 47785              m.emit(0x7d ^ (hlcode(v[1]) << 3))
 47786              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
 47787              m.emit(0xab)
 47788              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 47789          })
 47790      }
 47791      if p.len == 0 {
 47792          panic("invalid operands for VFMSUB213SS")
 47793      }
 47794      return p
 47795  }
 47796  
// VFMSUB231PD performs "Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VFMSUB231PD
// Supported forms : (11 forms)
//
//    * VFMSUB231PD xmm, xmm, xmm                   [FMA3]
//    * VFMSUB231PD m128, xmm, xmm                  [FMA3]
//    * VFMSUB231PD ymm, ymm, ymm                   [FMA3]
//    * VFMSUB231PD m256, ymm, ymm                  [FMA3]
//    * VFMSUB231PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFMSUB231PD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFMSUB231PD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFMSUB231PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFMSUB231PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFMSUB231PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFMSUB231PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VFMSUB231PD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Only the {er} (embedded-rounding) form carries a 4th operand.
    switch len(vv) {
        case 0  : p = self.alloc("VFMSUB231PD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMSUB231PD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMSUB231PD takes 3 or 4 operands")
    }
    // Each guarded section registers one candidate encoding via p.add.
    // Structurally identical to VFMSUB213PD; only the opcode byte differs
    // (0xba for the 231 operand ordering vs 0xaa for 213).
    // VFMSUB231PD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // Hand-rolled 3-byte VEX prefix (0xc4 escape + 2 payload bytes),
        // opcode 0xba, then a register-register ModRM byte.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0xba)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUB231PD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        // Memory-operand VEX form: vex3/mrsd build the prefix and the
        // ModRM/SIB/displacement for addr(v[0]).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xba)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUB231PD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit(0xba)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUB231PD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xba)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUB231PD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX memory form; the final mrsd argument (64) is the compressed
        // displacement scale for the 512-bit operand width.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xba)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFMSUB231PD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // Hand-rolled EVEX prefix (0x62 + 3 payload bytes); vcode(v[0])
        // packs the rounding-control operand into the third payload byte.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xba)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMSUB231PD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // EVEX register form; the trailing 0x40/0x20/0x00 constants in the
        // third payload byte differ only by vector length (512/256/128).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xba)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUB231PD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xba)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFMSUB231PD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xba)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUB231PD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xba)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFMSUB231PD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xba)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No guard matched: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VFMSUB231PD")
    }
    return p
}
 47952  
// VFMSUB231PS performs "Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VFMSUB231PS
// Supported forms : (11 forms)
//
//    * VFMSUB231PS xmm, xmm, xmm                   [FMA3]
//    * VFMSUB231PS m128, xmm, xmm                  [FMA3]
//    * VFMSUB231PS ymm, ymm, ymm                   [FMA3]
//    * VFMSUB231PS m256, ymm, ymm                  [FMA3]
//    * VFMSUB231PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFMSUB231PS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFMSUB231PS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFMSUB231PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFMSUB231PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFMSUB231PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFMSUB231PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VFMSUB231PS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // The trailing variadic operand is consumed only by the 4-operand
    // {er} (embedded-rounding) AVX-512 form; all other forms take 3 operands.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VFMSUB231PS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMSUB231PS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMSUB231PS takes 3 or 4 operands")
    }
    // Each operand pattern below registers one candidate encoder via p.add;
    // the encoder closure emits the raw prefix/opcode/ModRM bytes for that form.
    // VFMSUB231PS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix emitted inline, then opcode 0xba and a
            // register-register ModRM byte (mod=11).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xba)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUB231PS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xba)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUB231PS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0xba)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUB231PS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xba)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUB231PS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory forms pass the compressed-displacement scale (last mrsd
            // argument) so disp8 is interpreted in 64-byte units here.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xba)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFMSUB231PS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // {er} form: the 0x10 bit sets EVEX.b (static rounding) and
            // vcode(v[0]) supplies the rounding-control bits in the L'L slot.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xba)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMSUB231PS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form; 0x40 selects the 512-bit vector length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xba)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUB231PS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xba)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFMSUB231PS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form; 0x00 selects the 128-bit vector length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xba)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUB231PS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xba)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFMSUB231PS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form; 0x20 selects the 256-bit vector length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xba)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VFMSUB231PS")
    }
    return p
}
 48108  
// VFMSUB231SD performs "Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values".
//
// Mnemonic        : VFMSUB231SD
// Supported forms : (5 forms)
//
//    * VFMSUB231SD xmm, xmm, xmm                [FMA3]
//    * VFMSUB231SD m64, xmm, xmm                [FMA3]
//    * VFMSUB231SD m64, xmm, xmm{k}{z}          [AVX512F]
//    * VFMSUB231SD {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VFMSUB231SD xmm, xmm, xmm{k}{z}          [AVX512F]
//
func (self *Program) VFMSUB231SD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // The trailing variadic operand is consumed only by the 4-operand
    // {er} (embedded-rounding) AVX-512 form; all other forms take 3 operands.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VFMSUB231SD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMSUB231SD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMSUB231SD takes 3 or 4 operands")
    }
    // Each operand pattern below registers one candidate encoder via p.add;
    // the encoder closure emits the raw prefix/opcode/ModRM bytes for that form.
    // VFMSUB231SD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix emitted inline, then opcode 0xbb and a
            // register-register ModRM byte (mod=11).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0xbb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUB231SD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xbb)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUB231SD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Scalar memory form: disp8 compression scale is 8 (one double);
            // no broadcast bit, hence the literal 0 passed to evex.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xbb)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VFMSUB231SD {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // {er} form: the 0x10 bit sets EVEX.b (static rounding) and
            // vcode(v[0]) supplies the rounding-control bits in the L'L slot.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xbb)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMSUB231SD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xbb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VFMSUB231SD")
    }
    return p
}
 48190  
// VFMSUB231SS performs "Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values".
//
// Mnemonic        : VFMSUB231SS
// Supported forms : (5 forms)
//
//    * VFMSUB231SS xmm, xmm, xmm                [FMA3]
//    * VFMSUB231SS m32, xmm, xmm                [FMA3]
//    * VFMSUB231SS m32, xmm, xmm{k}{z}          [AVX512F]
//    * VFMSUB231SS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VFMSUB231SS xmm, xmm, xmm{k}{z}          [AVX512F]
//
func (self *Program) VFMSUB231SS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // The trailing variadic operand is consumed only by the 4-operand
    // {er} (embedded-rounding) AVX-512 form; all other forms take 3 operands.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VFMSUB231SS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMSUB231SS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMSUB231SS takes 3 or 4 operands")
    }
    // Each operand pattern below registers one candidate encoder via p.add;
    // the encoder closure emits the raw prefix/opcode/ModRM bytes for that form.
    // VFMSUB231SS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix emitted inline, then opcode 0xbb and a
            // register-register ModRM byte (mod=11).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xbb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUB231SS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xbb)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUB231SS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Scalar memory form: disp8 compression scale is 4 (one float);
            // no broadcast bit, hence the literal 0 passed to evex.
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xbb)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VFMSUB231SS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // {er} form: the 0x10 bit sets EVEX.b (static rounding) and
            // vcode(v[0]) supplies the rounding-control bits in the L'L slot.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xbb)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMSUB231SS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xbb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VFMSUB231SS")
    }
    return p
}
 48272  
// VFMSUBADD132PD performs "Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VFMSUBADD132PD
// Supported forms : (11 forms)
//
//    * VFMSUBADD132PD xmm, xmm, xmm                   [FMA3]
//    * VFMSUBADD132PD m128, xmm, xmm                  [FMA3]
//    * VFMSUBADD132PD ymm, ymm, ymm                   [FMA3]
//    * VFMSUBADD132PD m256, ymm, ymm                  [FMA3]
//    * VFMSUBADD132PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFMSUBADD132PD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFMSUBADD132PD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFMSUBADD132PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFMSUBADD132PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFMSUBADD132PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFMSUBADD132PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VFMSUBADD132PD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // The trailing variadic operand is consumed only by the 4-operand
    // {er} (embedded-rounding) AVX-512 form; all other forms take 3 operands.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VFMSUBADD132PD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMSUBADD132PD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMSUBADD132PD takes 3 or 4 operands")
    }
    // Each operand pattern below registers one candidate encoder via p.add;
    // the encoder closure emits the raw prefix/opcode/ModRM bytes for that form.
    // VFMSUBADD132PD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix emitted inline, then opcode 0x97 and a
            // register-register ModRM byte (mod=11).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0x97)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD132PD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x97)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUBADD132PD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit(0x97)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD132PD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x97)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUBADD132PD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory forms pass the compressed-displacement scale (last mrsd
            // argument) so disp8 is interpreted in 64-byte units here.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x97)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFMSUBADD132PD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // {er} form: the 0x10 bit sets EVEX.b (static rounding) and
            // vcode(v[0]) supplies the rounding-control bits in the L'L slot.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x97)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMSUBADD132PD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form; 0x40 selects the 512-bit vector length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x97)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD132PD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x97)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFMSUBADD132PD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form; 0x00 selects the 128-bit vector length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x97)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD132PD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x97)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFMSUBADD132PD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form; 0x20 selects the 256-bit vector length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x97)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VFMSUBADD132PD")
    }
    return p
}
 48428  
// VFMSUBADD132PS performs "Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VFMSUBADD132PS
// Supported forms : (11 forms)
//
//    * VFMSUBADD132PS xmm, xmm, xmm                   [FMA3]
//    * VFMSUBADD132PS m128, xmm, xmm                  [FMA3]
//    * VFMSUBADD132PS ymm, ymm, ymm                   [FMA3]
//    * VFMSUBADD132PS m256, ymm, ymm                  [FMA3]
//    * VFMSUBADD132PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFMSUBADD132PS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFMSUBADD132PS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFMSUBADD132PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFMSUBADD132PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFMSUBADD132PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFMSUBADD132PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VFMSUBADD132PS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // The trailing variadic operand is consumed only by the 4-operand
    // {er} (embedded-rounding) AVX-512 form; all other forms take 3 operands.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VFMSUBADD132PS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMSUBADD132PS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMSUBADD132PS takes 3 or 4 operands")
    }
    // Each operand pattern below registers one candidate encoder via p.add;
    // the encoder closure emits the raw prefix/opcode/ModRM bytes for that form.
    // VFMSUBADD132PS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix emitted inline, then opcode 0x97 and a
            // register-register ModRM byte (mod=11).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x97)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD132PS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x97)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUBADD132PS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x97)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD132PS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x97)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUBADD132PS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory forms pass the compressed-displacement scale (last mrsd
            // argument) so disp8 is interpreted in 64-byte units here.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x97)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFMSUBADD132PS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // {er} form: the 0x10 bit sets EVEX.b (static rounding) and
            // vcode(v[0]) supplies the rounding-control bits in the L'L slot.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x97)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMSUBADD132PS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form; 0x40 selects the 512-bit vector length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x97)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD132PS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x97)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFMSUBADD132PS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form; 0x00 selects the 128-bit vector length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x97)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD132PS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x97)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFMSUBADD132PS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form; 0x20 selects the 256-bit vector length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x97)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered: the operand combination is unsupported.
    if p.len == 0 {
        panic("invalid operands for VFMSUBADD132PS")
    }
    return p
}
 48584  
// VFMSUBADD213PD performs "Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VFMSUBADD213PD
// Supported forms : (11 forms)
//
//    * VFMSUBADD213PD xmm, xmm, xmm                   [FMA3]
//    * VFMSUBADD213PD m128, xmm, xmm                  [FMA3]
//    * VFMSUBADD213PD ymm, ymm, ymm                   [FMA3]
//    * VFMSUBADD213PD m256, ymm, ymm                  [FMA3]
//    * VFMSUBADD213PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFMSUBADD213PD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFMSUBADD213PD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFMSUBADD213PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFMSUBADD213PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFMSUBADD213PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFMSUBADD213PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VFMSUBADD213PD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The trailing variadic operand is only used by the 4-operand {er}
    // (embedded-rounding) form; every other form takes exactly 3 operands.
    switch len(vv) {
        case 0  : p = self.alloc("VFMSUBADD213PD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMSUBADD213PD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMSUBADD213PD takes 3 or 4 operands")
    }
    // Each matching form below registers one candidate encoding on p; if no
    // form matches, p.len stays 0 and the panic at the bottom fires.
    // VFMSUBADD213PD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                             // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))   // VEX byte 1: ~R/~B register-extension bits, opcode map 0F38
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))                       // VEX byte 2: W=1, ~vvvv = v[1], L=128, pp=66
            m.emit(0xa7)                                             // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))            // ModRM: register-direct, reg = v[2], rm = v[0]
        })
    }
    // VFMSUBADD213PD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))   // VEX: map 0F38, W=1, L=128, pp=66
            m.emit(0xa7)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)   // ModRM + SIB/disp for the memory operand (no disp8 scaling under VEX)
        })
    }
    // VFMSUBADD213PD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))   // W=1, ~vvvv, L=256, pp=66
            m.emit(0xa7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD213PD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))   // as above but L=256
            m.emit(0xa7)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUBADD213PD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))   // EVEX prefix; third arg 0b10 selects 512-bit length
            m.emit(0xa7)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)   // disp8 compressed with N=64 (full zmm width)
        })
    }
    // VFMSUBADD213PD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))  // EVEX P0: ~R/~B/~R' extension bits, map 0F38
            m.emit(0xfd ^ (hlcode(v[2]) << 3))                                              // EVEX P1: W=1, ~vvvv = v[2], pp=66
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)   // EVEX P2: z, rounding mode in L'L, ~V', aaa; 0x10 sets EVEX.b (static rounding)
            m.emit(0xa7)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMSUBADD213PD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // EVEX P2: 0x40 = L'L bits for 512-bit
            m.emit(0xa7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD213PD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))   // 0b00 selects 128-bit length
            m.emit(0xa7)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)   // disp8 compressed with N=16 (xmm width)
        })
    }
    // VFMSUBADD213PD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // 0x00 = L'L bits for 128-bit
            m.emit(0xa7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD213PD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))   // 0b01 selects 256-bit length
            m.emit(0xa7)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)   // disp8 compressed with N=32 (ymm width)
        })
    }
    // VFMSUBADD213PD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // 0x20 = L'L bits for 256-bit
            m.emit(0xa7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFMSUBADD213PD")
    }
    return p
}
 48740  
// VFMSUBADD213PS performs "Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VFMSUBADD213PS
// Supported forms : (11 forms)
//
//    * VFMSUBADD213PS xmm, xmm, xmm                   [FMA3]
//    * VFMSUBADD213PS m128, xmm, xmm                  [FMA3]
//    * VFMSUBADD213PS ymm, ymm, ymm                   [FMA3]
//    * VFMSUBADD213PS m256, ymm, ymm                  [FMA3]
//    * VFMSUBADD213PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFMSUBADD213PS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFMSUBADD213PS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFMSUBADD213PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFMSUBADD213PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFMSUBADD213PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFMSUBADD213PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VFMSUBADD213PS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The trailing variadic operand is only used by the 4-operand {er}
    // (embedded-rounding) form; every other form takes exactly 3 operands.
    switch len(vv) {
        case 0  : p = self.alloc("VFMSUBADD213PS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMSUBADD213PS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMSUBADD213PS takes 3 or 4 operands")
    }
    // Same layout as VFMSUBADD213PD but with VEX/EVEX.W=0 for the
    // single-precision variant; opcode byte is 0xa7 in map 0F38.
    // VFMSUBADD213PS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                             // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))   // VEX byte 1: ~R/~B register-extension bits, opcode map 0F38
            m.emit(0x79 ^ (hlcode(v[1]) << 3))                       // VEX byte 2: W=0, ~vvvv = v[1], L=128, pp=66
            m.emit(0xa7)                                             // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))            // ModRM: register-direct, reg = v[2], rm = v[0]
        })
    }
    // VFMSUBADD213PS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))   // VEX: map 0F38, W=0, L=128, pp=66
            m.emit(0xa7)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)   // ModRM + SIB/disp for the memory operand (no disp8 scaling under VEX)
        })
    }
    // VFMSUBADD213PS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))   // W=0, ~vvvv, L=256, pp=66
            m.emit(0xa7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD213PS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))   // as above but L=256
            m.emit(0xa7)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUBADD213PS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))   // EVEX prefix; third arg 0b10 selects 512-bit length
            m.emit(0xa7)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)   // disp8 compressed with N=64 (full zmm width)
        })
    }
    // VFMSUBADD213PS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))  // EVEX P0: ~R/~B/~R' extension bits, map 0F38
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                                              // EVEX P1: W=0, ~vvvv = v[2], pp=66
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)   // EVEX P2: z, rounding mode in L'L, ~V', aaa; 0x10 sets EVEX.b (static rounding)
            m.emit(0xa7)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMSUBADD213PS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // EVEX P2: 0x40 = L'L bits for 512-bit
            m.emit(0xa7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD213PS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))   // 0b00 selects 128-bit length
            m.emit(0xa7)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)   // disp8 compressed with N=16 (xmm width)
        })
    }
    // VFMSUBADD213PS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // 0x00 = L'L bits for 128-bit
            m.emit(0xa7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD213PS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))   // 0b01 selects 256-bit length
            m.emit(0xa7)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)   // disp8 compressed with N=32 (ymm width)
        })
    }
    // VFMSUBADD213PS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // 0x20 = L'L bits for 256-bit
            m.emit(0xa7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFMSUBADD213PS")
    }
    return p
}
 48896  
// VFMSUBADD231PD performs "Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VFMSUBADD231PD
// Supported forms : (11 forms)
//
//    * VFMSUBADD231PD xmm, xmm, xmm                   [FMA3]
//    * VFMSUBADD231PD m128, xmm, xmm                  [FMA3]
//    * VFMSUBADD231PD ymm, ymm, ymm                   [FMA3]
//    * VFMSUBADD231PD m256, ymm, ymm                  [FMA3]
//    * VFMSUBADD231PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFMSUBADD231PD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFMSUBADD231PD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFMSUBADD231PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFMSUBADD231PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFMSUBADD231PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFMSUBADD231PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VFMSUBADD231PD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The trailing variadic operand is only used by the 4-operand {er}
    // (embedded-rounding) form; every other form takes exactly 3 operands.
    switch len(vv) {
        case 0  : p = self.alloc("VFMSUBADD231PD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMSUBADD231PD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMSUBADD231PD takes 3 or 4 operands")
    }
    // Same layout as VFMSUBADD213PD; the 231 operand-order variant uses
    // opcode byte 0xb7 in map 0F38 (vs 0xa7 for the 213 variant).
    // VFMSUBADD231PD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                             // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))   // VEX byte 1: ~R/~B register-extension bits, opcode map 0F38
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))                       // VEX byte 2: W=1, ~vvvv = v[1], L=128, pp=66
            m.emit(0xb7)                                             // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))            // ModRM: register-direct, reg = v[2], rm = v[0]
        })
    }
    // VFMSUBADD231PD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))   // VEX: map 0F38, W=1, L=128, pp=66
            m.emit(0xb7)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)   // ModRM + SIB/disp for the memory operand (no disp8 scaling under VEX)
        })
    }
    // VFMSUBADD231PD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))   // W=1, ~vvvv, L=256, pp=66
            m.emit(0xb7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD231PD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))   // as above but L=256
            m.emit(0xb7)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUBADD231PD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))   // EVEX prefix; third arg 0b10 selects 512-bit length
            m.emit(0xb7)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)   // disp8 compressed with N=64 (full zmm width)
        })
    }
    // VFMSUBADD231PD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))  // EVEX P0: ~R/~B/~R' extension bits, map 0F38
            m.emit(0xfd ^ (hlcode(v[2]) << 3))                                              // EVEX P1: W=1, ~vvvv = v[2], pp=66
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)   // EVEX P2: z, rounding mode in L'L, ~V', aaa; 0x10 sets EVEX.b (static rounding)
            m.emit(0xb7)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMSUBADD231PD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // EVEX P2: 0x40 = L'L bits for 512-bit
            m.emit(0xb7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD231PD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))   // 0b00 selects 128-bit length
            m.emit(0xb7)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)   // disp8 compressed with N=16 (xmm width)
        })
    }
    // VFMSUBADD231PD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // 0x00 = L'L bits for 128-bit
            m.emit(0xb7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD231PD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))   // 0b01 selects 256-bit length
            m.emit(0xb7)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)   // disp8 compressed with N=32 (ymm width)
        })
    }
    // VFMSUBADD231PD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // 0x20 = L'L bits for 256-bit
            m.emit(0xb7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFMSUBADD231PD")
    }
    return p
}
 49052  
// VFMSUBADD231PS performs "Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VFMSUBADD231PS
// Supported forms : (11 forms)
//
//    * VFMSUBADD231PS xmm, xmm, xmm                   [FMA3]
//    * VFMSUBADD231PS m128, xmm, xmm                  [FMA3]
//    * VFMSUBADD231PS ymm, ymm, ymm                   [FMA3]
//    * VFMSUBADD231PS m256, ymm, ymm                  [FMA3]
//    * VFMSUBADD231PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFMSUBADD231PS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFMSUBADD231PS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFMSUBADD231PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFMSUBADD231PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFMSUBADD231PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFMSUBADD231PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VFMSUBADD231PS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The trailing variadic operand is only used by the 4-operand {er}
    // (embedded-rounding) form; every other form takes exactly 3 operands.
    switch len(vv) {
        case 0  : p = self.alloc("VFMSUBADD231PS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFMSUBADD231PS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFMSUBADD231PS takes 3 or 4 operands")
    }
    // Same layout as VFMSUBADD231PD but with VEX/EVEX.W=0 for the
    // single-precision variant; opcode byte is 0xb7 in map 0F38.
    // VFMSUBADD231PS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                             // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))   // VEX byte 1: ~R/~B register-extension bits, opcode map 0F38
            m.emit(0x79 ^ (hlcode(v[1]) << 3))                       // VEX byte 2: W=0, ~vvvv = v[1], L=128, pp=66
            m.emit(0xb7)                                             // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))            // ModRM: register-direct, reg = v[2], rm = v[0]
        })
    }
    // VFMSUBADD231PS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))   // VEX: map 0F38, W=0, L=128, pp=66
            m.emit(0xb7)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)   // ModRM + SIB/disp for the memory operand (no disp8 scaling under VEX)
        })
    }
    // VFMSUBADD231PS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))   // W=0, ~vvvv, L=256, pp=66
            m.emit(0xb7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD231PS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))   // as above but L=256
            m.emit(0xb7)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFMSUBADD231PS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))   // EVEX prefix; third arg 0b10 selects 512-bit length
            m.emit(0xb7)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)   // disp8 compressed with N=64 (full zmm width)
        })
    }
    // VFMSUBADD231PS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))  // EVEX P0: ~R/~B/~R' extension bits, map 0F38
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                                              // EVEX P1: W=0, ~vvvv = v[2], pp=66
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)   // EVEX P2: z, rounding mode in L'L, ~V', aaa; 0x10 sets EVEX.b (static rounding)
            m.emit(0xb7)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFMSUBADD231PS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // EVEX P2: 0x40 = L'L bits for 512-bit
            m.emit(0xb7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD231PS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))   // 0b00 selects 128-bit length
            m.emit(0xb7)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)   // disp8 compressed with N=16 (xmm width)
        })
    }
    // VFMSUBADD231PS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // 0x00 = L'L bits for 128-bit
            m.emit(0xb7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFMSUBADD231PS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))   // 0b01 selects 256-bit length
            m.emit(0xb7)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)   // disp8 compressed with N=32 (ymm width)
        })
    }
    // VFMSUBADD231PS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // 0x20 = L'L bits for 256-bit
            m.emit(0xb7)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VFMSUBADD231PS")
    }
    return p
}
 49208  
// VFMSUBADDPD performs "Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VFMSUBADDPD
// Supported forms : (6 forms)
//
//    * VFMSUBADDPD xmm, xmm, xmm, xmm     [FMA4]
//    * VFMSUBADDPD m128, xmm, xmm, xmm    [FMA4]
//    * VFMSUBADDPD xmm, m128, xmm, xmm    [FMA4]
//    * VFMSUBADDPD ymm, ymm, ymm, ymm     [FMA4]
//    * VFMSUBADDPD m256, ymm, ymm, ymm    [FMA4]
//    * VFMSUBADDPD ymm, m256, ymm, ymm    [FMA4]
//
// All forms are 4-operand FMA4 encodings (opcode 0x5f): one source register is
// always carried in the high nibble of a trailing /is4 immediate byte
// (hlcode(...) << 4) rather than in ModRM.
func (self *Program) VFMSUBADDPD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VFMSUBADDPD", 4, Operands { v0, v1, v2, v3 })
    // VFMSUBADDPD xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Two equivalent encodings are registered; they differ only in the
        // second VEX payload byte (0xf9 vs 0x79 — presumably the VEX.W bit),
        // which swaps whether v0 or v1 goes into ModRM.rm, with the other
        // source supplied via the /is4 immediate.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[2]) << 3))
            m.emit(0x5f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x5f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMSUBADDPD m128, xmm, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Memory operand v0 is encoded through ModRM (addr/mrsd); register
        // source v1 rides in the /is4 immediate byte.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x5f)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFMSUBADDPD xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Mirror of the previous form: memory operand v1 in ModRM, register
        // source v0 in the /is4 immediate (VEX byte 0x01 instead of 0x81).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x5f)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMSUBADDPD ymm, ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // 256-bit variant of the dual reg-reg encoding above (0xfd/0x7d set
        // the VEX.L bit relative to 0xf9/0x79).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit(0x5f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit(0x5f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMSUBADDPD m256, ymm, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x85, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x5f)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFMSUBADDPD ymm, m256, ymm, ymm
    if isYMM(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x5f)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched the operand types: reject at construction time.
    if p.len == 0 {
        panic("invalid operands for VFMSUBADDPD")
    }
    return p
}
 49314  
// VFMSUBADDPS performs "Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VFMSUBADDPS
// Supported forms : (6 forms)
//
//    * VFMSUBADDPS xmm, xmm, xmm, xmm     [FMA4]
//    * VFMSUBADDPS m128, xmm, xmm, xmm    [FMA4]
//    * VFMSUBADDPS xmm, m128, xmm, xmm    [FMA4]
//    * VFMSUBADDPS ymm, ymm, ymm, ymm     [FMA4]
//    * VFMSUBADDPS m256, ymm, ymm, ymm    [FMA4]
//    * VFMSUBADDPS ymm, m256, ymm, ymm    [FMA4]
//
// Structurally identical to VFMSUBADDPD, but with FMA4 opcode 0x5e (the
// single-precision variant). One source register is always carried in the
// high nibble of a trailing /is4 immediate byte (hlcode(...) << 4).
func (self *Program) VFMSUBADDPS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VFMSUBADDPS", 4, Operands { v0, v1, v2, v3 })
    // VFMSUBADDPS xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Two equivalent encodings differing only in the second VEX payload
        // byte (0xf9 vs 0x79); the byte choice swaps which of v0/v1 lands in
        // ModRM.rm vs the /is4 immediate.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[2]) << 3))
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMSUBADDPS m128, xmm, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Memory operand v0 through ModRM; register source v1 via /is4.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x5e)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFMSUBADDPS xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Mirror form: memory operand v1 through ModRM; v0 via /is4.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x5e)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMSUBADDPS ymm, ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // 256-bit variant of the dual reg-reg encoding (0xfd/0x7d).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit(0x5e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMSUBADDPS m256, ymm, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x85, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x5e)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFMSUBADDPS ymm, m256, ymm, ymm
    if isYMM(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x5e)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched the operand types: reject at construction time.
    if p.len == 0 {
        panic("invalid operands for VFMSUBADDPS")
    }
    return p
}
 49420  
// VFMSUBPD performs "Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VFMSUBPD
// Supported forms : (6 forms)
//
//    * VFMSUBPD xmm, xmm, xmm, xmm     [FMA4]
//    * VFMSUBPD m128, xmm, xmm, xmm    [FMA4]
//    * VFMSUBPD xmm, m128, xmm, xmm    [FMA4]
//    * VFMSUBPD ymm, ymm, ymm, ymm     [FMA4]
//    * VFMSUBPD m256, ymm, ymm, ymm    [FMA4]
//    * VFMSUBPD ymm, m256, ymm, ymm    [FMA4]
//
// Same FMA4 encoding scheme as VFMSUBADDPD, with opcode 0x6d: one source
// register always travels in the high nibble of a trailing /is4 immediate.
func (self *Program) VFMSUBPD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VFMSUBPD", 4, Operands { v0, v1, v2, v3 })
    // VFMSUBPD xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Two equivalent encodings (VEX payload byte 0xf9 vs 0x79) swapping
        // which of v0/v1 occupies ModRM.rm vs the /is4 immediate.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[2]) << 3))
            m.emit(0x6d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x6d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMSUBPD m128, xmm, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Memory operand v0 through ModRM; register source v1 via /is4.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x6d)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFMSUBPD xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Mirror form: memory operand v1 through ModRM; v0 via /is4.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x6d)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMSUBPD ymm, ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // 256-bit variant of the dual reg-reg encoding (0xfd/0x7d).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit(0x6d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit(0x6d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMSUBPD m256, ymm, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x85, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x6d)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFMSUBPD ymm, m256, ymm, ymm
    if isYMM(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x6d)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched the operand types: reject at construction time.
    if p.len == 0 {
        panic("invalid operands for VFMSUBPD")
    }
    return p
}
 49526  
// VFMSUBPS performs "Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VFMSUBPS
// Supported forms : (6 forms)
//
//    * VFMSUBPS xmm, xmm, xmm, xmm     [FMA4]
//    * VFMSUBPS m128, xmm, xmm, xmm    [FMA4]
//    * VFMSUBPS xmm, m128, xmm, xmm    [FMA4]
//    * VFMSUBPS ymm, ymm, ymm, ymm     [FMA4]
//    * VFMSUBPS m256, ymm, ymm, ymm    [FMA4]
//    * VFMSUBPS ymm, m256, ymm, ymm    [FMA4]
//
// Same FMA4 encoding scheme as VFMSUBPD, with opcode 0x6c (single-precision
// variant): one source register always travels in the /is4 immediate.
func (self *Program) VFMSUBPS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VFMSUBPS", 4, Operands { v0, v1, v2, v3 })
    // VFMSUBPS xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Two equivalent encodings (VEX payload byte 0xf9 vs 0x79) swapping
        // which of v0/v1 occupies ModRM.rm vs the /is4 immediate.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[2]) << 3))
            m.emit(0x6c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x6c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMSUBPS m128, xmm, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Memory operand v0 through ModRM; register source v1 via /is4.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x6c)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFMSUBPS xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Mirror form: memory operand v1 through ModRM; v0 via /is4.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x6c)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMSUBPS ymm, ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // 256-bit variant of the dual reg-reg encoding (0xfd/0x7d).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit(0x6c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit(0x6c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMSUBPS m256, ymm, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x85, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x6c)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFMSUBPS ymm, m256, ymm, ymm
    if isYMM(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x6c)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched the operand types: reject at construction time.
    if p.len == 0 {
        panic("invalid operands for VFMSUBPS")
    }
    return p
}
 49632  
// VFMSUBSD performs "Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values".
//
// Mnemonic        : VFMSUBSD
// Supported forms : (3 forms)
//
//    * VFMSUBSD xmm, xmm, xmm, xmm    [FMA4]
//    * VFMSUBSD m64, xmm, xmm, xmm    [FMA4]
//    * VFMSUBSD xmm, m64, xmm, xmm    [FMA4]
//
// Scalar FMA4 form (opcode 0x6f): only xmm registers and a 64-bit memory
// operand are accepted; one source always rides in the /is4 immediate.
func (self *Program) VFMSUBSD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VFMSUBSD", 4, Operands { v0, v1, v2, v3 })
    // VFMSUBSD xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Two equivalent encodings (VEX payload byte 0xf9 vs 0x79) swapping
        // which of v0/v1 occupies ModRM.rm vs the /is4 immediate.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[2]) << 3))
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMSUBSD m64, xmm, xmm, xmm
    if isM64(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Memory operand v0 through ModRM; register source v1 via /is4.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x6f)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFMSUBSD xmm, m64, xmm, xmm
    if isXMM(v0) && isM64(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Mirror form: memory operand v1 through ModRM; v0 via /is4.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x6f)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched the operand types: reject at construction time.
    if p.len == 0 {
        panic("invalid operands for VFMSUBSD")
    }
    return p
}
 49692  
// VFMSUBSS performs "Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values".
//
// Mnemonic        : VFMSUBSS
// Supported forms : (3 forms)
//
//    * VFMSUBSS xmm, xmm, xmm, xmm    [FMA4]
//    * VFMSUBSS m32, xmm, xmm, xmm    [FMA4]
//    * VFMSUBSS xmm, m32, xmm, xmm    [FMA4]
//
// Scalar FMA4 form (opcode 0x6e): only xmm registers and a 32-bit memory
// operand are accepted; one source always rides in the /is4 immediate.
func (self *Program) VFMSUBSS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VFMSUBSS", 4, Operands { v0, v1, v2, v3 })
    // VFMSUBSS xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Two equivalent encodings (VEX payload byte 0xf9 vs 0x79) swapping
        // which of v0/v1 occupies ModRM.rm vs the /is4 immediate.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[2]) << 3))
            m.emit(0x6e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x6e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFMSUBSS m32, xmm, xmm, xmm
    if isM32(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Memory operand v0 through ModRM; register source v1 via /is4.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x6e)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFMSUBSS xmm, m32, xmm, xmm
    if isXMM(v0) && isM32(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Mirror form: memory operand v1 through ModRM; v0 via /is4.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x6e)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched the operand types: reject at construction time.
    if p.len == 0 {
        panic("invalid operands for VFMSUBSS")
    }
    return p
}
 49752  
// VFNMADD132PD performs "Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VFNMADD132PD
// Supported forms : (11 forms)
//
//    * VFNMADD132PD xmm, xmm, xmm                   [FMA3]
//    * VFNMADD132PD m128, xmm, xmm                  [FMA3]
//    * VFNMADD132PD ymm, ymm, ymm                   [FMA3]
//    * VFNMADD132PD m256, ymm, ymm                  [FMA3]
//    * VFNMADD132PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFNMADD132PD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFNMADD132PD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFNMADD132PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFNMADD132PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFNMADD132PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFNMADD132PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// FMA3 forms use a VEX prefix with opcode 0x9c; the AVX-512 forms use EVEX
// encoding with masking ({k}{z}), broadcast (mNNbcst) and, in the 4-operand
// variant, an embedded-rounding ({er}) operand.
func (self *Program) VFNMADD132PD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // 3 operands for the register/memory forms; a 4th operand exists only
    // for the {er} embedded-rounding variant.
    switch len(vv) {
        case 0  : p = self.alloc("VFNMADD132PD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMADD132PD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMADD132PD takes 3 or 4 operands")
    }
    // VFNMADD132PD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0x9c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD132PD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x9c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMADD132PD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit(0x9c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD132PD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x9c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMADD132PD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // mrsd scale 64 matches the full 512-bit operand width (EVEX
        // compressed disp8*N); the xmm/ymm forms below use 16/32.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x9c)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFNMADD132PD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        // Embedded-rounding form: the rounding-control operand v[0] is folded
        // into the EVEX byte via vcode(v[0]) << 5 (together with the 0x10 bit).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x9c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMADD132PD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x9c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD132PD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x9c)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFNMADD132PD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x9c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD132PD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x9c)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFNMADD132PD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x9c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the operand types: reject at construction time.
    if p.len == 0 {
        panic("invalid operands for VFNMADD132PD")
    }
    return p
}
 49908  
// VFNMADD132PS performs "Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VFNMADD132PS
// Supported forms : (11 forms)
//
//    * VFNMADD132PS xmm, xmm, xmm                   [FMA3]
//    * VFNMADD132PS m128, xmm, xmm                  [FMA3]
//    * VFNMADD132PS ymm, ymm, ymm                   [FMA3]
//    * VFNMADD132PS m256, ymm, ymm                  [FMA3]
//    * VFNMADD132PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFNMADD132PS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFNMADD132PS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFNMADD132PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFNMADD132PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFNMADD132PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFNMADD132PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VFNMADD132PS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The variadic 4th operand is only present for the {er} (embedded-rounding) form.
    switch len(vv) {
        case 0  : p = self.alloc("VFNMADD132PS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMADD132PS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMADD132PS takes 3 or 4 operands")
    }
    // Each matching form below registers one encoder; multiple forms may match
    // and the final instruction selection happens later (outside this function).
    // VFNMADD132PS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built 3-byte VEX prefix (0xc4 escape), opcode 0x9c, then
            // ModRM with mod=11 (register-direct): reg=v[2], rm=v[0].
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x9c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD132PS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: helper builds the VEX prefix, then opcode and
            // ModRM/SIB/displacement for addr(v[0]).
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x9c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMADD132PS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same as the xmm register form but with 0x7d (VEX.L set for 256-bit).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x9c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD132PS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x9c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMADD132PS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded memory form; the trailing 64 is the memory-operand
            // size used for disp8 scaling (512-bit vector).
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x9c)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFNMADD132PS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Embedded-rounding form: operands are shifted by one (v[0] is the
            // rounding mode), and vcode(v[0]) is merged into EVEX byte 3.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x9c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMADD132PS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built 4-byte EVEX prefix (0x62 escape); the 0x40 in byte 3
            // selects the 512-bit vector length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x9c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD132PS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x9c)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFNMADD132PS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form, 128-bit vector length (0x00 in byte 3).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x9c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD132PS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x9c)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFNMADD132PS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form, 256-bit vector length (0x20 in byte 3).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x9c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VFNMADD132PS")
    }
    return p
}
 50064  
// VFNMADD132SD performs "Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values".
//
// Mnemonic        : VFNMADD132SD
// Supported forms : (5 forms)
//
//    * VFNMADD132SD xmm, xmm, xmm                [FMA3]
//    * VFNMADD132SD m64, xmm, xmm                [FMA3]
//    * VFNMADD132SD m64, xmm, xmm{k}{z}          [AVX512F]
//    * VFNMADD132SD {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VFNMADD132SD xmm, xmm, xmm{k}{z}          [AVX512F]
//
func (self *Program) VFNMADD132SD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The variadic 4th operand is only present for the {er} (embedded-rounding) form.
    switch len(vv) {
        case 0  : p = self.alloc("VFNMADD132SD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMADD132SD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMADD132SD takes 3 or 4 operands")
    }
    // VFNMADD132SD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built 3-byte VEX prefix (0xc4 escape), opcode 0x9d, then
            // ModRM with mod=11 (register-direct): reg=v[2], rm=v[0].
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0x9d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD132SD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x9d)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMADD132SD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; trailing 8 is the scalar operand size in bytes,
            // used for disp8 scaling. No broadcast for scalar forms (last arg 0).
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x9d)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VFNMADD132SD {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Embedded-rounding form: operands shift by one (v[0] is the
            // rounding mode), vcode(v[0]) merged into EVEX byte 3.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x9d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMADD132SD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built 4-byte EVEX prefix (0x62 escape), then opcode and ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x9d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VFNMADD132SD")
    }
    return p
}
 50146  
// VFNMADD132SS performs "Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values".
//
// Mnemonic        : VFNMADD132SS
// Supported forms : (5 forms)
//
//    * VFNMADD132SS xmm, xmm, xmm                [FMA3]
//    * VFNMADD132SS m32, xmm, xmm                [FMA3]
//    * VFNMADD132SS m32, xmm, xmm{k}{z}          [AVX512F]
//    * VFNMADD132SS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VFNMADD132SS xmm, xmm, xmm{k}{z}          [AVX512F]
//
func (self *Program) VFNMADD132SS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The variadic 4th operand is only present for the {er} (embedded-rounding) form.
    switch len(vv) {
        case 0  : p = self.alloc("VFNMADD132SS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMADD132SS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMADD132SS takes 3 or 4 operands")
    }
    // VFNMADD132SS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built 3-byte VEX prefix (0xc4 escape), opcode 0x9d, then
            // ModRM with mod=11 (register-direct): reg=v[2], rm=v[0].
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x9d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD132SS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x9d)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMADD132SS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; trailing 4 is the scalar operand size in bytes,
            // used for disp8 scaling. No broadcast for scalar forms (last arg 0).
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x9d)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VFNMADD132SS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Embedded-rounding form: operands shift by one (v[0] is the
            // rounding mode), vcode(v[0]) merged into EVEX byte 3.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x9d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMADD132SS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built 4-byte EVEX prefix (0x62 escape), then opcode and ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x9d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VFNMADD132SS")
    }
    return p
}
 50228  
// VFNMADD213PD performs "Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VFNMADD213PD
// Supported forms : (11 forms)
//
//    * VFNMADD213PD xmm, xmm, xmm                   [FMA3]
//    * VFNMADD213PD m128, xmm, xmm                  [FMA3]
//    * VFNMADD213PD ymm, ymm, ymm                   [FMA3]
//    * VFNMADD213PD m256, ymm, ymm                  [FMA3]
//    * VFNMADD213PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFNMADD213PD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFNMADD213PD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFNMADD213PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFNMADD213PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFNMADD213PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFNMADD213PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VFNMADD213PD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The variadic 4th operand is only present for the {er} (embedded-rounding) form.
    switch len(vv) {
        case 0  : p = self.alloc("VFNMADD213PD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMADD213PD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMADD213PD takes 3 or 4 operands")
    }
    // Each matching form below registers one encoder; multiple forms may match
    // and the final instruction selection happens later (outside this function).
    // VFNMADD213PD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built 3-byte VEX prefix (0xc4 escape), opcode 0xac, then
            // ModRM with mod=11 (register-direct): reg=v[2], rm=v[0].
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0xac)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD213PD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xac)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMADD213PD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same as the xmm register form but with 0xfd (VEX.L set for 256-bit).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit(0xac)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD213PD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xac)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMADD213PD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded memory form; the trailing 64 is the memory-operand
            // size used for disp8 scaling (512-bit vector).
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xac)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFNMADD213PD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Embedded-rounding form: operands shift by one (v[0] is the
            // rounding mode), vcode(v[0]) merged into EVEX byte 3.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xac)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMADD213PD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built 4-byte EVEX prefix (0x62 escape); the 0x40 in byte 3
            // selects the 512-bit vector length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xac)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD213PD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xac)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFNMADD213PD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form, 128-bit vector length (0x00 in byte 3).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xac)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD213PD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xac)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFNMADD213PD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form, 256-bit vector length (0x20 in byte 3).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xac)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VFNMADD213PD")
    }
    return p
}
 50384  
// VFNMADD213PS performs "Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VFNMADD213PS
// Supported forms : (11 forms)
//
//    * VFNMADD213PS xmm, xmm, xmm                   [FMA3]
//    * VFNMADD213PS m128, xmm, xmm                  [FMA3]
//    * VFNMADD213PS ymm, ymm, ymm                   [FMA3]
//    * VFNMADD213PS m256, ymm, ymm                  [FMA3]
//    * VFNMADD213PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFNMADD213PS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFNMADD213PS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFNMADD213PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFNMADD213PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFNMADD213PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFNMADD213PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VFNMADD213PS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The variadic 4th operand is only present for the {er} (embedded-rounding) form.
    switch len(vv) {
        case 0  : p = self.alloc("VFNMADD213PS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMADD213PS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMADD213PS takes 3 or 4 operands")
    }
    // Each matching form below registers one encoder; multiple forms may match
    // and the final instruction selection happens later (outside this function).
    // VFNMADD213PS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built 3-byte VEX prefix (0xc4 escape), opcode 0xac, then
            // ModRM with mod=11 (register-direct): reg=v[2], rm=v[0].
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xac)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD213PS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xac)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMADD213PS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same as the xmm register form but with 0x7d (VEX.L set for 256-bit).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0xac)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD213PS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xac)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMADD213PS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded memory form; the trailing 64 is the memory-operand
            // size used for disp8 scaling (512-bit vector).
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xac)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFNMADD213PS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Embedded-rounding form: operands shift by one (v[0] is the
            // rounding mode), vcode(v[0]) merged into EVEX byte 3.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xac)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMADD213PS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built 4-byte EVEX prefix (0x62 escape); the 0x40 in byte 3
            // selects the 512-bit vector length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xac)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD213PS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xac)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFNMADD213PS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form, 128-bit vector length (0x00 in byte 3).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xac)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD213PS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xac)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFNMADD213PS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form, 256-bit vector length (0x20 in byte 3).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xac)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VFNMADD213PS")
    }
    return p
}
 50540  
// VFNMADD213SD performs "Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values".
//
// Mnemonic        : VFNMADD213SD
// Supported forms : (5 forms)
//
//    * VFNMADD213SD xmm, xmm, xmm                [FMA3]
//    * VFNMADD213SD m64, xmm, xmm                [FMA3]
//    * VFNMADD213SD m64, xmm, xmm{k}{z}          [AVX512F]
//    * VFNMADD213SD {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VFNMADD213SD xmm, xmm, xmm{k}{z}          [AVX512F]
//
func (self *Program) VFNMADD213SD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional 4th operand is only accepted for the {er} (embedded
    // rounding) form; any other arity is a caller error.
    switch len(vv) {
        case 0  : p = self.alloc("VFNMADD213SD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMADD213SD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMADD213SD takes 3 or 4 operands")
    }
    // VFNMADD213SD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix built inline: 0xC4 escape, then two payload
            // bytes carrying the (inverted) register-extension and vvvv bits.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0xad)                                     // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))    // ModRM, register-direct
        })
    }
    // VFNMADD213SD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: vex3 derives the VEX prefix from the address operand.
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xad)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMADD213SD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; last evex argument is the broadcast bit,
            // fixed at 0 for a scalar m64 source. disp8 is scaled by 8.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xad)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VFNMADD213SD {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix built inline (0x62 + three payload bytes);
            // vcode(v[0]) injects the rounding-control bits of the {er} operand.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xad)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMADD213SD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register-direct form with opmask (kcode) and zeroing (zcode) support.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xad)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the supplied operand types/arity are invalid.
    if p.len == 0 {
        panic("invalid operands for VFNMADD213SD")
    }
    return p
}
 50622  
// VFNMADD213SS performs "Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values".
//
// Mnemonic        : VFNMADD213SS
// Supported forms : (5 forms)
//
//    * VFNMADD213SS xmm, xmm, xmm                [FMA3]
//    * VFNMADD213SS m32, xmm, xmm                [FMA3]
//    * VFNMADD213SS m32, xmm, xmm{k}{z}          [AVX512F]
//    * VFNMADD213SS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VFNMADD213SS xmm, xmm, xmm{k}{z}          [AVX512F]
//
func (self *Program) VFNMADD213SS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional 4th operand is only accepted for the {er} (embedded
    // rounding) form; any other arity is a caller error.
    switch len(vv) {
        case 0  : p = self.alloc("VFNMADD213SS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMADD213SS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMADD213SS takes 3 or 4 operands")
    }
    // VFNMADD213SS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix built inline: 0xC4 escape, then two payload
            // bytes carrying the (inverted) register-extension and vvvv bits.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xad)                                     // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))    // ModRM, register-direct
        })
    }
    // VFNMADD213SS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: vex3 derives the VEX prefix from the address operand.
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xad)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMADD213SS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; last evex argument is the broadcast bit,
            // fixed at 0 for a scalar m32 source. disp8 is scaled by 4.
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xad)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VFNMADD213SS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix built inline (0x62 + three payload bytes);
            // vcode(v[0]) injects the rounding-control bits of the {er} operand.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xad)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMADD213SS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register-direct form with opmask (kcode) and zeroing (zcode) support.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xad)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the supplied operand types/arity are invalid.
    if p.len == 0 {
        panic("invalid operands for VFNMADD213SS")
    }
    return p
}
 50704  
// VFNMADD231PD performs "Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VFNMADD231PD
// Supported forms : (11 forms)
//
//    * VFNMADD231PD xmm, xmm, xmm                   [FMA3]
//    * VFNMADD231PD m128, xmm, xmm                  [FMA3]
//    * VFNMADD231PD ymm, ymm, ymm                   [FMA3]
//    * VFNMADD231PD m256, ymm, ymm                  [FMA3]
//    * VFNMADD231PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFNMADD231PD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFNMADD231PD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFNMADD231PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFNMADD231PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFNMADD231PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFNMADD231PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VFNMADD231PD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional 4th operand is only accepted for the {er} (embedded
    // rounding) form; any other arity is a caller error.
    switch len(vv) {
        case 0  : p = self.alloc("VFNMADD231PD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMADD231PD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMADD231PD takes 3 or 4 operands")
    }
    // VFNMADD231PD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix built inline: 0xC4 escape, then two payload
            // bytes carrying the (inverted) register-extension and vvvv bits.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0xbc)                                     // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))    // ModRM, register-direct
        })
    }
    // VFNMADD231PD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: vex3 derives the VEX prefix from the address operand.
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xbc)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMADD231PD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same as the xmm form but with the VEX.L (256-bit) bit set in 0xfd.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit(0xbc)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD231PD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xbc)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMADD231PD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; bcode(v[0]) sets the broadcast bit when the
            // operand is m64bcst. disp8 is scaled by the 64-byte vector width.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xbc)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFNMADD231PD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix built inline (0x62 + three payload bytes);
            // vcode(v[0]) injects the rounding-control bits of the {er} operand.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xbc)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMADD231PD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register-direct form; trailing 0x40 selects the 512-bit length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xbc)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD231PD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xbc)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFNMADD231PD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register-direct form; trailing 0x00 selects the 128-bit length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xbc)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD231PD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xbc)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFNMADD231PD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register-direct form; trailing 0x20 selects the 256-bit length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xbc)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the supplied operand types/arity are invalid.
    if p.len == 0 {
        panic("invalid operands for VFNMADD231PD")
    }
    return p
}
 50860  
// VFNMADD231PS performs "Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VFNMADD231PS
// Supported forms : (11 forms)
//
//    * VFNMADD231PS xmm, xmm, xmm                   [FMA3]
//    * VFNMADD231PS m128, xmm, xmm                  [FMA3]
//    * VFNMADD231PS ymm, ymm, ymm                   [FMA3]
//    * VFNMADD231PS m256, ymm, ymm                  [FMA3]
//    * VFNMADD231PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VFNMADD231PS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VFNMADD231PS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VFNMADD231PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VFNMADD231PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VFNMADD231PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VFNMADD231PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VFNMADD231PS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional 4th operand is only accepted for the {er} (embedded
    // rounding) form; any other arity is a caller error.
    switch len(vv) {
        case 0  : p = self.alloc("VFNMADD231PS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMADD231PS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMADD231PS takes 3 or 4 operands")
    }
    // VFNMADD231PS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix built inline: 0xC4 escape, then two payload
            // bytes carrying the (inverted) register-extension and vvvv bits.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xbc)                                     // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))    // ModRM, register-direct
        })
    }
    // VFNMADD231PS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: vex3 derives the VEX prefix from the address operand.
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xbc)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMADD231PS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same as the xmm form but with the VEX.L (256-bit) bit set in 0x7d.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0xbc)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD231PS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xbc)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMADD231PS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; bcode(v[0]) sets the broadcast bit when the
            // operand is m32bcst. disp8 is scaled by the 64-byte vector width.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xbc)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFNMADD231PS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix built inline (0x62 + three payload bytes);
            // vcode(v[0]) injects the rounding-control bits of the {er} operand.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xbc)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMADD231PS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register-direct form; trailing 0x40 selects the 512-bit length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xbc)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD231PS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xbc)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFNMADD231PS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register-direct form; trailing 0x00 selects the 128-bit length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xbc)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMADD231PS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xbc)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFNMADD231PS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register-direct form; trailing 0x20 selects the 256-bit length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xbc)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the supplied operand types/arity are invalid.
    if p.len == 0 {
        panic("invalid operands for VFNMADD231PS")
    }
    return p
}
 51016  
// VFNMADD231SD performs "Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values".
//
// Mnemonic        : VFNMADD231SD
// Supported forms : (5 forms)
//
//    * VFNMADD231SD xmm, xmm, xmm                [FMA3]
//    * VFNMADD231SD m64, xmm, xmm                [FMA3]
//    * VFNMADD231SD m64, xmm, xmm{k}{z}          [AVX512F]
//    * VFNMADD231SD {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VFNMADD231SD xmm, xmm, xmm{k}{z}          [AVX512F]
//
func (self *Program) VFNMADD231SD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional 4th operand is only accepted for the {er} (embedded
    // rounding) form; any other arity is a caller error.
    switch len(vv) {
        case 0  : p = self.alloc("VFNMADD231SD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMADD231SD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMADD231SD takes 3 or 4 operands")
    }
    // VFNMADD231SD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix built inline: 0xC4 escape, then two payload
            // bytes carrying the (inverted) register-extension and vvvv bits.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0xbd)                                     // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))    // ModRM, register-direct
        })
    }
    // VFNMADD231SD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: vex3 derives the VEX prefix from the address operand.
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xbd)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMADD231SD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; last evex argument is the broadcast bit,
            // fixed at 0 for a scalar m64 source. disp8 is scaled by 8.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xbd)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VFNMADD231SD {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix built inline (0x62 + three payload bytes);
            // vcode(v[0]) injects the rounding-control bits of the {er} operand.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xbd)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMADD231SD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register-direct form with opmask (kcode) and zeroing (zcode) support.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xbd)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the supplied operand types/arity are invalid.
    if p.len == 0 {
        panic("invalid operands for VFNMADD231SD")
    }
    return p
}
 51098  
// VFNMADD231SS performs "Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values".
//
// Mnemonic        : VFNMADD231SS
// Supported forms : (5 forms)
//
//    * VFNMADD231SS xmm, xmm, xmm                [FMA3]
//    * VFNMADD231SS m32, xmm, xmm                [FMA3]
//    * VFNMADD231SS m32, xmm, xmm{k}{z}          [AVX512F]
//    * VFNMADD231SS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VFNMADD231SS xmm, xmm, xmm{k}{z}          [AVX512F]
//
func (self *Program) VFNMADD231SS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional 4th operand is only accepted for the {er} (embedded
    // rounding) form; any other arity is a caller error.
    switch len(vv) {
        case 0  : p = self.alloc("VFNMADD231SS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMADD231SS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMADD231SS takes 3 or 4 operands")
    }
    // VFNMADD231SS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix built inline: 0xC4 escape, then two payload
            // bytes carrying the (inverted) register-extension and vvvv bits.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xbd)                                     // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))    // ModRM, register-direct
        })
    }
    // VFNMADD231SS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: vex3 derives the VEX prefix from the address operand.
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xbd)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMADD231SS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; last evex argument is the broadcast bit,
            // fixed at 0 for a scalar m32 source. disp8 is scaled by 4.
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xbd)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VFNMADD231SS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix built inline (0x62 + three payload bytes);
            // vcode(v[0]) injects the rounding-control bits of the {er} operand.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xbd)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMADD231SS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register-direct form with opmask (kcode) and zeroing (zcode) support.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xbd)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the supplied operand types/arity are invalid.
    if p.len == 0 {
        panic("invalid operands for VFNMADD231SS")
    }
    return p
}
 51180  
 51181  // VFNMADDPD performs "Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values".
 51182  //
 51183  // Mnemonic        : VFNMADDPD
 51184  // Supported forms : (6 forms)
 51185  //
 51186  //    * VFNMADDPD xmm, xmm, xmm, xmm     [FMA4]
 51187  //    * VFNMADDPD m128, xmm, xmm, xmm    [FMA4]
 51188  //    * VFNMADDPD xmm, m128, xmm, xmm    [FMA4]
 51189  //    * VFNMADDPD ymm, ymm, ymm, ymm     [FMA4]
 51190  //    * VFNMADDPD m256, ymm, ymm, ymm    [FMA4]
 51191  //    * VFNMADDPD ymm, m256, ymm, ymm    [FMA4]
 51192  //
func (self *Program) VFNMADDPD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VFNMADDPD", 4, Operands { v0, v1, v2, v3 })
    // VFNMADDPD xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // FMA4 carries the fourth operand in bits 7:4 of a trailing immediate
        // byte. For the all-register form there are two equivalent encodings
        // (which of v[0]/v[1] sits in ModRM.rm vs. the immediate — presumably
        // selected by VEX.W, 0xf9 vs 0x79 in the second prefix byte), so both
        // are registered and the encoder can pick either.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[2]) << 3))
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFNMADDPD m128, xmm, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory in the rm slot; register v[1] rides in the immediate byte.
            m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x79)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFNMADDPD xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Mirror of the previous form: memory is v[1], register v[0] goes
            // in the immediate byte.
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x79)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFNMADDPD ymm, ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // 256-bit variant of the dual register-register encodings above
        // (0xfd/0x7d instead of 0xf9/0x79 sets the VEX.L bit).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFNMADDPD m256, ymm, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x85, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x79)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFNMADDPD ymm, m256, ymm, ymm
    if isYMM(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x79)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No operand pattern matched: the arguments are not a valid form.
    if p.len == 0 {
        panic("invalid operands for VFNMADDPD")
    }
    return p
}
 51286  
 51287  // VFNMADDPS performs "Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values".
 51288  //
 51289  // Mnemonic        : VFNMADDPS
 51290  // Supported forms : (6 forms)
 51291  //
 51292  //    * VFNMADDPS xmm, xmm, xmm, xmm     [FMA4]
 51293  //    * VFNMADDPS m128, xmm, xmm, xmm    [FMA4]
 51294  //    * VFNMADDPS xmm, m128, xmm, xmm    [FMA4]
 51295  //    * VFNMADDPS ymm, ymm, ymm, ymm     [FMA4]
 51296  //    * VFNMADDPS m256, ymm, ymm, ymm    [FMA4]
 51297  //    * VFNMADDPS ymm, m256, ymm, ymm    [FMA4]
 51298  //
func (self *Program) VFNMADDPS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VFNMADDPS", 4, Operands { v0, v1, v2, v3 })
    // Structure is identical to VFNMADDPD; only the opcode differs
    // (0x78 here, single-precision, vs. 0x79 for double-precision).
    // VFNMADDPS xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Two equivalent FMA4 register-register encodings; the fourth operand
        // is carried in bits 7:4 of a trailing immediate byte.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[2]) << 3))
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFNMADDPS m128, xmm, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x78)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFNMADDPS xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x78)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFNMADDPS ymm, ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // 256-bit variant (VEX.L set via 0xfd/0x7d in the second prefix byte).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFNMADDPS m256, ymm, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x85, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x78)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFNMADDPS ymm, m256, ymm, ymm
    if isYMM(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x78)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No operand pattern matched: the arguments are not a valid form.
    if p.len == 0 {
        panic("invalid operands for VFNMADDPS")
    }
    return p
}
 51392  
 51393  // VFNMADDSD performs "Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values".
 51394  //
 51395  // Mnemonic        : VFNMADDSD
 51396  // Supported forms : (3 forms)
 51397  //
 51398  //    * VFNMADDSD xmm, xmm, xmm, xmm    [FMA4]
 51399  //    * VFNMADDSD m64, xmm, xmm, xmm    [FMA4]
 51400  //    * VFNMADDSD xmm, m64, xmm, xmm    [FMA4]
 51401  //
func (self *Program) VFNMADDSD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VFNMADDSD", 4, Operands { v0, v1, v2, v3 })
    // Scalar double-precision FMA4 form (opcode 0x7b); same encoding scheme
    // as VFNMADDPD but with m64 memory operands and no 256-bit forms.
    // VFNMADDSD xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Two equivalent FMA4 register-register encodings; the fourth operand
        // is carried in bits 7:4 of a trailing immediate byte.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[2]) << 3))
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFNMADDSD m64, xmm, xmm, xmm
    if isM64(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x7b)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFNMADDSD xmm, m64, xmm, xmm
    if isXMM(v0) && isM64(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x7b)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No operand pattern matched: the arguments are not a valid form.
    if p.len == 0 {
        panic("invalid operands for VFNMADDSD")
    }
    return p
}
 51452  
 51453  // VFNMADDSS performs "Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values".
 51454  //
 51455  // Mnemonic        : VFNMADDSS
 51456  // Supported forms : (3 forms)
 51457  //
 51458  //    * VFNMADDSS xmm, xmm, xmm, xmm    [FMA4]
 51459  //    * VFNMADDSS m32, xmm, xmm, xmm    [FMA4]
 51460  //    * VFNMADDSS xmm, m32, xmm, xmm    [FMA4]
 51461  //
func (self *Program) VFNMADDSS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VFNMADDSS", 4, Operands { v0, v1, v2, v3 })
    // Scalar single-precision FMA4 form (opcode 0x7a); same encoding scheme
    // as VFNMADDSD but with m32 memory operands.
    // VFNMADDSS xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Two equivalent FMA4 register-register encodings; the fourth operand
        // is carried in bits 7:4 of a trailing immediate byte.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[2]) << 3))
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFNMADDSS m32, xmm, xmm, xmm
    if isM32(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x7a)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFNMADDSS xmm, m32, xmm, xmm
    if isXMM(v0) && isM32(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x7a)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No operand pattern matched: the arguments are not a valid form.
    if p.len == 0 {
        panic("invalid operands for VFNMADDSS")
    }
    return p
}
 51512  
 51513  // VFNMSUB132PD performs "Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values".
 51514  //
 51515  // Mnemonic        : VFNMSUB132PD
 51516  // Supported forms : (11 forms)
 51517  //
 51518  //    * VFNMSUB132PD xmm, xmm, xmm                   [FMA3]
 51519  //    * VFNMSUB132PD m128, xmm, xmm                  [FMA3]
 51520  //    * VFNMSUB132PD ymm, ymm, ymm                   [FMA3]
 51521  //    * VFNMSUB132PD m256, ymm, ymm                  [FMA3]
 51522  //    * VFNMSUB132PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
 51523  //    * VFNMSUB132PD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 51524  //    * VFNMSUB132PD zmm, zmm, zmm{k}{z}             [AVX512F]
 51525  //    * VFNMSUB132PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 51526  //    * VFNMSUB132PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 51527  //    * VFNMSUB132PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 51528  //    * VFNMSUB132PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 51529  //
func (self *Program) VFNMSUB132PD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The AVX-512 rounding-control form carries an extra {er} operand, so this
    // instruction accepts either 3 or 4 operands.
    switch len(vv) {
        case 0  : p = self.alloc("VFNMSUB132PD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMSUB132PD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMSUB132PD takes 3 or 4 operands")
    }
    // VFNMSUB132PD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix (0xC4): v[2] is ModRM.reg (dest),
            // v[0] is ModRM.rm, v[1] goes in VEX.vvvv; opcode 0x9E.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0x9e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB132PD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x9e)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMSUB132PD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit VEX variant (0xfd instead of 0xf9 sets VEX.L).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit(0x9e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB132PD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x9e)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMSUB132PD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 512-bit form; disp8 is scaled by 64 (full vector width).
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x9e)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFNMSUB132PD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (0x62) with static rounding:
            // v[0] is the {er} mode, folded into the prefix via vcode.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x9e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMSUB132PD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register-register form, 512-bit (trailing 0x40 in byte 4).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x9e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB132PD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 128-bit (VL) form; disp8 scaled by 16.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x9e)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFNMSUB132PD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register-register form, 128-bit (trailing 0x00 in byte 4).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x9e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB132PD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 256-bit (VL) form; disp8 scaled by 32.
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x9e)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFNMSUB132PD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register-register form, 256-bit (trailing 0x20 in byte 4).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x9e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand pattern matched: the arguments are not a valid form.
    if p.len == 0 {
        panic("invalid operands for VFNMSUB132PD")
    }
    return p
}
 51668  
 51669  // VFNMSUB132PS performs "Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values".
 51670  //
 51671  // Mnemonic        : VFNMSUB132PS
 51672  // Supported forms : (11 forms)
 51673  //
 51674  //    * VFNMSUB132PS xmm, xmm, xmm                   [FMA3]
 51675  //    * VFNMSUB132PS m128, xmm, xmm                  [FMA3]
 51676  //    * VFNMSUB132PS ymm, ymm, ymm                   [FMA3]
 51677  //    * VFNMSUB132PS m256, ymm, ymm                  [FMA3]
 51678  //    * VFNMSUB132PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
 51679  //    * VFNMSUB132PS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 51680  //    * VFNMSUB132PS zmm, zmm, zmm{k}{z}             [AVX512F]
 51681  //    * VFNMSUB132PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 51682  //    * VFNMSUB132PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 51683  //    * VFNMSUB132PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 51684  //    * VFNMSUB132PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 51685  //
func (self *Program) VFNMSUB132PS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Structure mirrors VFNMSUB132PD; the single-precision variant differs in
    // the prefix bytes (0x79/0x7d vs. 0xf9/0xfd — presumably the W bit) and in
    // using m32bcst broadcasts. The {er} form adds a fourth operand.
    switch len(vv) {
        case 0  : p = self.alloc("VFNMSUB132PS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMSUB132PS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMSUB132PS takes 3 or 4 operands")
    }
    // VFNMSUB132PS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix (0xC4): v[2] is ModRM.reg (dest),
            // v[0] is ModRM.rm, v[1] goes in VEX.vvvv; opcode 0x9E.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x9e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB132PS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x9e)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMSUB132PS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit VEX variant (0x7d instead of 0x79 sets VEX.L).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x9e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB132PS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x9e)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMSUB132PS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 512-bit form; disp8 is scaled by 64 (full vector width).
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x9e)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFNMSUB132PS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (0x62) with static rounding:
            // v[0] is the {er} mode, folded into the prefix via vcode.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x9e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMSUB132PS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register-register form, 512-bit (trailing 0x40 in byte 4).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x9e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB132PS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 128-bit (VL) form; disp8 scaled by 16.
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x9e)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFNMSUB132PS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register-register form, 128-bit (trailing 0x00 in byte 4).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x9e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB132PS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 256-bit (VL) form; disp8 scaled by 32.
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x9e)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFNMSUB132PS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register-register form, 256-bit (trailing 0x20 in byte 4).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x9e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand pattern matched: the arguments are not a valid form.
    if p.len == 0 {
        panic("invalid operands for VFNMSUB132PS")
    }
    return p
}
 51824  
 51825  // VFNMSUB132SD performs "Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values".
 51826  //
 51827  // Mnemonic        : VFNMSUB132SD
 51828  // Supported forms : (5 forms)
 51829  //
 51830  //    * VFNMSUB132SD xmm, xmm, xmm                [FMA3]
 51831  //    * VFNMSUB132SD m64, xmm, xmm                [FMA3]
 51832  //    * VFNMSUB132SD m64, xmm, xmm{k}{z}          [AVX512F]
 51833  //    * VFNMSUB132SD {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
 51834  //    * VFNMSUB132SD xmm, xmm, xmm{k}{z}          [AVX512F]
 51835  //
func (self *Program) VFNMSUB132SD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // 3 operands for the register/memory forms; a 4th leading {er} operand
    // selects the embedded-rounding EVEX form.
    switch len(vv) {
        case 0  : p = self.alloc("VFNMSUB132SD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMSUB132SD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMSUB132SD takes 3 or 4 operands")
    }
    // VFNMSUB132SD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX.66.0F38.W1 9F /r — three-byte VEX prefix, opcode, register-direct ModRM.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0x9f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB132SD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX prefix built from the memory operand; disp scale 1 (no disp8 compression under VEX).
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x9f)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMSUB132SD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded; mrsd emits ModRM/SIB with disp8*N compression, N = 8 (one double).
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x9f)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VFNMSUB132SD {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix with the b bit set (0x10) and the rounding
            // mode (vcode of the {er} operand) folded into the L'L field.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x9f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMSUB132SD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62, P0 (map + R/X/B), P1 (W/vvvv/pp),
            // P2 (z / L'L / V' / aaa mask), then opcode 0x9F and register-direct ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x9f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFNMSUB132SD")
    }
    return p
}
 51906  
 51907  // VFNMSUB132SS performs "Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values".
 51908  //
 51909  // Mnemonic        : VFNMSUB132SS
 51910  // Supported forms : (5 forms)
 51911  //
 51912  //    * VFNMSUB132SS xmm, xmm, xmm                [FMA3]
 51913  //    * VFNMSUB132SS m32, xmm, xmm                [FMA3]
 51914  //    * VFNMSUB132SS m32, xmm, xmm{k}{z}          [AVX512F]
 51915  //    * VFNMSUB132SS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
 51916  //    * VFNMSUB132SS xmm, xmm, xmm{k}{z}          [AVX512F]
 51917  //
func (self *Program) VFNMSUB132SS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // 3 operands for the register/memory forms; a 4th leading {er} operand
    // selects the embedded-rounding EVEX form.
    switch len(vv) {
        case 0  : p = self.alloc("VFNMSUB132SS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMSUB132SS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMSUB132SS takes 3 or 4 operands")
    }
    // VFNMSUB132SS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX.66.0F38.W0 9F /r — three-byte VEX prefix, opcode, register-direct ModRM.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x9f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB132SS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX prefix built from the memory operand; disp scale 1 (no disp8 compression under VEX).
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x9f)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMSUB132SS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded; mrsd emits ModRM/SIB with disp8*N compression, N = 4 (one float).
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x9f)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VFNMSUB132SS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix with the b bit set (0x10) and the rounding
            // mode (vcode of the {er} operand) folded into the L'L field.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x9f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMSUB132SS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62, P0 (map + R/X/B), P1 (W/vvvv/pp),
            // P2 (z / L'L / V' / aaa mask), then opcode 0x9F and register-direct ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x9f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFNMSUB132SS")
    }
    return p
}
 51988  
 51989  // VFNMSUB213PD performs "Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values".
 51990  //
 51991  // Mnemonic        : VFNMSUB213PD
 51992  // Supported forms : (11 forms)
 51993  //
 51994  //    * VFNMSUB213PD xmm, xmm, xmm                   [FMA3]
 51995  //    * VFNMSUB213PD m128, xmm, xmm                  [FMA3]
 51996  //    * VFNMSUB213PD ymm, ymm, ymm                   [FMA3]
 51997  //    * VFNMSUB213PD m256, ymm, ymm                  [FMA3]
 51998  //    * VFNMSUB213PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
 51999  //    * VFNMSUB213PD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 52000  //    * VFNMSUB213PD zmm, zmm, zmm{k}{z}             [AVX512F]
 52001  //    * VFNMSUB213PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 52002  //    * VFNMSUB213PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 52003  //    * VFNMSUB213PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 52004  //    * VFNMSUB213PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 52005  //
func (self *Program) VFNMSUB213PD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // 3 operands for the register/memory forms; a 4th leading {er} operand
    // selects the embedded-rounding EVEX form.
    switch len(vv) {
        case 0  : p = self.alloc("VFNMSUB213PD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMSUB213PD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMSUB213PD takes 3 or 4 operands")
    }
    // VFNMSUB213PD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX.66.0F38.W1 AE /r — three-byte VEX prefix, opcode, register-direct ModRM.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0xae)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB213PD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX memory form; disp scale 1 (no disp8 compression under VEX).
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xae)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMSUB213PD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same VEX pattern at 256-bit (L bit set in the 0xfd byte).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit(0xae)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB213PD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xae)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMSUB213PD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; bcode(v[0]) sets the broadcast bit, and mrsd
            // uses disp8*N compression with N = 64 (full 512-bit vector).
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xae)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFNMSUB213PD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix with the b bit set (0x10) and the rounding
            // mode (vcode of the {er} operand) folded into the L'L field.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xae)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMSUB213PD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62, P0 (map + R/X/B), P1 (W/vvvv/pp),
            // P2 (z / L'L / V' / aaa mask; 0x40 selects 512-bit), opcode, ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xae)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB213PD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit VL variant; disp8*N with N = 16.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xae)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFNMSUB213PD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX register pattern at 128-bit (L'L = 0x00 in P2).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xae)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB213PD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit VL variant; disp8*N with N = 32.
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xae)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFNMSUB213PD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX register pattern at 256-bit (L'L = 0x20 in P2).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xae)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFNMSUB213PD")
    }
    return p
}
 52144  
 52145  // VFNMSUB213PS performs "Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values".
 52146  //
 52147  // Mnemonic        : VFNMSUB213PS
 52148  // Supported forms : (11 forms)
 52149  //
 52150  //    * VFNMSUB213PS xmm, xmm, xmm                   [FMA3]
 52151  //    * VFNMSUB213PS m128, xmm, xmm                  [FMA3]
 52152  //    * VFNMSUB213PS ymm, ymm, ymm                   [FMA3]
 52153  //    * VFNMSUB213PS m256, ymm, ymm                  [FMA3]
 52154  //    * VFNMSUB213PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
 52155  //    * VFNMSUB213PS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 52156  //    * VFNMSUB213PS zmm, zmm, zmm{k}{z}             [AVX512F]
 52157  //    * VFNMSUB213PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 52158  //    * VFNMSUB213PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 52159  //    * VFNMSUB213PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 52160  //    * VFNMSUB213PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 52161  //
func (self *Program) VFNMSUB213PS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // 3 operands for the register/memory forms; a 4th leading {er} operand
    // selects the embedded-rounding EVEX form.
    switch len(vv) {
        case 0  : p = self.alloc("VFNMSUB213PS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMSUB213PS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMSUB213PS takes 3 or 4 operands")
    }
    // VFNMSUB213PS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX.66.0F38.W0 AE /r — three-byte VEX prefix, opcode, register-direct ModRM.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xae)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB213PS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX memory form; disp scale 1 (no disp8 compression under VEX).
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xae)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMSUB213PS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same VEX pattern at 256-bit (L bit set in the 0x7d byte).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0xae)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB213PS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xae)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMSUB213PS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; bcode(v[0]) sets the broadcast bit, and mrsd
            // uses disp8*N compression with N = 64 (full 512-bit vector).
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xae)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFNMSUB213PS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix with the b bit set (0x10) and the rounding
            // mode (vcode of the {er} operand) folded into the L'L field.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xae)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMSUB213PS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62, P0 (map + R/X/B), P1 (W/vvvv/pp),
            // P2 (z / L'L / V' / aaa mask; 0x40 selects 512-bit), opcode, ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xae)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB213PS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit VL variant; disp8*N with N = 16.
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xae)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFNMSUB213PS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX register pattern at 128-bit (L'L = 0x00 in P2).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xae)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB213PS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit VL variant; disp8*N with N = 32.
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xae)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFNMSUB213PS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX register pattern at 256-bit (L'L = 0x20 in P2).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xae)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFNMSUB213PS")
    }
    return p
}
 52300  
 52301  // VFNMSUB213SD performs "Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values".
 52302  //
 52303  // Mnemonic        : VFNMSUB213SD
 52304  // Supported forms : (5 forms)
 52305  //
 52306  //    * VFNMSUB213SD xmm, xmm, xmm                [FMA3]
 52307  //    * VFNMSUB213SD m64, xmm, xmm                [FMA3]
 52308  //    * VFNMSUB213SD m64, xmm, xmm{k}{z}          [AVX512F]
 52309  //    * VFNMSUB213SD {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
 52310  //    * VFNMSUB213SD xmm, xmm, xmm{k}{z}          [AVX512F]
 52311  //
func (self *Program) VFNMSUB213SD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // 3 operands for the register/memory forms; a 4th leading {er} operand
    // selects the embedded-rounding EVEX form.
    switch len(vv) {
        case 0  : p = self.alloc("VFNMSUB213SD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMSUB213SD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMSUB213SD takes 3 or 4 operands")
    }
    // VFNMSUB213SD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX.66.0F38.W1 AF /r — three-byte VEX prefix, opcode, register-direct ModRM.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0xaf)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB213SD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX prefix built from the memory operand; disp scale 1 (no disp8 compression under VEX).
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xaf)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMSUB213SD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded; mrsd emits ModRM/SIB with disp8*N compression, N = 8 (one double).
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xaf)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VFNMSUB213SD {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix with the b bit set (0x10) and the rounding
            // mode (vcode of the {er} operand) folded into the L'L field.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xaf)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMSUB213SD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62, P0 (map + R/X/B), P1 (W/vvvv/pp),
            // P2 (z / L'L / V' / aaa mask), then opcode 0xAF and register-direct ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xaf)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFNMSUB213SD")
    }
    return p
}
 52382  
 52383  // VFNMSUB213SS performs "Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values".
 52384  //
 52385  // Mnemonic        : VFNMSUB213SS
 52386  // Supported forms : (5 forms)
 52387  //
 52388  //    * VFNMSUB213SS xmm, xmm, xmm                [FMA3]
 52389  //    * VFNMSUB213SS m32, xmm, xmm                [FMA3]
 52390  //    * VFNMSUB213SS m32, xmm, xmm{k}{z}          [AVX512F]
 52391  //    * VFNMSUB213SS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
 52392  //    * VFNMSUB213SS xmm, xmm, xmm{k}{z}          [AVX512F]
 52393  //
func (self *Program) VFNMSUB213SS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // 3 operands for the register/memory forms; a 4th leading {er} operand
    // selects the embedded-rounding EVEX form.
    switch len(vv) {
        case 0  : p = self.alloc("VFNMSUB213SS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMSUB213SS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMSUB213SS takes 3 or 4 operands")
    }
    // VFNMSUB213SS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX.66.0F38.W0 AF /r — three-byte VEX prefix, opcode, register-direct ModRM.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xaf)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB213SS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX prefix built from the memory operand; disp scale 1 (no disp8 compression under VEX).
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xaf)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMSUB213SS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded; mrsd emits ModRM/SIB with disp8*N compression, N = 4 (one float).
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xaf)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VFNMSUB213SS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix with the b bit set (0x10) and the rounding
            // mode (vcode of the {er} operand) folded into the L'L field.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xaf)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMSUB213SS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62, P0 (map + R/X/B), P1 (W/vvvv/pp),
            // P2 (z / L'L / V' / aaa mask), then opcode 0xAF and register-direct ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xaf)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFNMSUB213SS")
    }
    return p
}
 52464  
 52465  // VFNMSUB231PD performs "Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values".
 52466  //
 52467  // Mnemonic        : VFNMSUB231PD
 52468  // Supported forms : (11 forms)
 52469  //
 52470  //    * VFNMSUB231PD xmm, xmm, xmm                   [FMA3]
 52471  //    * VFNMSUB231PD m128, xmm, xmm                  [FMA3]
 52472  //    * VFNMSUB231PD ymm, ymm, ymm                   [FMA3]
 52473  //    * VFNMSUB231PD m256, ymm, ymm                  [FMA3]
 52474  //    * VFNMSUB231PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
 52475  //    * VFNMSUB231PD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 52476  //    * VFNMSUB231PD zmm, zmm, zmm{k}{z}             [AVX512F]
 52477  //    * VFNMSUB231PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 52478  //    * VFNMSUB231PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 52479  //    * VFNMSUB231PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 52480  //    * VFNMSUB231PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 52481  //
func (self *Program) VFNMSUB231PD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // The variadic tail carries an optional 4th operand, accepted only by the
    // {er} (embedded-rounding) AVX-512 form below; all other forms take 3.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VFNMSUB231PD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMSUB231PD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMSUB231PD takes 3 or 4 operands")
    }
    // Each matching operand form registers one (or more) candidate encoders
    // via p.add; selection between candidates happens later in the assembler.
    // VFNMSUB231PD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix (0xc4 ...), then opcode 0xbe
            // and a register/register ModRM byte (0xc0 | reg<<3 | rm).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0xbe)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB231PD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // vex3 derives the prefix from the memory operand; mrsd emits
            // ModRM/SIB/displacement with scale 1 (no disp8 compression).
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xbe)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMSUB231PD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit(0xbe)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB231PD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xbe)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMSUB231PD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded form; the final mrsd argument (64) matches the
            // memory operand width in bytes, used to scale the displacement.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xbe)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFNMSUB231PD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 4-byte EVEX prefix (0x62 ...); v[0] is the
            // rounding-mode operand folded into the prefix via vcode.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xbe)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMSUB231PD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xbe)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB231PD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xbe)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFNMSUB231PD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xbe)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB231PD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xbe)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFNMSUB231PD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xbe)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand form matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VFNMSUB231PD")
    }
    return p
}
 52620  
 52621  // VFNMSUB231PS performs "Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values".
 52622  //
 52623  // Mnemonic        : VFNMSUB231PS
 52624  // Supported forms : (11 forms)
 52625  //
 52626  //    * VFNMSUB231PS xmm, xmm, xmm                   [FMA3]
 52627  //    * VFNMSUB231PS m128, xmm, xmm                  [FMA3]
 52628  //    * VFNMSUB231PS ymm, ymm, ymm                   [FMA3]
 52629  //    * VFNMSUB231PS m256, ymm, ymm                  [FMA3]
 52630  //    * VFNMSUB231PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
 52631  //    * VFNMSUB231PS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
 52632  //    * VFNMSUB231PS zmm, zmm, zmm{k}{z}             [AVX512F]
 52633  //    * VFNMSUB231PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 52634  //    * VFNMSUB231PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 52635  //    * VFNMSUB231PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 52636  //    * VFNMSUB231PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 52637  //
func (self *Program) VFNMSUB231PS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // The variadic tail carries an optional 4th operand, accepted only by the
    // {er} (embedded-rounding) AVX-512 form below; all other forms take 3.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VFNMSUB231PS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMSUB231PS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMSUB231PS takes 3 or 4 operands")
    }
    // Each matching operand form registers a candidate encoder via p.add.
    // VFNMSUB231PS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix (0xc4 ...), then opcode 0xbe
            // and a register/register ModRM byte.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xbe)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB231PS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // vex3 derives the prefix from the memory operand; mrsd emits
            // ModRM/SIB/displacement with scale 1 (no disp8 compression).
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xbe)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMSUB231PS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0xbe)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB231PS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xbe)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMSUB231PS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded form; the final mrsd argument (64) matches the
            // memory operand width in bytes, used to scale the displacement.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xbe)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VFNMSUB231PS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 4-byte EVEX prefix (0x62 ...); v[0] is the
            // rounding-mode operand folded into the prefix via vcode.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xbe)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMSUB231PS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xbe)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB231PS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xbe)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VFNMSUB231PS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xbe)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB231PS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xbe)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VFNMSUB231PS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xbe)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand form matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VFNMSUB231PS")
    }
    return p
}
 52776  
 52777  // VFNMSUB231SD performs "Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values".
 52778  //
 52779  // Mnemonic        : VFNMSUB231SD
 52780  // Supported forms : (5 forms)
 52781  //
 52782  //    * VFNMSUB231SD xmm, xmm, xmm                [FMA3]
 52783  //    * VFNMSUB231SD m64, xmm, xmm                [FMA3]
 52784  //    * VFNMSUB231SD m64, xmm, xmm{k}{z}          [AVX512F]
 52785  //    * VFNMSUB231SD {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
 52786  //    * VFNMSUB231SD xmm, xmm, xmm{k}{z}          [AVX512F]
 52787  //
func (self *Program) VFNMSUB231SD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // The variadic tail carries an optional 4th operand, accepted only by the
    // {er} (embedded-rounding) AVX-512 form below; all other forms take 3.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VFNMSUB231SD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMSUB231SD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMSUB231SD takes 3 or 4 operands")
    }
    // Each matching operand form registers a candidate encoder via p.add.
    // VFNMSUB231SD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix (0xc4 ...), then opcode 0xbf
            // and a register/register ModRM byte.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))
            m.emit(0xbf)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB231SD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // vex3 derives the prefix from the memory operand; mrsd emits
            // ModRM/SIB/displacement with scale 1 (no disp8 compression).
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xbf)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMSUB231SD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded form; the final mrsd argument (8) matches the
            // 64-bit memory operand width, used to scale the displacement.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xbf)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VFNMSUB231SD {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 4-byte EVEX prefix (0x62 ...); v[0] is the
            // rounding-mode operand folded into the prefix via vcode.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xbf)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMSUB231SD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xbf)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand form matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VFNMSUB231SD")
    }
    return p
}
 52858  
 52859  // VFNMSUB231SS performs "Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values".
 52860  //
 52861  // Mnemonic        : VFNMSUB231SS
 52862  // Supported forms : (5 forms)
 52863  //
 52864  //    * VFNMSUB231SS xmm, xmm, xmm                [FMA3]
 52865  //    * VFNMSUB231SS m32, xmm, xmm                [FMA3]
 52866  //    * VFNMSUB231SS m32, xmm, xmm{k}{z}          [AVX512F]
 52867  //    * VFNMSUB231SS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
 52868  //    * VFNMSUB231SS xmm, xmm, xmm{k}{z}          [AVX512F]
 52869  //
func (self *Program) VFNMSUB231SS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // The variadic tail carries an optional 4th operand, accepted only by the
    // {er} (embedded-rounding) AVX-512 form below; all other forms take 3.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VFNMSUB231SS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VFNMSUB231SS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VFNMSUB231SS takes 3 or 4 operands")
    }
    // Each matching operand form registers a candidate encoder via p.add.
    // VFNMSUB231SS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix (0xc4 ...), then opcode 0xbf
            // and a register/register ModRM byte.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0xbf)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VFNMSUB231SS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_FMA3)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // vex3 derives the prefix from the memory operand; mrsd emits
            // ModRM/SIB/displacement with scale 1 (no disp8 compression).
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xbf)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VFNMSUB231SS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded form; the final mrsd argument (4) matches the
            // 32-bit memory operand width, used to scale the displacement.
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xbf)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VFNMSUB231SS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 4-byte EVEX prefix (0x62 ...); v[0] is the
            // rounding-mode operand folded into the prefix via vcode.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xbf)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VFNMSUB231SS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xbf)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand form matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VFNMSUB231SS")
    }
    return p
}
 52940  
 52941  // VFNMSUBPD performs "Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values".
 52942  //
 52943  // Mnemonic        : VFNMSUBPD
 52944  // Supported forms : (6 forms)
 52945  //
 52946  //    * VFNMSUBPD xmm, xmm, xmm, xmm     [FMA4]
 52947  //    * VFNMSUBPD m128, xmm, xmm, xmm    [FMA4]
 52948  //    * VFNMSUBPD xmm, m128, xmm, xmm    [FMA4]
 52949  //    * VFNMSUBPD ymm, ymm, ymm, ymm     [FMA4]
 52950  //    * VFNMSUBPD m256, ymm, ymm, ymm    [FMA4]
 52951  //    * VFNMSUBPD ymm, m256, ymm, ymm    [FMA4]
 52952  //
func (self *Program) VFNMSUBPD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // FMA4 form: always exactly 4 operands (no variadic tail, unlike the
    // FMA3/AVX-512 variants in this file).
    p := self.alloc("VFNMSUBPD", 4, Operands { v0, v1, v2, v3 })
    // VFNMSUBPD xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Two equivalent encodings are registered for the all-register form:
        // either v0 or v1 can go in the ModRM r/m field, with the other
        // encoded in the high nibble of the trailing byte (FMA4 4th-operand
        // byte). The assembler picks between them later.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[2]) << 3))
            m.emit(0x7d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x7d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFNMSUBPD m128, xmm, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory in the r/m slot; v[1] is carried in the trailing byte.
            m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x7d)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFNMSUBPD xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory in the r/m slot; v[0] is carried in the trailing byte.
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x7d)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFNMSUBPD ymm, ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Same dual-encoding scheme as the xmm form, with the 256-bit
        // variant of the VEX prefix.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit(0x7d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit(0x7d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFNMSUBPD m256, ymm, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x85, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x7d)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFNMSUBPD ymm, m256, ymm, ymm
    if isYMM(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x7d)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No operand form matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VFNMSUBPD")
    }
    return p
}
 53046  
 53047  // VFNMSUBPS performs "Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values".
 53048  //
 53049  // Mnemonic        : VFNMSUBPS
 53050  // Supported forms : (6 forms)
 53051  //
 53052  //    * VFNMSUBPS xmm, xmm, xmm, xmm     [FMA4]
 53053  //    * VFNMSUBPS m128, xmm, xmm, xmm    [FMA4]
 53054  //    * VFNMSUBPS xmm, m128, xmm, xmm    [FMA4]
 53055  //    * VFNMSUBPS ymm, ymm, ymm, ymm     [FMA4]
 53056  //    * VFNMSUBPS m256, ymm, ymm, ymm    [FMA4]
 53057  //    * VFNMSUBPS ymm, m256, ymm, ymm    [FMA4]
 53058  //
func (self *Program) VFNMSUBPS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VFNMSUBPS", 4, Operands { v0, v1, v2, v3 })
    // VFNMSUBPS xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // FMA4 register-register forms have two legal encodings: VEX.W selects
        // which source goes in ModRM.rm and which rides in the high nibble of
        // the trailing /is4 immediate. Both are registered; the instruction
        // machinery presumably picks one downstream — note the mirrored
        // v[0]/v[1] roles between the two closures.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX prefix
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))  // VEX.R/X + map select (0F3A)
            m.emit(0xf9 ^ (hlcode(v[2]) << 3))                      // VEX.W=1, vvvv = v[2]
            m.emit(0x7c)                                            // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))           // ModRM: reg = dst, rm = v[0]
            m.emit(hlcode(v[1]) << 4)                               // /is4: v[1] in imm8[7:4]
        })
        // Alternate encoding: VEX.W=0, v[1] in ModRM.rm, v[0] in the imm8 nibble.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x7c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFNMSUBPS m128, xmm, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // vex3 builds the C4 prefix from the memory operand; 0x81 carries W=1.
            m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x7c)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFNMSUBPS xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            // W=0 variant (0x01): memory operand is the second source.
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x7c)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFNMSUBPS ymm, ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Same dual-encoding scheme as the xmm form, with VEX.L set for 256-bit.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit(0x7c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit(0x7c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFNMSUBPS m256, ymm, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x85, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x7c)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFNMSUBPS ymm, m256, ymm, ymm
    if isYMM(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x7c)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFNMSUBPS")
    }
    return p
}
 53152  
 53153  // VFNMSUBSD performs "Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values".
 53154  //
 53155  // Mnemonic        : VFNMSUBSD
 53156  // Supported forms : (3 forms)
 53157  //
 53158  //    * VFNMSUBSD xmm, xmm, xmm, xmm    [FMA4]
 53159  //    * VFNMSUBSD m64, xmm, xmm, xmm    [FMA4]
 53160  //    * VFNMSUBSD xmm, m64, xmm, xmm    [FMA4]
 53161  //
func (self *Program) VFNMSUBSD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VFNMSUBSD", 4, Operands { v0, v1, v2, v3 })
    // VFNMSUBSD xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Two equivalent FMA4 register encodings: VEX.W selects which source
        // sits in ModRM.rm and which rides in the /is4 imm8 high nibble.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX prefix
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[2]) << 3))                      // VEX.W=1
            m.emit(0x7f)                                            // opcode (scalar double)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)                               // /is4: v[1] in imm8[7:4]
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))                      // VEX.W=0: roles swapped
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFNMSUBSD m64, xmm, xmm, xmm
    if isM64(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x7f)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFNMSUBSD xmm, m64, xmm, xmm
    if isXMM(v0) && isM64(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x7f)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFNMSUBSD")
    }
    return p
}
 53212  
 53213  // VFNMSUBSS performs "Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values".
 53214  //
 53215  // Mnemonic        : VFNMSUBSS
 53216  // Supported forms : (3 forms)
 53217  //
 53218  //    * VFNMSUBSS xmm, xmm, xmm, xmm    [FMA4]
 53219  //    * VFNMSUBSS m32, xmm, xmm, xmm    [FMA4]
 53220  //    * VFNMSUBSS xmm, m32, xmm, xmm    [FMA4]
 53221  //
func (self *Program) VFNMSUBSS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VFNMSUBSS", 4, Operands { v0, v1, v2, v3 })
    // VFNMSUBSS xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        // Two equivalent FMA4 register encodings: VEX.W selects which source
        // sits in ModRM.rm and which rides in the /is4 imm8 high nibble.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX prefix
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[2]) << 3))                      // VEX.W=1
            m.emit(0x7e)                                            // opcode (scalar single)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)                               // /is4: v[1] in imm8[7:4]
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))                      // VEX.W=0: roles swapped
            m.emit(0x7e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VFNMSUBSS m32, xmm, xmm, xmm
    if isM32(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0x7e)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VFNMSUBSS xmm, m32, xmm, xmm
    if isXMM(v0) && isM32(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_FMA4)
        p.domain = DomainFMA
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x7e)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFNMSUBSS")
    }
    return p
}
 53272  
 53273  // VFPCLASSPD performs "Test Class of Packed Double-Precision Floating-Point Values".
 53274  //
 53275  // Mnemonic        : VFPCLASSPD
 53276  // Supported forms : (6 forms)
 53277  //
 53278  //    * VFPCLASSPD imm8, m512/m64bcst, k{k}    [AVX512DQ]
 53279  //    * VFPCLASSPD imm8, zmm, k{k}             [AVX512DQ]
 53280  //    * VFPCLASSPD imm8, m128/m64bcst, k{k}    [AVX512DQ,AVX512VL]
 53281  //    * VFPCLASSPD imm8, m256/m64bcst, k{k}    [AVX512DQ,AVX512VL]
 53282  //    * VFPCLASSPD imm8, xmm, k{k}             [AVX512DQ,AVX512VL]
 53283  //    * VFPCLASSPD imm8, ymm, k{k}             [AVX512DQ,AVX512VL]
 53284  //
func (self *Program) VFPCLASSPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VFPCLASSPD", 3, Operands { v0, v1, v2 })
    // VFPCLASSPD imm8, m512/m64bcst, k{k}
    // Result is a mask register; imm8 selects which FP classes to test.
    if isImm8(v0) && isM512M64bcst(v1) && isKk(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b10, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), 0, bcode(v[1]))
            m.emit(0x66)
            m.mrsd(lcode(v[2]), addr(v[1]), 64)     // disp8*64: full 512-bit memory operand
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFPCLASSPD imm8, zmm, k{k}
    if isImm8(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        // Hand-rolled 62h EVEX prefix for the register form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)                            // EVEX.W=1 (double precision)
            m.emit(kcode(v[2]) | 0x48)              // 0x48: 512-bit vector length + opmask bits
            m.emit(0x66)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFPCLASSPD imm8, m128/m64bcst, k{k}
    if isImm8(v0) && isM128M64bcst(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b00, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), 0, bcode(v[1]))
            m.emit(0x66)
            m.mrsd(lcode(v[2]), addr(v[1]), 16)     // disp8*16 for the 128-bit form
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFPCLASSPD imm8, m256/m64bcst, k{k}
    if isImm8(v0) && isM256M64bcst(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b01, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), 0, bcode(v[1]))
            m.emit(0x66)
            m.mrsd(lcode(v[2]), addr(v[1]), 32)     // disp8*32 for the 256-bit form
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFPCLASSPD imm8, xmm, k{k}
    if isImm8(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit(kcode(v[2]) | 0x08)              // 0x08: 128-bit vector length
            m.emit(0x66)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFPCLASSPD imm8, ymm, k{k}
    if isImm8(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit(kcode(v[2]) | 0x28)              // 0x28: 256-bit vector length
            m.emit(0x66)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFPCLASSPD")
    }
    return p
}
 53367  
 53368  // VFPCLASSPS performs "Test Class of Packed Single-Precision Floating-Point Values".
 53369  //
 53370  // Mnemonic        : VFPCLASSPS
 53371  // Supported forms : (6 forms)
 53372  //
 53373  //    * VFPCLASSPS imm8, m512/m32bcst, k{k}    [AVX512DQ]
 53374  //    * VFPCLASSPS imm8, zmm, k{k}             [AVX512DQ]
 53375  //    * VFPCLASSPS imm8, m128/m32bcst, k{k}    [AVX512DQ,AVX512VL]
 53376  //    * VFPCLASSPS imm8, m256/m32bcst, k{k}    [AVX512DQ,AVX512VL]
 53377  //    * VFPCLASSPS imm8, xmm, k{k}             [AVX512DQ,AVX512VL]
 53378  //    * VFPCLASSPS imm8, ymm, k{k}             [AVX512DQ,AVX512VL]
 53379  //
func (self *Program) VFPCLASSPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VFPCLASSPS", 3, Operands { v0, v1, v2 })
    // VFPCLASSPS imm8, m512/m32bcst, k{k}
    // Result is a mask register; imm8 selects which FP classes to test.
    // Identical layout to VFPCLASSPD except EVEX.W=0 (0x05 / 0x7d) for single
    // precision and m32 broadcast granularity.
    if isImm8(v0) && isM512M32bcst(v1) && isKk(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b10, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), 0, bcode(v[1]))
            m.emit(0x66)
            m.mrsd(lcode(v[2]), addr(v[1]), 64)     // disp8*64: full 512-bit memory operand
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFPCLASSPS imm8, zmm, k{k}
    if isImm8(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        // Hand-rolled 62h EVEX prefix for the register form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)                            // EVEX.W=0 (single precision)
            m.emit(kcode(v[2]) | 0x48)              // 0x48: 512-bit vector length + opmask bits
            m.emit(0x66)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFPCLASSPS imm8, m128/m32bcst, k{k}
    if isImm8(v0) && isM128M32bcst(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b00, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), 0, bcode(v[1]))
            m.emit(0x66)
            m.mrsd(lcode(v[2]), addr(v[1]), 16)     // disp8*16 for the 128-bit form
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFPCLASSPS imm8, m256/m32bcst, k{k}
    if isImm8(v0) && isM256M32bcst(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b01, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), 0, bcode(v[1]))
            m.emit(0x66)
            m.mrsd(lcode(v[2]), addr(v[1]), 32)     // disp8*32 for the 256-bit form
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFPCLASSPS imm8, xmm, k{k}
    if isImm8(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit(kcode(v[2]) | 0x08)              // 0x08: 128-bit vector length
            m.emit(0x66)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFPCLASSPS imm8, ymm, k{k}
    if isImm8(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit(kcode(v[2]) | 0x28)              // 0x28: 256-bit vector length
            m.emit(0x66)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFPCLASSPS")
    }
    return p
}
 53462  
 53463  // VFPCLASSSD performs "Test Class of Scalar Double-Precision Floating-Point Value".
 53464  //
 53465  // Mnemonic        : VFPCLASSSD
 53466  // Supported forms : (2 forms)
 53467  //
 53468  //    * VFPCLASSSD imm8, xmm, k{k}    [AVX512DQ]
 53469  //    * VFPCLASSSD imm8, m64, k{k}    [AVX512DQ]
 53470  //
func (self *Program) VFPCLASSSD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VFPCLASSSD", 3, Operands { v0, v1, v2 })
    // VFPCLASSSD imm8, xmm, k{k}
    // Scalar variant: tests only the low double; result goes to a mask register.
    if isImm8(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        // Hand-rolled 62h EVEX prefix for the register form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)                            // EVEX.W=1 (double precision)
            m.emit(kcode(v[2]) | 0x08)
            m.emit(0x67)                            // opcode (scalar, vs 0x66 packed)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFPCLASSSD imm8, m64, k{k}
    if isImm8(v0) && isM64(v1) && isKk(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b00, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), 0, 0)
            m.emit(0x67)
            m.mrsd(lcode(v[2]), addr(v[1]), 8)      // disp8*8: 64-bit scalar memory operand
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFPCLASSSD")
    }
    return p
}
 53503  
 53504  // VFPCLASSSS performs "Test Class of Scalar Single-Precision Floating-Point Value".
 53505  //
 53506  // Mnemonic        : VFPCLASSSS
 53507  // Supported forms : (2 forms)
 53508  //
 53509  //    * VFPCLASSSS imm8, xmm, k{k}    [AVX512DQ]
 53510  //    * VFPCLASSSS imm8, m32, k{k}    [AVX512DQ]
 53511  //
func (self *Program) VFPCLASSSS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VFPCLASSSS", 3, Operands { v0, v1, v2 })
    // VFPCLASSSS imm8, xmm, k{k}
    // Scalar variant: tests only the low single; result goes to a mask register.
    if isImm8(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        // Hand-rolled 62h EVEX prefix for the register form.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)                            // EVEX.W=0 (single precision)
            m.emit(kcode(v[2]) | 0x08)
            m.emit(0x67)                            // opcode (scalar, vs 0x66 packed)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VFPCLASSSS imm8, m32, k{k}
    if isImm8(v0) && isM32(v1) && isKk(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b00, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), 0, 0)
            m.emit(0x67)
            m.mrsd(lcode(v[2]), addr(v[1]), 4)      // disp8*4: 32-bit scalar memory operand
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFPCLASSSS")
    }
    return p
}
 53544  
 53545  // VFRCZPD performs "Extract Fraction Packed Double-Precision Floating-Point".
 53546  //
 53547  // Mnemonic        : VFRCZPD
 53548  // Supported forms : (4 forms)
 53549  //
 53550  //    * VFRCZPD xmm, xmm     [XOP]
 53551  //    * VFRCZPD m128, xmm    [XOP]
 53552  //    * VFRCZPD ymm, ymm     [XOP]
 53553  //    * VFRCZPD m256, ymm    [XOP]
 53554  //
func (self *Program) VFRCZPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VFRCZPD", 2, Operands { v0, v1 })
    // VFRCZPD xmm, xmm
    // AMD XOP instruction: encoded with the 8F three-byte prefix instead of C4.
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                            // XOP prefix byte
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))  // R/B bits + map select
            m.emit(0x78)                                            // 128-bit form
            m.emit(0x81)                                            // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))           // ModRM: reg = dst
        })
    }
    // VFRCZPD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x81)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VFRCZPD ymm, ymm
    if isYMM(v0) && isYMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7c)                                            // 256-bit form (L bit set)
            m.emit(0x81)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VFRCZPD m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x04, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x81)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFRCZPD")
    }
    return p
}
 53606  
 53607  // VFRCZPS performs "Extract Fraction Packed Single-Precision Floating-Point".
 53608  //
 53609  // Mnemonic        : VFRCZPS
 53610  // Supported forms : (4 forms)
 53611  //
 53612  //    * VFRCZPS xmm, xmm     [XOP]
 53613  //    * VFRCZPS m128, xmm    [XOP]
 53614  //    * VFRCZPS ymm, ymm     [XOP]
 53615  //    * VFRCZPS m256, ymm    [XOP]
 53616  //
func (self *Program) VFRCZPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VFRCZPS", 2, Operands { v0, v1 })
    // VFRCZPS xmm, xmm
    // AMD XOP instruction: encoded with the 8F three-byte prefix instead of C4.
    // Same layout as VFRCZPD with opcode 0x80 (single precision).
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                            // XOP prefix byte
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))  // R/B bits + map select
            m.emit(0x78)                                            // 128-bit form
            m.emit(0x80)                                            // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))           // ModRM: reg = dst
        })
    }
    // VFRCZPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x80)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VFRCZPS ymm, ymm
    if isYMM(v0) && isYMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7c)                                            // 256-bit form (L bit set)
            m.emit(0x80)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VFRCZPS m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x04, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x80)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFRCZPS")
    }
    return p
}
 53668  
 53669  // VFRCZSD performs "Extract Fraction Scalar Double-Precision Floating-Point".
 53670  //
 53671  // Mnemonic        : VFRCZSD
 53672  // Supported forms : (2 forms)
 53673  //
 53674  //    * VFRCZSD xmm, xmm    [XOP]
 53675  //    * VFRCZSD m64, xmm    [XOP]
 53676  //
func (self *Program) VFRCZSD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VFRCZSD", 2, Operands { v0, v1 })
    // VFRCZSD xmm, xmm
    // AMD XOP scalar-double variant: opcode 0x83, always 128-bit.
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                            // XOP prefix byte
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))  // R/B bits + map select
            m.emit(0x78)
            m.emit(0x83)                                            // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))           // ModRM: reg = dst
        })
    }
    // VFRCZSD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x83)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFRCZSD")
    }
    return p
}
 53706  
 53707  // VFRCZSS performs "Extract Fraction Scalar Single-Precision Floating Point".
 53708  //
 53709  // Mnemonic        : VFRCZSS
 53710  // Supported forms : (2 forms)
 53711  //
 53712  //    * VFRCZSS xmm, xmm    [XOP]
 53713  //    * VFRCZSS m32, xmm    [XOP]
 53714  //
func (self *Program) VFRCZSS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VFRCZSS", 2, Operands { v0, v1 })
    // VFRCZSS xmm, xmm
    // AMD XOP scalar-single variant: opcode 0x82, always 128-bit.
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                            // XOP prefix byte
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))  // R/B bits + map select
            m.emit(0x78)
            m.emit(0x82)                                            // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))           // ModRM: reg = dst
        })
    }
    // VFRCZSS m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x82)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VFRCZSS")
    }
    return p
}
 53744  
 53745  // VGATHERDPD performs "Gather Packed Double-Precision Floating-Point Values Using Signed Doubleword Indices".
 53746  //
 53747  // Mnemonic        : VGATHERDPD
 53748  // Supported forms : (5 forms)
 53749  //
 53750  //    * VGATHERDPD xmm, vm32x, xmm    [AVX2]
 53751  //    * VGATHERDPD ymm, vm32x, ymm    [AVX2]
 53752  //    * VGATHERDPD vm32y, zmm{k}      [AVX512F]
 53753  //    * VGATHERDPD vm32x, xmm{k}      [AVX512F,AVX512VL]
 53754  //    * VGATHERDPD vm32x, ymm{k}      [AVX512F,AVX512VL]
 53755  //
func (self *Program) VGATHERDPD(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    // Variadic because the AVX2 forms take 3 operands (dst, vm32x, mask ymm/xmm)
    // while the AVX-512 forms take 2 (vm32 index, masked destination).
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VGATHERDPD", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VGATHERDPD", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VGATHERDPD takes 2 or 3 operands")
    }
    // VGATHERDPD xmm, vm32x, xmm
    // AVX2 form: v[2] is the write-mask register (clobbered by the gather).
    if len(vv) == 1 && isXMM(v0) && isVMX(v1) && isXMM(vv[0]) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x92)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // VGATHERDPD ymm, vm32x, ymm
    if len(vv) == 1 && isYMM(v0) && isVMX(v1) && isYMM(vv[0]) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x92)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // VGATHERDPD vm32y, zmm{k}
    // AVX-512 forms use the EVEX opmask instead of a vector mask operand;
    // eight dword indices (ymm) address eight qword elements (zmm).
    if len(vv) == 0 && isEVEXVMY(v0) && isZMMk(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x92)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)      // disp8*8: qword element size
        })
    }
    // VGATHERDPD vm32x, xmm{k}
    if len(vv) == 0 && isEVEXVMX(v0) && isXMMk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x92)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VGATHERDPD vm32x, ymm{k}
    if len(vv) == 0 && isEVEXVMX(v0) && isYMMk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x92)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VGATHERDPD")
    }
    return p
}
 53818  
// VGATHERDPS performs "Gather Packed Single-Precision Floating-Point Values Using Signed Doubleword Indices".
//
// Mnemonic        : VGATHERDPS
// Supported forms : (5 forms)
//
//    * VGATHERDPS xmm, vm32x, xmm    [AVX2]
//    * VGATHERDPS ymm, vm32y, ymm    [AVX2]
//    * VGATHERDPS vm32z, zmm{k}      [AVX512F]
//    * VGATHERDPS vm32x, xmm{k}      [AVX512F,AVX512VL]
//    * VGATHERDPS vm32y, ymm{k}      [AVX512F,AVX512VL]
//
func (self *Program) VGATHERDPS(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    // Arity selects the instruction shape: 3 operands for the VEX/AVX2
    // forms (gather mask supplied in a vector register), 2 operands for
    // the EVEX/AVX-512 forms (mask supplied by the {k} opmask register).
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VGATHERDPS", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VGATHERDPS", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VGATHERDPS takes 2 or 3 operands")
    }
    // VGATHERDPS xmm, vm32x, xmm
    if len(vv) == 1 && isXMM(v0) && isVMX(v1) && isXMM(vv[0]) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix + opcode 0x92; the vm32x operand makes
            // mrsd emit a VSIB (vector-index) memory form. Trailing 1:
            // no disp8 scaling for VEX encodings.
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x92)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // VGATHERDPS ymm, vm32y, ymm
    if len(vv) == 1 && isYMM(v0) && isVMY(v1) && isYMM(vv[0]) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x92)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // VGATHERDPS vm32z, zmm{k}
    if len(vv) == 0 && isVMZ(v0) && isZMMk(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-prefixed form; the trailing 4 is the disp8*N
            // compression scale, matching the 4-byte packed-single element
            // (the PD variants of this file use 8).
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x92)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VGATHERDPS vm32x, xmm{k}
    if len(vv) == 0 && isEVEXVMX(v0) && isXMMk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x92)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VGATHERDPS vm32y, ymm{k}
    if len(vv) == 0 && isEVEXVMY(v0) && isYMMk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x92)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // No candidate form matched the operand types supplied by the caller.
    if p.len == 0 {
        panic("invalid operands for VGATHERDPS")
    }
    return p
}
 53892  
// VGATHERPF0DPD performs "Sparse Prefetch Packed Double-Precision Floating-Point Data Values with Signed Doubleword Indices Using T0 Hint".
//
// Mnemonic        : VGATHERPF0DPD
// Supported forms : (1 form)
//
//    * VGATHERPF0DPD vm32y{k}    [AVX512PF]
//
func (self *Program) VGATHERPF0DPD(v0 interface{}) *Instruction {
    p := self.alloc("VGATHERPF0DPD", 1, Operands { v0 })
    // VGATHERPF0DPD vm32y{k}
    if isVMYk(v0) {
        self.require(ISA_AVX512PF)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix + opcode 0xc6 (doubleword-index prefetch). The
            // first mrsd argument (1) is an opcode extension in the ModRM
            // reg field — the T1-hint variants below use 2; the trailing 8
            // is the disp8*N scale for the 64-bit data element.
            m.evex(0b10, 0x85, 0b10, 0, addr(v[0]), 0, kcode(v[0]), 0, 0)
            m.emit(0xc6)
            m.mrsd(1, addr(v[0]), 8)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VGATHERPF0DPD")
    }
    return p
}
 53917  
// VGATHERPF0DPS performs "Sparse Prefetch Packed Single-Precision Floating-Point Data Values with Signed Doubleword Indices Using T0 Hint".
//
// Mnemonic        : VGATHERPF0DPS
// Supported forms : (1 form)
//
//    * VGATHERPF0DPS vm32z{k}    [AVX512PF]
//
func (self *Program) VGATHERPF0DPS(v0 interface{}) *Instruction {
    p := self.alloc("VGATHERPF0DPS", 1, Operands { v0 })
    // VGATHERPF0DPS vm32z{k}
    if isVMZk(v0) {
        self.require(ISA_AVX512PF)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Opcode 0xc6 with /1 opcode extension (T0 hint); disp8*N
            // scale 4 matches the 32-bit single-precision element.
            m.evex(0b10, 0x05, 0b10, 0, addr(v[0]), 0, kcode(v[0]), 0, 0)
            m.emit(0xc6)
            m.mrsd(1, addr(v[0]), 4)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VGATHERPF0DPS")
    }
    return p
}
 53942  
// VGATHERPF0QPD performs "Sparse Prefetch Packed Double-Precision Floating-Point Data Values with Signed Quadword Indices Using T0 Hint".
//
// Mnemonic        : VGATHERPF0QPD
// Supported forms : (1 form)
//
//    * VGATHERPF0QPD vm64z{k}    [AVX512PF]
//
func (self *Program) VGATHERPF0QPD(v0 interface{}) *Instruction {
    p := self.alloc("VGATHERPF0QPD", 1, Operands { v0 })
    // VGATHERPF0QPD vm64z{k}
    if isVMZk(v0) {
        self.require(ISA_AVX512PF)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Opcode 0xc7 (quadword-index prefetch; the D-index variants
            // use 0xc6) with /1 extension for the T0 hint; disp8*N scale 8
            // matches the 64-bit double-precision element.
            m.evex(0b10, 0x85, 0b10, 0, addr(v[0]), 0, kcode(v[0]), 0, 0)
            m.emit(0xc7)
            m.mrsd(1, addr(v[0]), 8)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VGATHERPF0QPD")
    }
    return p
}
 53967  
// VGATHERPF0QPS performs "Sparse Prefetch Packed Single-Precision Floating-Point Data Values with Signed Quadword Indices Using T0 Hint".
//
// Mnemonic        : VGATHERPF0QPS
// Supported forms : (1 form)
//
//    * VGATHERPF0QPS vm64z{k}    [AVX512PF]
//
func (self *Program) VGATHERPF0QPS(v0 interface{}) *Instruction {
    p := self.alloc("VGATHERPF0QPS", 1, Operands { v0 })
    // VGATHERPF0QPS vm64z{k}
    if isVMZk(v0) {
        self.require(ISA_AVX512PF)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Opcode 0xc7 with /1 extension (T0 hint); disp8*N scale 4 for
            // the 32-bit single-precision element.
            m.evex(0b10, 0x05, 0b10, 0, addr(v[0]), 0, kcode(v[0]), 0, 0)
            m.emit(0xc7)
            m.mrsd(1, addr(v[0]), 4)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VGATHERPF0QPS")
    }
    return p
}
 53992  
// VGATHERPF1DPD performs "Sparse Prefetch Packed Double-Precision Floating-Point Data Values with Signed Doubleword Indices Using T1 Hint".
//
// Mnemonic        : VGATHERPF1DPD
// Supported forms : (1 form)
//
//    * VGATHERPF1DPD vm32y{k}    [AVX512PF]
//
func (self *Program) VGATHERPF1DPD(v0 interface{}) *Instruction {
    p := self.alloc("VGATHERPF1DPD", 1, Operands { v0 })
    // VGATHERPF1DPD vm32y{k}
    if isVMYk(v0) {
        self.require(ISA_AVX512PF)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same encoding as VGATHERPF0DPD except the ModRM reg-field
            // extension is 2 (T1 hint) instead of 1 (T0 hint).
            m.evex(0b10, 0x85, 0b10, 0, addr(v[0]), 0, kcode(v[0]), 0, 0)
            m.emit(0xc6)
            m.mrsd(2, addr(v[0]), 8)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VGATHERPF1DPD")
    }
    return p
}
 54017  
// VGATHERPF1DPS performs "Sparse Prefetch Packed Single-Precision Floating-Point Data Values with Signed Doubleword Indices Using T1 Hint".
//
// Mnemonic        : VGATHERPF1DPS
// Supported forms : (1 form)
//
//    * VGATHERPF1DPS vm32z{k}    [AVX512PF]
//
func (self *Program) VGATHERPF1DPS(v0 interface{}) *Instruction {
    p := self.alloc("VGATHERPF1DPS", 1, Operands { v0 })
    // VGATHERPF1DPS vm32z{k}
    if isVMZk(v0) {
        self.require(ISA_AVX512PF)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same encoding as VGATHERPF0DPS except the /2 opcode
            // extension selects the T1 hint.
            m.evex(0b10, 0x05, 0b10, 0, addr(v[0]), 0, kcode(v[0]), 0, 0)
            m.emit(0xc6)
            m.mrsd(2, addr(v[0]), 4)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VGATHERPF1DPS")
    }
    return p
}
 54042  
// VGATHERPF1QPD performs "Sparse Prefetch Packed Double-Precision Floating-Point Data Values with Signed Quadword Indices Using T1 Hint".
//
// Mnemonic        : VGATHERPF1QPD
// Supported forms : (1 form)
//
//    * VGATHERPF1QPD vm64z{k}    [AVX512PF]
//
func (self *Program) VGATHERPF1QPD(v0 interface{}) *Instruction {
    p := self.alloc("VGATHERPF1QPD", 1, Operands { v0 })
    // VGATHERPF1QPD vm64z{k}
    if isVMZk(v0) {
        self.require(ISA_AVX512PF)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same encoding as VGATHERPF0QPD except the /2 opcode
            // extension selects the T1 hint.
            m.evex(0b10, 0x85, 0b10, 0, addr(v[0]), 0, kcode(v[0]), 0, 0)
            m.emit(0xc7)
            m.mrsd(2, addr(v[0]), 8)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VGATHERPF1QPD")
    }
    return p
}
 54067  
// VGATHERPF1QPS performs "Sparse Prefetch Packed Single-Precision Floating-Point Data Values with Signed Quadword Indices Using T1 Hint".
//
// Mnemonic        : VGATHERPF1QPS
// Supported forms : (1 form)
//
//    * VGATHERPF1QPS vm64z{k}    [AVX512PF]
//
func (self *Program) VGATHERPF1QPS(v0 interface{}) *Instruction {
    p := self.alloc("VGATHERPF1QPS", 1, Operands { v0 })
    // VGATHERPF1QPS vm64z{k}
    if isVMZk(v0) {
        self.require(ISA_AVX512PF)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same encoding as VGATHERPF0QPS except the /2 opcode
            // extension selects the T1 hint.
            m.evex(0b10, 0x05, 0b10, 0, addr(v[0]), 0, kcode(v[0]), 0, 0)
            m.emit(0xc7)
            m.mrsd(2, addr(v[0]), 4)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VGATHERPF1QPS")
    }
    return p
}
 54092  
// VGATHERQPD performs "Gather Packed Double-Precision Floating-Point Values Using Signed Quadword Indices".
//
// Mnemonic        : VGATHERQPD
// Supported forms : (5 forms)
//
//    * VGATHERQPD xmm, vm64x, xmm    [AVX2]
//    * VGATHERQPD ymm, vm64y, ymm    [AVX2]
//    * VGATHERQPD vm64z, zmm{k}      [AVX512F]
//    * VGATHERQPD vm64x, xmm{k}      [AVX512F,AVX512VL]
//    * VGATHERQPD vm64y, ymm{k}      [AVX512F,AVX512VL]
//
func (self *Program) VGATHERQPD(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    // Arity selects the instruction shape: 3 operands for the VEX/AVX2
    // forms (gather mask supplied in a vector register), 2 operands for
    // the EVEX/AVX-512 forms (mask supplied by the {k} opmask register).
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VGATHERQPD", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VGATHERQPD", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VGATHERQPD takes 2 or 3 operands")
    }
    // VGATHERQPD xmm, vm64x, xmm
    if len(vv) == 1 && isXMM(v0) && isVMX(v1) && isXMM(vv[0]) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix + opcode 0x93; VSIB memory form via mrsd.
            // Trailing 1: no disp8 scaling for VEX encodings.
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x93)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // VGATHERQPD ymm, vm64y, ymm
    if len(vv) == 1 && isYMM(v0) && isVMY(v1) && isYMM(vv[0]) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x93)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // VGATHERQPD vm64z, zmm{k}
    if len(vv) == 0 && isVMZ(v0) && isZMMk(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-prefixed form; the trailing 8 is the disp8*N scale,
            // matching the 8-byte double-precision element.
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x93)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VGATHERQPD vm64x, xmm{k}
    if len(vv) == 0 && isEVEXVMX(v0) && isXMMk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x93)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VGATHERQPD vm64y, ymm{k}
    if len(vv) == 0 && isEVEXVMY(v0) && isYMMk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x93)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // No candidate form matched the operand types supplied by the caller.
    if p.len == 0 {
        panic("invalid operands for VGATHERQPD")
    }
    return p
}
 54166  
// VGATHERQPS performs "Gather Packed Single-Precision Floating-Point Values Using Signed Quadword Indices".
//
// Mnemonic        : VGATHERQPS
// Supported forms : (5 forms)
//
//    * VGATHERQPS xmm, vm64x, xmm    [AVX2]
//    * VGATHERQPS xmm, vm64y, xmm    [AVX2]
//    * VGATHERQPS vm64z, ymm{k}      [AVX512F]
//    * VGATHERQPS vm64x, xmm{k}      [AVX512F,AVX512VL]
//    * VGATHERQPS vm64y, xmm{k}      [AVX512F,AVX512VL]
//
func (self *Program) VGATHERQPS(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    // Arity selects the instruction shape: 3 operands for the VEX/AVX2
    // forms, 2 operands for the EVEX/AVX-512 forms. Note the asymmetric
    // register widths in the forms above (e.g. vm64y index with an xmm
    // result): quadword indices yield half as many single elements.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VGATHERQPS", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VGATHERQPS", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VGATHERQPS takes 2 or 3 operands")
    }
    // VGATHERQPS xmm, vm64x, xmm
    if len(vv) == 1 && isXMM(v0) && isVMX(v1) && isXMM(vv[0]) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix + opcode 0x93; VSIB memory form via mrsd.
            // Trailing 1: no disp8 scaling for VEX encodings.
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x93)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // VGATHERQPS xmm, vm64y, xmm
    if len(vv) == 1 && isXMM(v0) && isVMY(v1) && isXMM(vv[0]) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x93)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // VGATHERQPS vm64z, ymm{k}
    if len(vv) == 0 && isVMZ(v0) && isYMMk(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-prefixed form; the trailing 4 is the disp8*N scale,
            // matching the 4-byte single-precision element.
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x93)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VGATHERQPS vm64x, xmm{k}
    if len(vv) == 0 && isEVEXVMX(v0) && isXMMk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x93)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VGATHERQPS vm64y, xmm{k}
    if len(vv) == 0 && isEVEXVMY(v0) && isXMMk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x93)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // No candidate form matched the operand types supplied by the caller.
    if p.len == 0 {
        panic("invalid operands for VGATHERQPS")
    }
    return p
}
 54240  
// VGETEXPPD performs "Extract Exponents of Packed Double-Precision Floating-Point Values as Double-Precision Floating-Point Values".
//
// Mnemonic        : VGETEXPPD
// Supported forms : (7 forms)
//
//    * VGETEXPPD m512/m64bcst, zmm{k}{z}    [AVX512F]
//    * VGETEXPPD {sae}, zmm, zmm{k}{z}      [AVX512F]
//    * VGETEXPPD zmm, zmm{k}{z}             [AVX512F]
//    * VGETEXPPD m128/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VGETEXPPD m256/m64bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VGETEXPPD xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VGETEXPPD ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VGETEXPPD(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    // Arity: the optional leading {sae} operand makes the 3-operand form;
    // all other forms take exactly 2 operands.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VGETEXPPD", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VGETEXPPD", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VGETEXPPD takes 2 or 3 operands")
    }
    // VGETEXPPD m512/m64bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory forms go through the m.evex helper + opcode 0x42; the
            // trailing mrsd argument (64/32/16 below) is the disp8*N scale
            // for the full vector width. The 0x85 here vs 0x05 in the PS
            // variant presumably carries EVEX.W for 64-bit elements —
            // confirm against the EVEX prefix layout if modifying.
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x42)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VGETEXPPD {sae}, zmm, zmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-only form: the EVEX prefix is assembled by hand
            // (no mrsd/displacement). This {sae} variant differs from the
            // plain zmm form below only in the fourth byte (0x18 vs 0x48)
            // — presumably the SAE/rounding-control bits; verify against
            // the EVEX bit layout before changing.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x18)
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VGETEXPPD zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VGETEXPPD m128/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x42)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VGETEXPPD m256/m64bcst, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x42)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VGETEXPPD xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VGETEXPPD ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No candidate form matched the operand types supplied by the caller.
    if p.len == 0 {
        panic("invalid operands for VGETEXPPD")
    }
    return p
}
 54348  
// VGETEXPPS performs "Extract Exponents of Packed Single-Precision Floating-Point Values as Single-Precision Floating-Point Values".
//
// Mnemonic        : VGETEXPPS
// Supported forms : (7 forms)
//
//    * VGETEXPPS m512/m32bcst, zmm{k}{z}    [AVX512F]
//    * VGETEXPPS {sae}, zmm, zmm{k}{z}      [AVX512F]
//    * VGETEXPPS zmm, zmm{k}{z}             [AVX512F]
//    * VGETEXPPS m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VGETEXPPS m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VGETEXPPS xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VGETEXPPS ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VGETEXPPS(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    // Arity: the optional leading {sae} operand makes the 3-operand form;
    // all other forms take exactly 2 operands. Mirrors VGETEXPPD except
    // for the element-width-dependent constants (0x05/0x7d vs 0x85/0xfd).
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VGETEXPPS", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VGETEXPPS", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VGETEXPPS takes 2 or 3 operands")
    }
    // VGETEXPPS m512/m32bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory forms: EVEX helper + opcode 0x42; trailing mrsd
            // argument is the disp8*N scale for the full vector width.
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x42)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VGETEXPPS {sae}, zmm, zmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-only {sae} form, hand-assembled EVEX prefix;
            // differs from the plain zmm branch below only in the fourth
            // byte (0x18 vs 0x48) — presumably the SAE/rounding bits.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x18)
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VGETEXPPS zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VGETEXPPS m128/m32bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x42)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VGETEXPPS m256/m32bcst, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x42)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VGETEXPPS xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VGETEXPPS ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x42)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No candidate form matched the operand types supplied by the caller.
    if p.len == 0 {
        panic("invalid operands for VGETEXPPS")
    }
    return p
}
 54456  
// VGETEXPSD performs "Extract Exponent of Scalar Double-Precision Floating-Point Value as Double-Precision Floating-Point Value".
//
// Mnemonic        : VGETEXPSD
// Supported forms : (3 forms)
//
//    * VGETEXPSD m64, xmm, xmm{k}{z}           [AVX512F]
//    * VGETEXPSD {sae}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VGETEXPSD xmm, xmm, xmm{k}{z}           [AVX512F]
//
func (self *Program) VGETEXPSD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // Scalar variant: always at least 3 operands (second source merges
    // into the destination's upper bits); {sae} adds a fourth.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VGETEXPSD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VGETEXPSD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VGETEXPSD takes 3 or 4 operands")
    }
    // VGETEXPSD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: EVEX helper + opcode 0x43 (scalar; the packed
            // variants use 0x42); disp8*N scale 8 for the single m64 load.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x43)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VGETEXPSD {sae}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-only {sae} form with a hand-assembled EVEX prefix;
            // the non-SAE branch below differs only in the fourth byte's
            // trailing constant (0x10 vs 0x40) — presumably the SAE bit.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x43)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VGETEXPSD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x43)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No candidate form matched the operand types supplied by the caller.
    if p.len == 0 {
        panic("invalid operands for VGETEXPSD")
    }
    return p
}
 54514  
// VGETEXPSS performs "Extract Exponent of Scalar Single-Precision Floating-Point Value as Single-Precision Floating-Point Value".
//
// Mnemonic        : VGETEXPSS
// Supported forms : (3 forms)
//
//    * VGETEXPSS m32, xmm, xmm{k}{z}           [AVX512F]
//    * VGETEXPSS {sae}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VGETEXPSS xmm, xmm, xmm{k}{z}           [AVX512F]
//
func (self *Program) VGETEXPSS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate for 3 operands (plain forms) or 4 operands (the {sae} form
    // carries the suppress-all-exceptions marker as an extra leading operand).
    switch len(vv) {
        case 0  : p = self.alloc("VGETEXPSS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VGETEXPSS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VGETEXPSS takes 3 or 4 operands")
    }
    // VGETEXPSS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-prefixed memory form; opcode 0x43. The final argument 4 to
            // mrsd presumably selects the disp8*N compressed-displacement
            // scale, matching the 4-byte m32 operand — confirm against SDM.
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x43)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VGETEXPSS {sae}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix emitted byte-by-byte: 0x62 escape plus three payload
            // bytes folding in register high bits, mask, and zeroing; the
            // 0x10 term presumably sets the EVEX bit that requests {sae}.
            // Then opcode 0x43 and a register-register ModRM byte.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x43)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VGETEXPSS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-register form; same hand-built EVEX prefix but with the
            // 0x40 term instead of the {sae} bit (differs only in the third
            // payload byte from the {sae} form above).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x43)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VGETEXPSS")
    }
    return p
}
 54572  
// VGETMANTPD performs "Extract Normalized Mantissas from Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VGETMANTPD
// Supported forms : (7 forms)
//
//    * VGETMANTPD imm8, m512/m64bcst, zmm{k}{z}    [AVX512F]
//    * VGETMANTPD imm8, {sae}, zmm, zmm{k}{z}      [AVX512F]
//    * VGETMANTPD imm8, zmm, zmm{k}{z}             [AVX512F]
//    * VGETMANTPD imm8, m128/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VGETMANTPD imm8, m256/m64bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VGETMANTPD imm8, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VGETMANTPD imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VGETMANTPD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate for 3 operands (plain forms) or 4 operands (the {sae} form).
    switch len(vv) {
        case 0  : p = self.alloc("VGETMANTPD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VGETMANTPD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VGETMANTPD takes 3 or 4 operands")
    }
    // VGETMANTPD imm8, m512/m64bcst, zmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM512M64bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form, opcode 0x26, trailing imm8. The 64 passed to
            // mrsd presumably scales 8-bit displacements by the full 64-byte
            // vector width (EVEX compressed displacement) — confirm vs SDM.
            m.evex(0b11, 0x85, 0b10, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x26)
            m.mrsd(lcode(v[2]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VGETMANTPD imm8, {sae}, zmm, zmm{k}{z}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built 4-byte EVEX prefix (0x62 escape + payload), opcode,
            // register-register ModRM, then the imm8 mantissa-control byte.
            // The 0x18 term presumably encodes the {sae} request.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[3]) << 7) | kcode(v[3]) | 0x18)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VGETMANTPD imm8, zmm, zmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form at 512-bit width (0x48 term differs from the
            // xmm/ymm forms below, which use 0x08/0x28 respectively).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VGETMANTPD imm8, m128/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM128M64bcst(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit memory form: disp8 scale 16, vector-length bits 0b00.
            m.evex(0b11, 0x85, 0b00, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x26)
            m.mrsd(lcode(v[2]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VGETMANTPD imm8, m256/m64bcst, ymm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM256M64bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form: disp8 scale 32, vector-length bits 0b01.
            m.evex(0b11, 0x85, 0b01, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x26)
            m.mrsd(lcode(v[2]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VGETMANTPD imm8, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form at 128-bit width (0x08 term).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x08)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VGETMANTPD imm8, ymm, ymm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form at 256-bit width (0x28 term).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x28)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VGETMANTPD")
    }
    return p
}
 54687  
// VGETMANTPS performs "Extract Normalized Mantissas from Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VGETMANTPS
// Supported forms : (7 forms)
//
//    * VGETMANTPS imm8, m512/m32bcst, zmm{k}{z}    [AVX512F]
//    * VGETMANTPS imm8, {sae}, zmm, zmm{k}{z}      [AVX512F]
//    * VGETMANTPS imm8, zmm, zmm{k}{z}             [AVX512F]
//    * VGETMANTPS imm8, m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VGETMANTPS imm8, m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VGETMANTPS imm8, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VGETMANTPS imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VGETMANTPS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate for 3 operands (plain forms) or 4 operands (the {sae} form).
    switch len(vv) {
        case 0  : p = self.alloc("VGETMANTPS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VGETMANTPS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VGETMANTPS takes 3 or 4 operands")
    }
    // VGETMANTPS imm8, m512/m32bcst, zmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM512M32bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form, opcode 0x26, trailing imm8; 64 presumably is
            // the disp8*N compressed-displacement scale (full 64-byte vector).
            m.evex(0b11, 0x05, 0b10, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x26)
            m.mrsd(lcode(v[2]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VGETMANTPS imm8, {sae}, zmm, zmm{k}{z}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built 4-byte EVEX prefix, opcode, reg-reg ModRM, imm8.
            // The 0x18 term presumably encodes the {sae} request.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[3]) << 7) | kcode(v[3]) | 0x18)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VGETMANTPS imm8, zmm, zmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form at 512-bit width (0x48 term; xmm/ymm forms below
            // use 0x08/0x28).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VGETMANTPS imm8, m128/m32bcst, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM128M32bcst(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit memory form: disp8 scale 16, vector-length bits 0b00.
            m.evex(0b11, 0x05, 0b00, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x26)
            m.mrsd(lcode(v[2]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VGETMANTPS imm8, m256/m32bcst, ymm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM256M32bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form: disp8 scale 32, vector-length bits 0b01.
            m.evex(0b11, 0x05, 0b01, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x26)
            m.mrsd(lcode(v[2]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VGETMANTPS imm8, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form at 128-bit width (0x08 term).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x08)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VGETMANTPS imm8, ymm, ymm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form at 256-bit width (0x28 term).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x28)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VGETMANTPS")
    }
    return p
}
 54802  
// VGETMANTSD performs "Extract Normalized Mantissa from Scalar Double-Precision Floating-Point Value".
//
// Mnemonic        : VGETMANTSD
// Supported forms : (3 forms)
//
//    * VGETMANTSD imm8, m64, xmm, xmm{k}{z}           [AVX512F]
//    * VGETMANTSD imm8, {sae}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VGETMANTSD imm8, xmm, xmm, xmm{k}{z}           [AVX512F]
//
func (self *Program) VGETMANTSD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate for 4 operands (plain forms) or 5 operands (the {sae} form).
    switch len(vv) {
        case 0  : p = self.alloc("VGETMANTSD", 4, Operands { v0, v1, v2, v3 })
        case 1  : p = self.alloc("VGETMANTSD", 5, Operands { v0, v1, v2, v3, vv[0] })
        default : panic("instruction VGETMANTSD takes 4 or 5 operands")
    }
    // VGETMANTSD imm8, m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM64(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form, opcode 0x27, trailing imm8; the 8 passed to
            // mrsd presumably scales disp8 by the 8-byte m64 operand size.
            m.evex(0b11, 0x85, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x27)
            m.mrsd(lcode(v[3]), addr(v[1]), 8)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VGETMANTSD imm8, {sae}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built 4-byte EVEX prefix, opcode, reg-reg ModRM, imm8.
            // The 0x10 term presumably encodes the {sae} request.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[4]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[4]) << 4)))
            m.emit(0xfd ^ (hlcode(v[3]) << 3))
            m.emit((zcode(v[4]) << 7) | (0x08 ^ (ecode(v[3]) << 3)) | kcode(v[4]) | 0x10)
            m.emit(0x27)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VGETMANTSD imm8, xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Plain register form; differs from the {sae} form only by the
            // 0x40 term in the third prefix payload byte and operand indices.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x27)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VGETMANTSD")
    }
    return p
}
 54863  
// VGETMANTSS performs "Extract Normalized Mantissa from Scalar Single-Precision Floating-Point Value".
//
// Mnemonic        : VGETMANTSS
// Supported forms : (3 forms)
//
//    * VGETMANTSS imm8, m32, xmm, xmm{k}{z}           [AVX512F]
//    * VGETMANTSS imm8, {sae}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VGETMANTSS imm8, xmm, xmm, xmm{k}{z}           [AVX512F]
//
func (self *Program) VGETMANTSS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate for 4 operands (plain forms) or 5 operands (the {sae} form).
    switch len(vv) {
        case 0  : p = self.alloc("VGETMANTSS", 4, Operands { v0, v1, v2, v3 })
        case 1  : p = self.alloc("VGETMANTSS", 5, Operands { v0, v1, v2, v3, vv[0] })
        default : panic("instruction VGETMANTSS takes 4 or 5 operands")
    }
    // VGETMANTSS imm8, m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM32(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form, opcode 0x27, trailing imm8; the 4 passed to
            // mrsd presumably scales disp8 by the 4-byte m32 operand size.
            m.evex(0b11, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x27)
            m.mrsd(lcode(v[3]), addr(v[1]), 4)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VGETMANTSS imm8, {sae}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built 4-byte EVEX prefix, opcode, reg-reg ModRM, imm8.
            // The 0x10 term presumably encodes the {sae} request.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[4]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[4]) << 4)))
            m.emit(0x7d ^ (hlcode(v[3]) << 3))
            m.emit((zcode(v[4]) << 7) | (0x08 ^ (ecode(v[3]) << 3)) | kcode(v[4]) | 0x10)
            m.emit(0x27)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VGETMANTSS imm8, xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Plain register form; differs from the {sae} form only by the
            // 0x40 term in the third prefix payload byte and operand indices.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x27)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VGETMANTSS")
    }
    return p
}
 54924  
 54925  // VHADDPD performs "Packed Double-FP Horizontal Add".
 54926  //
 54927  // Mnemonic        : VHADDPD
 54928  // Supported forms : (4 forms)
 54929  //
 54930  //    * VHADDPD xmm, xmm, xmm     [AVX]
 54931  //    * VHADDPD m128, xmm, xmm    [AVX]
 54932  //    * VHADDPD ymm, ymm, ymm     [AVX]
 54933  //    * VHADDPD m256, ymm, ymm    [AVX]
 54934  //
 54935  func (self *Program) VHADDPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
 54936      p := self.alloc("VHADDPD", 3, Operands { v0, v1, v2 })
 54937      // VHADDPD xmm, xmm, xmm
 54938      if isXMM(v0) && isXMM(v1) && isXMM(v2) {
 54939          self.require(ISA_AVX)
 54940          p.domain = DomainAVX
 54941          p.add(0, func(m *_Encoding, v []interface{}) {
 54942              m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
 54943              m.emit(0x7c)
 54944              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 54945          })
 54946      }
 54947      // VHADDPD m128, xmm, xmm
 54948      if isM128(v0) && isXMM(v1) && isXMM(v2) {
 54949          self.require(ISA_AVX)
 54950          p.domain = DomainAVX
 54951          p.add(0, func(m *_Encoding, v []interface{}) {
 54952              m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 54953              m.emit(0x7c)
 54954              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 54955          })
 54956      }
 54957      // VHADDPD ymm, ymm, ymm
 54958      if isYMM(v0) && isYMM(v1) && isYMM(v2) {
 54959          self.require(ISA_AVX)
 54960          p.domain = DomainAVX
 54961          p.add(0, func(m *_Encoding, v []interface{}) {
 54962              m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
 54963              m.emit(0x7c)
 54964              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 54965          })
 54966      }
 54967      // VHADDPD m256, ymm, ymm
 54968      if isM256(v0) && isYMM(v1) && isYMM(v2) {
 54969          self.require(ISA_AVX)
 54970          p.domain = DomainAVX
 54971          p.add(0, func(m *_Encoding, v []interface{}) {
 54972              m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 54973              m.emit(0x7c)
 54974              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 54975          })
 54976      }
 54977      if p.len == 0 {
 54978          panic("invalid operands for VHADDPD")
 54979      }
 54980      return p
 54981  }
 54982  
 54983  // VHADDPS performs "Packed Single-FP Horizontal Add".
 54984  //
 54985  // Mnemonic        : VHADDPS
 54986  // Supported forms : (4 forms)
 54987  //
 54988  //    * VHADDPS xmm, xmm, xmm     [AVX]
 54989  //    * VHADDPS m128, xmm, xmm    [AVX]
 54990  //    * VHADDPS ymm, ymm, ymm     [AVX]
 54991  //    * VHADDPS m256, ymm, ymm    [AVX]
 54992  //
 54993  func (self *Program) VHADDPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
 54994      p := self.alloc("VHADDPS", 3, Operands { v0, v1, v2 })
 54995      // VHADDPS xmm, xmm, xmm
 54996      if isXMM(v0) && isXMM(v1) && isXMM(v2) {
 54997          self.require(ISA_AVX)
 54998          p.domain = DomainAVX
 54999          p.add(0, func(m *_Encoding, v []interface{}) {
 55000              m.vex2(3, hcode(v[2]), v[0], hlcode(v[1]))
 55001              m.emit(0x7c)
 55002              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 55003          })
 55004      }
 55005      // VHADDPS m128, xmm, xmm
 55006      if isM128(v0) && isXMM(v1) && isXMM(v2) {
 55007          self.require(ISA_AVX)
 55008          p.domain = DomainAVX
 55009          p.add(0, func(m *_Encoding, v []interface{}) {
 55010              m.vex2(3, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 55011              m.emit(0x7c)
 55012              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 55013          })
 55014      }
 55015      // VHADDPS ymm, ymm, ymm
 55016      if isYMM(v0) && isYMM(v1) && isYMM(v2) {
 55017          self.require(ISA_AVX)
 55018          p.domain = DomainAVX
 55019          p.add(0, func(m *_Encoding, v []interface{}) {
 55020              m.vex2(7, hcode(v[2]), v[0], hlcode(v[1]))
 55021              m.emit(0x7c)
 55022              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 55023          })
 55024      }
 55025      // VHADDPS m256, ymm, ymm
 55026      if isM256(v0) && isYMM(v1) && isYMM(v2) {
 55027          self.require(ISA_AVX)
 55028          p.domain = DomainAVX
 55029          p.add(0, func(m *_Encoding, v []interface{}) {
 55030              m.vex2(7, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 55031              m.emit(0x7c)
 55032              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 55033          })
 55034      }
 55035      if p.len == 0 {
 55036          panic("invalid operands for VHADDPS")
 55037      }
 55038      return p
 55039  }
 55040  
 55041  // VHSUBPD performs "Packed Double-FP Horizontal Subtract".
 55042  //
 55043  // Mnemonic        : VHSUBPD
 55044  // Supported forms : (4 forms)
 55045  //
 55046  //    * VHSUBPD xmm, xmm, xmm     [AVX]
 55047  //    * VHSUBPD m128, xmm, xmm    [AVX]
 55048  //    * VHSUBPD ymm, ymm, ymm     [AVX]
 55049  //    * VHSUBPD m256, ymm, ymm    [AVX]
 55050  //
 55051  func (self *Program) VHSUBPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
 55052      p := self.alloc("VHSUBPD", 3, Operands { v0, v1, v2 })
 55053      // VHSUBPD xmm, xmm, xmm
 55054      if isXMM(v0) && isXMM(v1) && isXMM(v2) {
 55055          self.require(ISA_AVX)
 55056          p.domain = DomainAVX
 55057          p.add(0, func(m *_Encoding, v []interface{}) {
 55058              m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
 55059              m.emit(0x7d)
 55060              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 55061          })
 55062      }
 55063      // VHSUBPD m128, xmm, xmm
 55064      if isM128(v0) && isXMM(v1) && isXMM(v2) {
 55065          self.require(ISA_AVX)
 55066          p.domain = DomainAVX
 55067          p.add(0, func(m *_Encoding, v []interface{}) {
 55068              m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 55069              m.emit(0x7d)
 55070              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 55071          })
 55072      }
 55073      // VHSUBPD ymm, ymm, ymm
 55074      if isYMM(v0) && isYMM(v1) && isYMM(v2) {
 55075          self.require(ISA_AVX)
 55076          p.domain = DomainAVX
 55077          p.add(0, func(m *_Encoding, v []interface{}) {
 55078              m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
 55079              m.emit(0x7d)
 55080              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 55081          })
 55082      }
 55083      // VHSUBPD m256, ymm, ymm
 55084      if isM256(v0) && isYMM(v1) && isYMM(v2) {
 55085          self.require(ISA_AVX)
 55086          p.domain = DomainAVX
 55087          p.add(0, func(m *_Encoding, v []interface{}) {
 55088              m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 55089              m.emit(0x7d)
 55090              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 55091          })
 55092      }
 55093      if p.len == 0 {
 55094          panic("invalid operands for VHSUBPD")
 55095      }
 55096      return p
 55097  }
 55098  
 55099  // VHSUBPS performs "Packed Single-FP Horizontal Subtract".
 55100  //
 55101  // Mnemonic        : VHSUBPS
 55102  // Supported forms : (4 forms)
 55103  //
 55104  //    * VHSUBPS xmm, xmm, xmm     [AVX]
 55105  //    * VHSUBPS m128, xmm, xmm    [AVX]
 55106  //    * VHSUBPS ymm, ymm, ymm     [AVX]
 55107  //    * VHSUBPS m256, ymm, ymm    [AVX]
 55108  //
 55109  func (self *Program) VHSUBPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
 55110      p := self.alloc("VHSUBPS", 3, Operands { v0, v1, v2 })
 55111      // VHSUBPS xmm, xmm, xmm
 55112      if isXMM(v0) && isXMM(v1) && isXMM(v2) {
 55113          self.require(ISA_AVX)
 55114          p.domain = DomainAVX
 55115          p.add(0, func(m *_Encoding, v []interface{}) {
 55116              m.vex2(3, hcode(v[2]), v[0], hlcode(v[1]))
 55117              m.emit(0x7d)
 55118              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 55119          })
 55120      }
 55121      // VHSUBPS m128, xmm, xmm
 55122      if isM128(v0) && isXMM(v1) && isXMM(v2) {
 55123          self.require(ISA_AVX)
 55124          p.domain = DomainAVX
 55125          p.add(0, func(m *_Encoding, v []interface{}) {
 55126              m.vex2(3, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 55127              m.emit(0x7d)
 55128              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 55129          })
 55130      }
 55131      // VHSUBPS ymm, ymm, ymm
 55132      if isYMM(v0) && isYMM(v1) && isYMM(v2) {
 55133          self.require(ISA_AVX)
 55134          p.domain = DomainAVX
 55135          p.add(0, func(m *_Encoding, v []interface{}) {
 55136              m.vex2(7, hcode(v[2]), v[0], hlcode(v[1]))
 55137              m.emit(0x7d)
 55138              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 55139          })
 55140      }
 55141      // VHSUBPS m256, ymm, ymm
 55142      if isM256(v0) && isYMM(v1) && isYMM(v2) {
 55143          self.require(ISA_AVX)
 55144          p.domain = DomainAVX
 55145          p.add(0, func(m *_Encoding, v []interface{}) {
 55146              m.vex2(7, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 55147              m.emit(0x7d)
 55148              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 55149          })
 55150      }
 55151      if p.len == 0 {
 55152          panic("invalid operands for VHSUBPS")
 55153      }
 55154      return p
 55155  }
 55156  
 55157  // VINSERTF128 performs "Insert Packed Floating-Point Values".
 55158  //
 55159  // Mnemonic        : VINSERTF128
 55160  // Supported forms : (2 forms)
 55161  //
 55162  //    * VINSERTF128 imm8, xmm, ymm, ymm     [AVX]
 55163  //    * VINSERTF128 imm8, m128, ymm, ymm    [AVX]
 55164  //
 55165  func (self *Program) VINSERTF128(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
 55166      p := self.alloc("VINSERTF128", 4, Operands { v0, v1, v2, v3 })
 55167      // VINSERTF128 imm8, xmm, ymm, ymm
 55168      if isImm8(v0) && isXMM(v1) && isYMM(v2) && isYMM(v3) {
 55169          self.require(ISA_AVX)
 55170          p.domain = DomainAVX
 55171          p.add(0, func(m *_Encoding, v []interface{}) {
 55172              m.emit(0xc4)
 55173              m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
 55174              m.emit(0x7d ^ (hlcode(v[2]) << 3))
 55175              m.emit(0x18)
 55176              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
 55177              m.imm1(toImmAny(v[0]))
 55178          })
 55179      }
 55180      // VINSERTF128 imm8, m128, ymm, ymm
 55181      if isImm8(v0) && isM128(v1) && isYMM(v2) && isYMM(v3) {
 55182          self.require(ISA_AVX)
 55183          p.domain = DomainAVX
 55184          p.add(0, func(m *_Encoding, v []interface{}) {
 55185              m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
 55186              m.emit(0x18)
 55187              m.mrsd(lcode(v[3]), addr(v[1]), 1)
 55188              m.imm1(toImmAny(v[0]))
 55189          })
 55190      }
 55191      if p.len == 0 {
 55192          panic("invalid operands for VINSERTF128")
 55193      }
 55194      return p
 55195  }
 55196  
// VINSERTF32X4 performs "Insert 128 Bits of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VINSERTF32X4
// Supported forms : (4 forms)
//
//    * VINSERTF32X4 imm8, xmm, zmm, zmm{k}{z}     [AVX512F]
//    * VINSERTF32X4 imm8, m128, zmm, zmm{k}{z}    [AVX512F]
//    * VINSERTF32X4 imm8, xmm, ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VINSERTF32X4 imm8, m128, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//
func (self *Program) VINSERTF32X4(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VINSERTF32X4", 4, Operands { v0, v1, v2, v3 })
    // VINSERTF32X4 imm8, xmm, zmm, zmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX prefix byte
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4))) // P0: fold in the extended register bits of dst (v[3]) and src (v[1])
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                                             // P1: vvvv <- first source register (v[2]), inverted via XOR
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)  // P2: zeroing bit, V' of v[2], opmask k; 0x40 selects the 512-bit length
            m.emit(0x18)                                                                   // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                  // ModRM: direct mode, reg = dst, r/m = src
            m.imm1(toImmAny(v[0]))                                                         // trailing 8-bit immediate
        })
    }
    // VINSERTF32X4 imm8, m128, zmm, zmm{k}{z}
    if isImm8(v0) && isM128(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0) // EVEX prefix for the memory form (0b10 = 512-bit length)
            m.emit(0x18)                                                                                 // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 16)                                                          // ModRM/SIB/displacement; 16 is the disp8 scale (EVEX compressed displacement)
            m.imm1(toImmAny(v[0]))                                                                       // trailing 8-bit immediate
        })
    }
    // VINSERTF32X4 imm8, xmm, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX prefix byte
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4))) // P0: extended register bits of dst/src
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                                             // P1: vvvv <- first source register (v[2])
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)  // P2: as above, but 0x20 selects the 256-bit length
            m.emit(0x18)                                                                   // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                  // ModRM: direct mode, reg = dst, r/m = src
            m.imm1(toImmAny(v[0]))                                                         // trailing 8-bit immediate
        })
    }
    // VINSERTF32X4 imm8, m128, ymm, ymm{k}{z}
    if isImm8(v0) && isM128(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0) // EVEX prefix (0b01 = 256-bit length)
            m.emit(0x18)                                                                                 // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 16)                                                          // ModRM/SIB/displacement; disp8 scaled by 16
            m.imm1(toImmAny(v[0]))                                                                       // trailing 8-bit immediate
        })
    }
    // No encoding matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VINSERTF32X4")
    }
    return p
}
 55264  
// VINSERTF32X8 performs "Insert 256 Bits of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VINSERTF32X8
// Supported forms : (2 forms)
//
//    * VINSERTF32X8 imm8, ymm, zmm, zmm{k}{z}     [AVX512DQ]
//    * VINSERTF32X8 imm8, m256, zmm, zmm{k}{z}    [AVX512DQ]
//
func (self *Program) VINSERTF32X8(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VINSERTF32X8", 4, Operands { v0, v1, v2, v3 })
    // VINSERTF32X8 imm8, ymm, zmm, zmm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX prefix byte
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4))) // P0: extended register bits of dst (v[3]) and src (v[1])
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                                             // P1: vvvv <- first source register (v[2]), inverted via XOR
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)  // P2: zeroing bit, V', opmask k; 0x40 selects the 512-bit length
            m.emit(0x1a)                                                                   // opcode (256-bit insert variant)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                  // ModRM: direct mode, reg = dst, r/m = src
            m.imm1(toImmAny(v[0]))                                                         // trailing 8-bit immediate
        })
    }
    // VINSERTF32X8 imm8, m256, zmm, zmm{k}{z}
    if isImm8(v0) && isM256(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0) // EVEX prefix for the memory form (0b10 = 512-bit length)
            m.emit(0x1a)                                                                                 // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 32)                                                          // ModRM/SIB/displacement; disp8 scaled by 32 (m256 operand)
            m.imm1(toImmAny(v[0]))                                                                       // trailing 8-bit immediate
        })
    }
    // No encoding matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VINSERTF32X8")
    }
    return p
}
 55305  
// VINSERTF64X2 performs "Insert 128 Bits of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VINSERTF64X2
// Supported forms : (4 forms)
//
//    * VINSERTF64X2 imm8, xmm, zmm, zmm{k}{z}     [AVX512DQ]
//    * VINSERTF64X2 imm8, m128, zmm, zmm{k}{z}    [AVX512DQ]
//    * VINSERTF64X2 imm8, xmm, ymm, ymm{k}{z}     [AVX512DQ,AVX512VL]
//    * VINSERTF64X2 imm8, m128, ymm, ymm{k}{z}    [AVX512DQ,AVX512VL]
//
func (self *Program) VINSERTF64X2(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VINSERTF64X2", 4, Operands { v0, v1, v2, v3 })
    // VINSERTF64X2 imm8, xmm, zmm, zmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX prefix byte
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4))) // P0: extended register bits of dst (v[3]) and src (v[1])
            m.emit(0xfd ^ (hlcode(v[2]) << 3))                                             // P1: 0xfd (vs 0x7d in the 32-bit variants) sets EVEX.W=1 for quadword elements; vvvv <- v[2]
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)  // P2: zeroing bit, V', opmask k; 0x40 selects the 512-bit length
            m.emit(0x18)                                                                   // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                  // ModRM: direct mode, reg = dst, r/m = src
            m.imm1(toImmAny(v[0]))                                                         // trailing 8-bit immediate
        })
    }
    // VINSERTF64X2 imm8, m128, zmm, zmm{k}{z}
    if isImm8(v0) && isM128(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0) // EVEX prefix; 0x85 (vs 0x05) carries the W=1 bit, 0b10 = 512-bit length
            m.emit(0x18)                                                                                 // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 16)                                                          // ModRM/SIB/displacement; disp8 scaled by 16
            m.imm1(toImmAny(v[0]))                                                                       // trailing 8-bit immediate
        })
    }
    // VINSERTF64X2 imm8, xmm, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX prefix byte
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4))) // P0: extended register bits of dst/src
            m.emit(0xfd ^ (hlcode(v[2]) << 3))                                             // P1: W=1, vvvv <- first source register (v[2])
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)  // P2: as above, but 0x20 selects the 256-bit length
            m.emit(0x18)                                                                   // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                  // ModRM: direct mode, reg = dst, r/m = src
            m.imm1(toImmAny(v[0]))                                                         // trailing 8-bit immediate
        })
    }
    // VINSERTF64X2 imm8, m128, ymm, ymm{k}{z}
    if isImm8(v0) && isM128(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0) // EVEX prefix; W=1, 0b01 = 256-bit length
            m.emit(0x18)                                                                                 // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 16)                                                          // ModRM/SIB/displacement; disp8 scaled by 16
            m.imm1(toImmAny(v[0]))                                                                       // trailing 8-bit immediate
        })
    }
    // No encoding matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VINSERTF64X2")
    }
    return p
}
 55373  
// VINSERTF64X4 performs "Insert 256 Bits of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VINSERTF64X4
// Supported forms : (2 forms)
//
//    * VINSERTF64X4 imm8, ymm, zmm, zmm{k}{z}     [AVX512F]
//    * VINSERTF64X4 imm8, m256, zmm, zmm{k}{z}    [AVX512F]
//
func (self *Program) VINSERTF64X4(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VINSERTF64X4", 4, Operands { v0, v1, v2, v3 })
    // VINSERTF64X4 imm8, ymm, zmm, zmm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX prefix byte
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4))) // P0: extended register bits of dst (v[3]) and src (v[1])
            m.emit(0xfd ^ (hlcode(v[2]) << 3))                                             // P1: 0xfd sets EVEX.W=1 (quadword elements); vvvv <- v[2]
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)  // P2: zeroing bit, V', opmask k; 0x40 selects the 512-bit length
            m.emit(0x1a)                                                                   // opcode (256-bit insert variant)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                  // ModRM: direct mode, reg = dst, r/m = src
            m.imm1(toImmAny(v[0]))                                                         // trailing 8-bit immediate
        })
    }
    // VINSERTF64X4 imm8, m256, zmm, zmm{k}{z}
    if isImm8(v0) && isM256(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0) // EVEX prefix; 0x85 carries W=1, 0b10 = 512-bit length
            m.emit(0x1a)                                                                                 // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 32)                                                          // ModRM/SIB/displacement; disp8 scaled by 32 (m256 operand)
            m.imm1(toImmAny(v[0]))                                                                       // trailing 8-bit immediate
        })
    }
    // No encoding matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VINSERTF64X4")
    }
    return p
}
 55414  
// VINSERTI128 performs "Insert Packed Integer Values".
//
// Mnemonic        : VINSERTI128
// Supported forms : (2 forms)
//
//    * VINSERTI128 imm8, xmm, ymm, ymm     [AVX2]
//    * VINSERTI128 imm8, m128, ymm, ymm    [AVX2]
//
func (self *Program) VINSERTI128(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VINSERTI128", 4, Operands { v0, v1, v2, v3 })
    // VINSERTI128 imm8, xmm, ymm, ymm
    if isImm8(v0) && isXMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                           // 3-byte VEX escape byte
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5)) // VEX byte 1: fold in the high register bits of dst (v[3]) and src (v[1])
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                     // VEX byte 2: vvvv <- first source register (v[2]), inverted via XOR
            m.emit(0x38)                                           // opcode (integer variant; cf. 0x18 for VINSERTF128)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))          // ModRM: direct mode, reg = dst, r/m = src
            m.imm1(toImmAny(v[0]))                                 // trailing 8-bit immediate
        })
    }
    // VINSERTI128 imm8, m128, ymm, ymm
    if isImm8(v0) && isM128(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2])) // 3-byte VEX prefix for the memory form
            m.emit(0x38)                                                    // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 1)                              // ModRM/SIB/displacement for the memory source
            m.imm1(toImmAny(v[0]))                                          // trailing 8-bit immediate
        })
    }
    // No encoding matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VINSERTI128")
    }
    return p
}
 55454  
// VINSERTI32X4 performs "Insert 128 Bits of Packed Doubleword Integer Values".
//
// Mnemonic        : VINSERTI32X4
// Supported forms : (4 forms)
//
//    * VINSERTI32X4 imm8, xmm, zmm, zmm{k}{z}     [AVX512F]
//    * VINSERTI32X4 imm8, m128, zmm, zmm{k}{z}    [AVX512F]
//    * VINSERTI32X4 imm8, xmm, ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VINSERTI32X4 imm8, m128, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//
func (self *Program) VINSERTI32X4(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VINSERTI32X4", 4, Operands { v0, v1, v2, v3 })
    // VINSERTI32X4 imm8, xmm, zmm, zmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX prefix byte
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4))) // P0: extended register bits of dst (v[3]) and src (v[1])
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                                             // P1: vvvv <- first source register (v[2]), inverted via XOR
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)  // P2: zeroing bit, V', opmask k; 0x40 selects the 512-bit length
            m.emit(0x38)                                                                   // opcode (integer variant)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                  // ModRM: direct mode, reg = dst, r/m = src
            m.imm1(toImmAny(v[0]))                                                         // trailing 8-bit immediate
        })
    }
    // VINSERTI32X4 imm8, m128, zmm, zmm{k}{z}
    if isImm8(v0) && isM128(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0) // EVEX prefix for the memory form (0b10 = 512-bit length)
            m.emit(0x38)                                                                                 // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 16)                                                          // ModRM/SIB/displacement; disp8 scaled by 16
            m.imm1(toImmAny(v[0]))                                                                       // trailing 8-bit immediate
        })
    }
    // VINSERTI32X4 imm8, xmm, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX prefix byte
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4))) // P0: extended register bits of dst/src
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                                             // P1: vvvv <- first source register (v[2])
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)  // P2: as above, but 0x20 selects the 256-bit length
            m.emit(0x38)                                                                   // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                  // ModRM: direct mode, reg = dst, r/m = src
            m.imm1(toImmAny(v[0]))                                                         // trailing 8-bit immediate
        })
    }
    // VINSERTI32X4 imm8, m128, ymm, ymm{k}{z}
    if isImm8(v0) && isM128(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0) // EVEX prefix (0b01 = 256-bit length)
            m.emit(0x38)                                                                                 // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 16)                                                          // ModRM/SIB/displacement; disp8 scaled by 16
            m.imm1(toImmAny(v[0]))                                                                       // trailing 8-bit immediate
        })
    }
    // No encoding matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VINSERTI32X4")
    }
    return p
}
 55522  
// VINSERTI32X8 performs "Insert 256 Bits of Packed Doubleword Integer Values".
//
// Mnemonic        : VINSERTI32X8
// Supported forms : (2 forms)
//
//    * VINSERTI32X8 imm8, ymm, zmm, zmm{k}{z}     [AVX512DQ]
//    * VINSERTI32X8 imm8, m256, zmm, zmm{k}{z}    [AVX512DQ]
//
func (self *Program) VINSERTI32X8(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VINSERTI32X8", 4, Operands { v0, v1, v2, v3 })
    // VINSERTI32X8 imm8, ymm, zmm, zmm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX prefix byte
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4))) // P0: extended register bits of dst (v[3]) and src (v[1])
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                                             // P1: vvvv <- first source register (v[2]), inverted via XOR
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)  // P2: zeroing bit, V', opmask k; 0x40 selects the 512-bit length
            m.emit(0x3a)                                                                   // opcode (256-bit integer insert variant)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                  // ModRM: direct mode, reg = dst, r/m = src
            m.imm1(toImmAny(v[0]))                                                         // trailing 8-bit immediate
        })
    }
    // VINSERTI32X8 imm8, m256, zmm, zmm{k}{z}
    if isImm8(v0) && isM256(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0) // EVEX prefix for the memory form (0b10 = 512-bit length)
            m.emit(0x3a)                                                                                 // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 32)                                                          // ModRM/SIB/displacement; disp8 scaled by 32 (m256 operand)
            m.imm1(toImmAny(v[0]))                                                                       // trailing 8-bit immediate
        })
    }
    // No encoding matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VINSERTI32X8")
    }
    return p
}
 55563  
// VINSERTI64X2 performs "Insert 128 Bits of Packed Quadword Integer Values".
//
// Mnemonic        : VINSERTI64X2
// Supported forms : (4 forms)
//
//    * VINSERTI64X2 imm8, xmm, zmm, zmm{k}{z}     [AVX512DQ]
//    * VINSERTI64X2 imm8, m128, zmm, zmm{k}{z}    [AVX512DQ]
//    * VINSERTI64X2 imm8, xmm, ymm, ymm{k}{z}     [AVX512DQ,AVX512VL]
//    * VINSERTI64X2 imm8, m128, ymm, ymm{k}{z}    [AVX512DQ,AVX512VL]
//
func (self *Program) VINSERTI64X2(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VINSERTI64X2", 4, Operands { v0, v1, v2, v3 })
    // VINSERTI64X2 imm8, xmm, zmm, zmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX prefix byte
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4))) // P0: extended register bits of dst (v[3]) and src (v[1])
            m.emit(0xfd ^ (hlcode(v[2]) << 3))                                             // P1: 0xfd (vs 0x7d in the 32-bit variants) sets EVEX.W=1 for quadword elements; vvvv <- v[2]
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)  // P2: zeroing bit, V', opmask k; 0x40 selects the 512-bit length
            m.emit(0x38)                                                                   // opcode (integer variant)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                  // ModRM: direct mode, reg = dst, r/m = src
            m.imm1(toImmAny(v[0]))                                                         // trailing 8-bit immediate
        })
    }
    // VINSERTI64X2 imm8, m128, zmm, zmm{k}{z}
    if isImm8(v0) && isM128(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0) // EVEX prefix; 0x85 carries W=1, 0b10 = 512-bit length
            m.emit(0x38)                                                                                 // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 16)                                                          // ModRM/SIB/displacement; disp8 scaled by 16
            m.imm1(toImmAny(v[0]))                                                                       // trailing 8-bit immediate
        })
    }
    // VINSERTI64X2 imm8, xmm, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX prefix byte
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4))) // P0: extended register bits of dst/src
            m.emit(0xfd ^ (hlcode(v[2]) << 3))                                             // P1: W=1, vvvv <- first source register (v[2])
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)  // P2: as above, but 0x20 selects the 256-bit length
            m.emit(0x38)                                                                   // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                  // ModRM: direct mode, reg = dst, r/m = src
            m.imm1(toImmAny(v[0]))                                                         // trailing 8-bit immediate
        })
    }
    // VINSERTI64X2 imm8, m128, ymm, ymm{k}{z}
    if isImm8(v0) && isM128(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0) // EVEX prefix; W=1, 0b01 = 256-bit length
            m.emit(0x38)                                                                                 // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 16)                                                          // ModRM/SIB/displacement; disp8 scaled by 16
            m.imm1(toImmAny(v[0]))                                                                       // trailing 8-bit immediate
        })
    }
    // No encoding matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VINSERTI64X2")
    }
    return p
}
 55631  
// VINSERTI64X4 performs "Insert 256 Bits of Packed Quadword Integer Values".
//
// Mnemonic        : VINSERTI64X4
// Supported forms : (2 forms)
//
//    * VINSERTI64X4 imm8, ymm, zmm, zmm{k}{z}     [AVX512F]
//    * VINSERTI64X4 imm8, m256, zmm, zmm{k}{z}    [AVX512F]
//
func (self *Program) VINSERTI64X4(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VINSERTI64X4", 4, Operands { v0, v1, v2, v3 })
    // VINSERTI64X4 imm8, ymm, zmm, zmm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX prefix byte
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4))) // P0: extended register bits of dst (v[3]) and src (v[1])
            m.emit(0xfd ^ (hlcode(v[2]) << 3))                                             // P1: 0xfd sets EVEX.W=1 (quadword elements); vvvv <- v[2]
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)  // P2: zeroing bit, V', opmask k; 0x40 selects the 512-bit length
            m.emit(0x3a)                                                                   // opcode (256-bit insert variant)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                  // ModRM: direct mode, reg = dst, r/m = src
            m.imm1(toImmAny(v[0]))                                                         // trailing 8-bit immediate
        })
    }
    // VINSERTI64X4 imm8, m256, zmm, zmm{k}{z}
    if isImm8(v0) && isM256(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0) // EVEX prefix; 0x85 carries W=1, 0b10 = 512-bit length
            m.emit(0x3a)                                                                                 // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 32)                                                          // ModRM/SIB/displacement; disp8 scaled by 32 (m256 operand)
            m.imm1(toImmAny(v[0]))                                                                       // trailing 8-bit immediate
        })
    }
    // No encoding matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VINSERTI64X4")
    }
    return p
}
 55672  
// VINSERTPS performs "Insert Packed Single Precision Floating-Point Value".
//
// Mnemonic        : VINSERTPS
// Supported forms : (4 forms)
//
//    * VINSERTPS imm8, xmm, xmm, xmm    [AVX]
//    * VINSERTPS imm8, m32, xmm, xmm    [AVX]
//    * VINSERTPS imm8, xmm, xmm, xmm    [AVX512F]
//    * VINSERTPS imm8, m32, xmm, xmm    [AVX512F]
//
func (self *Program) VINSERTPS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VINSERTPS", 4, Operands { v0, v1, v2, v3 })
    // VINSERTPS imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                           // 3-byte VEX escape byte
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5)) // VEX byte 1: fold in the high register bits of dst (v[3]) and src (v[1])
            m.emit(0x79 ^ (hlcode(v[2]) << 3))                     // VEX byte 2: vvvv <- first source register (v[2]), inverted via XOR (0x79: 128-bit form)
            m.emit(0x21)                                           // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))          // ModRM: direct mode, reg = dst, r/m = src
            m.imm1(toImmAny(v[0]))                                 // trailing 8-bit immediate
        })
    }
    // VINSERTPS imm8, m32, xmm, xmm
    if isImm8(v0) && isM32(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2])) // 3-byte VEX prefix for the memory form
            m.emit(0x21)                                                    // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 1)                              // ModRM/SIB/displacement for the memory source
            m.imm1(toImmAny(v[0]))                                          // trailing 8-bit immediate
        })
    }
    // VINSERTPS imm8, xmm, xmm, xmm
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX prefix byte
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4))) // P0: extended register bits of dst (v[3]) and src (v[1])
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                                             // P1: vvvv <- first source register (v[2]), inverted via XOR
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | 0x00)                                     // P2: V' of v[2] only; no opmask/zeroing bits for this form
            m.emit(0x21)                                                                   // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                  // ModRM: direct mode, reg = dst, r/m = src
            m.imm1(toImmAny(v[0]))                                                         // trailing 8-bit immediate
        })
    }
    // VINSERTPS imm8, m32, xmm, xmm
    if isImm8(v0) && isM32(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), 0, 0, 0) // EVEX prefix (0b00 = 128-bit length; no mask, no zeroing)
            m.emit(0x21)                                                             // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 4)                                       // ModRM/SIB/displacement; disp8 scaled by 4 (m32 operand)
            m.imm1(toImmAny(v[0]))                                                   // trailing 8-bit immediate
        })
    }
    // No encoding matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VINSERTPS")
    }
    return p
}
 55739  
// VLDDQU performs "Load Unaligned Integer 128 Bits".
//
// Mnemonic        : VLDDQU
// Supported forms : (2 forms)
//
//    * VLDDQU m128, xmm    [AVX]
//    * VLDDQU m256, ymm    [AVX]
//
func (self *Program) VLDDQU(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VLDDQU", 2, Operands { v0, v1 })
    // VLDDQU m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[1]), addr(v[0]), 0)  // 2-byte VEX prefix; selector 3 for the 128-bit form (cf. 7 below)
            m.emit(0xf0)                           // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1)     // ModRM/SIB/displacement for the memory source
        })
    }
    // VLDDQU m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(7, hcode(v[1]), addr(v[0]), 0)  // 2-byte VEX prefix; selector 7 for the 256-bit form
            m.emit(0xf0)                           // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1)     // ModRM/SIB/displacement for the memory source
        })
    }
    // No encoding matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VLDDQU")
    }
    return p
}
 55775  
// VLDMXCSR performs "Load MXCSR Register".
//
// Mnemonic        : VLDMXCSR
// Supported forms : (1 form)
//
//    * VLDMXCSR m32    [AVX]
//
func (self *Program) VLDMXCSR(v0 interface{}) *Instruction {
    p := self.alloc("VLDMXCSR", 1, Operands { v0 })
    // VLDMXCSR m32
    if isM32(v0) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, 0, addr(v[0]), 0)   // 2-byte VEX prefix; no register operands to fold in
            m.emit(0xae)                  // opcode
            m.mrsd(2, addr(v[0]), 1)      // ModRM with reg field fixed to /2 (opcode extension), memory operand
        })
    }
    // No encoding matched the supplied operand (m32 only).
    if p.len == 0 {
        panic("invalid operands for VLDMXCSR")
    }
    return p
}
 55800  
// VMASKMOVDQU performs "Store Selected Bytes of Double Quadword".
//
// Mnemonic        : VMASKMOVDQU
// Supported forms : (1 form)
//
//    * VMASKMOVDQU xmm, xmm    [AVX]
//
func (self *Program) VMASKMOVDQU(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMASKMOVDQU", 2, Operands { v0, v1 })
    // VMASKMOVDQU xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[1]), v[0], 0)                // 2-byte VEX prefix; v[0] is a register here, not a memory address
            m.emit(0xf7)                                   // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: direct mode, reg = v[1], r/m = v[0]
        })
    }
    // No encoding matched the supplied operands (register pair only).
    if p.len == 0 {
        panic("invalid operands for VMASKMOVDQU")
    }
    return p
}
 55825  
// VMASKMOVPD performs "Conditional Move Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VMASKMOVPD
// Supported forms : (4 forms)
//
//    * VMASKMOVPD m128, xmm, xmm    [AVX]
//    * VMASKMOVPD m256, ymm, ymm    [AVX]
//    * VMASKMOVPD xmm, xmm, m128    [AVX]
//    * VMASKMOVPD ymm, ymm, m256    [AVX]
//
func (self *Program) VMASKMOVPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VMASKMOVPD", 3, Operands { v0, v1, v2 })
    // VMASKMOVPD m128, xmm, xmm (load form: memory -> register)
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 3-byte VEX prefix; v[1] is the mask register (vvvv)
            m.emit(0x2d)                                                    // load-direction opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                              // ModRM/SIB/displacement for the memory source
        })
    }
    // VMASKMOVPD m256, ymm, ymm (load form, 256-bit)
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 3-byte VEX prefix; 0x05 (vs 0x01) selects the 256-bit form
            m.emit(0x2d)                                                    // load-direction opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                              // ModRM/SIB/displacement for the memory source
        })
    }
    // VMASKMOVPD xmm, xmm, m128 (store form: register -> memory)
    if isXMM(v0) && isXMM(v1) && isM128(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[0]), addr(v[2]), hlcode(v[1])) // 3-byte VEX prefix; v[1] is the mask register (vvvv)
            m.emit(0x2f)                                                    // store-direction opcode (cf. 0x2d for loads)
            m.mrsd(lcode(v[0]), addr(v[2]), 1)                              // ModRM/SIB/displacement for the memory destination
        })
    }
    // VMASKMOVPD ymm, ymm, m256 (store form, 256-bit)
    if isYMM(v0) && isYMM(v1) && isM256(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[0]), addr(v[2]), hlcode(v[1])) // 3-byte VEX prefix; 256-bit form
            m.emit(0x2f)                                                    // store-direction opcode
            m.mrsd(lcode(v[0]), addr(v[2]), 1)                              // ModRM/SIB/displacement for the memory destination
        })
    }
    // No encoding matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VMASKMOVPD")
    }
    return p
}
 55883  
// VMASKMOVPS performs "Conditional Move Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VMASKMOVPS
// Supported forms : (4 forms)
//
//    * VMASKMOVPS m128, xmm, xmm    [AVX]
//    * VMASKMOVPS m256, ymm, ymm    [AVX]
//    * VMASKMOVPS xmm, xmm, m128    [AVX]
//    * VMASKMOVPS ymm, ymm, m256    [AVX]
//
// The first two forms read from memory (opcode byte 0x2c); the last two
// write to memory (opcode byte 0x2e) — which operand is the memory one is
// what distinguishes the load and store encodings below.
func (self *Program) VMASKMOVPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Allocate the instruction record; each matching form below registers a
    // candidate encoder on it via p.add.
    p := self.alloc("VMASKMOVPS", 3, Operands { v0, v1, v2 })
    // VMASKMOVPS m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Three-byte VEX prefix. NOTE(review): the 0x01 vs 0x05 third
            // argument appears to select 128- vs 256-bit vector length —
            // confirm against the vex3 encoder.
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x2c)
            // ModRM/SIB/displacement bytes for the memory operand.
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMASKMOVPS m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x2c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMASKMOVPS xmm, xmm, m128 (memory destination: v[0] is the register operand)
    if isXMM(v0) && isXMM(v1) && isM128(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[0]), addr(v[2]), hlcode(v[1]))
            m.emit(0x2e)
            m.mrsd(lcode(v[0]), addr(v[2]), 1)
        })
    }
    // VMASKMOVPS ymm, ymm, m256 (memory destination: v[0] is the register operand)
    if isYMM(v0) && isYMM(v1) && isM256(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[0]), addr(v[2]), hlcode(v[1]))
            m.emit(0x2e)
            m.mrsd(lcode(v[0]), addr(v[2]), 1)
        })
    }
    // No encoder was registered: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VMASKMOVPS")
    }
    return p
}
 55941  
// VMAXPD performs "Return Maximum Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VMAXPD
// Supported forms : (11 forms)
//
//    * VMAXPD xmm, xmm, xmm                   [AVX]
//    * VMAXPD m128, xmm, xmm                  [AVX]
//    * VMAXPD ymm, ymm, ymm                   [AVX]
//    * VMAXPD m256, ymm, ymm                  [AVX]
//    * VMAXPD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VMAXPD {sae}, zmm, zmm, zmm{k}{z}      [AVX512F]
//    * VMAXPD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VMAXPD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VMAXPD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VMAXPD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VMAXPD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// The trailing variadic operand is accepted only by the 4-operand {sae}
// form; all other forms take exactly 3 operands. All forms share the same
// opcode byte (0x5f) and differ only in prefix encoding.
func (self *Program) VMAXPD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate for 3 or 4 operands; anything else is a caller error.
    switch len(vv) {
        case 0  : p = self.alloc("VMAXPD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VMAXPD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VMAXPD takes 3 or 4 operands")
    }
    // VMAXPD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Two-byte VEX prefix, then opcode, then a register-direct
            // ModRM byte (mod=11, reg=v[2], rm=v[0]).
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMAXPD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5f)
            // ModRM/SIB/displacement bytes for the memory operand.
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMAXPD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMAXPD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5f)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMAXPD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix; bcode(v[0]) carries the broadcast bit for the
            // m64bcst variant. The final mrsd argument (64) is the memory
            // operand scale — presumably the EVEX disp8*N compression
            // factor for a full 64-byte ZMM access; confirm against mrsd.
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5f)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VMAXPD {sae}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix emitted byte-by-byte: 0x62 escape followed by
            // three payload bytes built from the register codes, mask and
            // zeroing bits, then opcode and register-direct ModRM.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x5f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VMAXPD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x5f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMAXPD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5f)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VMAXPD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x5f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMAXPD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5f)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VMAXPD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x5f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VMAXPD")
    }
    return p
}
 56093  
// VMAXPS performs "Return Maximum Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VMAXPS
// Supported forms : (11 forms)
//
//    * VMAXPS xmm, xmm, xmm                   [AVX]
//    * VMAXPS m128, xmm, xmm                  [AVX]
//    * VMAXPS ymm, ymm, ymm                   [AVX]
//    * VMAXPS m256, ymm, ymm                  [AVX]
//    * VMAXPS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VMAXPS {sae}, zmm, zmm, zmm{k}{z}      [AVX512F]
//    * VMAXPS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VMAXPS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VMAXPS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VMAXPS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VMAXPS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Structurally identical to VMAXPD: same opcode byte (0x5f) for every
// form, different prefix payloads (single- vs double-precision). The
// trailing variadic operand is accepted only by the 4-operand {sae} form.
func (self *Program) VMAXPS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate for 3 or 4 operands; anything else is a caller error.
    switch len(vv) {
        case 0  : p = self.alloc("VMAXPS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VMAXPS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VMAXPS takes 3 or 4 operands")
    }
    // VMAXPS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Two-byte VEX prefix, opcode, register-direct ModRM
            // (mod=11, reg=v[2], rm=v[0]).
            m.vex2(0, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMAXPS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5f)
            // ModRM/SIB/displacement bytes for the memory operand.
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMAXPS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMAXPS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5f)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMAXPS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix; bcode(v[0]) carries the broadcast bit for the
            // m32bcst variant.
            m.evex(0b01, 0x04, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5f)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VMAXPS {sae}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix emitted byte-by-byte: 0x62 escape + three
            // payload bytes, then opcode and register-direct ModRM.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7c ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x5f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VMAXPS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x5f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMAXPS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5f)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VMAXPS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x5f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMAXPS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5f)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VMAXPS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x5f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VMAXPS")
    }
    return p
}
 56245  
// VMAXSD performs "Return Maximum Scalar Double-Precision Floating-Point Value".
//
// Mnemonic        : VMAXSD
// Supported forms : (5 forms)
//
//    * VMAXSD xmm, xmm, xmm                 [AVX]
//    * VMAXSD m64, xmm, xmm                 [AVX]
//    * VMAXSD m64, xmm, xmm{k}{z}           [AVX512F]
//    * VMAXSD {sae}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VMAXSD xmm, xmm, xmm{k}{z}           [AVX512F]
//
// Scalar counterpart of VMAXPD: same opcode byte (0x5f), 64-bit memory
// operand. The trailing variadic operand is accepted only by the
// 4-operand {sae} form.
func (self *Program) VMAXSD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate for 3 or 4 operands; anything else is a caller error.
    switch len(vv) {
        case 0  : p = self.alloc("VMAXSD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VMAXSD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VMAXSD takes 3 or 4 operands")
    }
    // VMAXSD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Two-byte VEX prefix, opcode, register-direct ModRM
            // (mod=11, reg=v[2], rm=v[0]).
            m.vex2(3, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMAXSD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5f)
            // ModRM/SIB/displacement bytes for the memory operand.
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMAXSD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix; final 0 argument: no broadcast for scalar forms.
            m.evex(0b01, 0x87, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x5f)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VMAXSD {sae}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix emitted byte-by-byte: 0x62 escape + three
            // payload bytes, then opcode and register-direct ModRM.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xff ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x5f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VMAXSD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xff ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x5f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VMAXSD")
    }
    return p
}
 56325  
// VMAXSS performs "Return Maximum Scalar Single-Precision Floating-Point Value".
//
// Mnemonic        : VMAXSS
// Supported forms : (5 forms)
//
//    * VMAXSS xmm, xmm, xmm                 [AVX]
//    * VMAXSS m32, xmm, xmm                 [AVX]
//    * VMAXSS m32, xmm, xmm{k}{z}           [AVX512F]
//    * VMAXSS {sae}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VMAXSS xmm, xmm, xmm{k}{z}           [AVX512F]
//
// Scalar counterpart of VMAXPS: same opcode byte (0x5f), 32-bit memory
// operand. The trailing variadic operand is accepted only by the
// 4-operand {sae} form.
func (self *Program) VMAXSS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate for 3 or 4 operands; anything else is a caller error.
    switch len(vv) {
        case 0  : p = self.alloc("VMAXSS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VMAXSS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VMAXSS takes 3 or 4 operands")
    }
    // VMAXSS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Two-byte VEX prefix, opcode, register-direct ModRM
            // (mod=11, reg=v[2], rm=v[0]).
            m.vex2(2, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMAXSS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5f)
            // ModRM/SIB/displacement bytes for the memory operand.
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMAXSS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix; final 0 argument: no broadcast for scalar forms.
            m.evex(0b01, 0x06, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x5f)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VMAXSS {sae}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix emitted byte-by-byte: 0x62 escape + three
            // payload bytes, then opcode and register-direct ModRM.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7e ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x5f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VMAXSS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x5f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VMAXSS")
    }
    return p
}
 56405  
// VMINPD performs "Return Minimum Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VMINPD
// Supported forms : (11 forms)
//
//    * VMINPD xmm, xmm, xmm                   [AVX]
//    * VMINPD m128, xmm, xmm                  [AVX]
//    * VMINPD ymm, ymm, ymm                   [AVX]
//    * VMINPD m256, ymm, ymm                  [AVX]
//    * VMINPD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VMINPD {sae}, zmm, zmm, zmm{k}{z}      [AVX512F]
//    * VMINPD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VMINPD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VMINPD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VMINPD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VMINPD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Mirrors VMAXPD exactly except for the opcode byte (0x5d instead of
// 0x5f). The trailing variadic operand is accepted only by the 4-operand
// {sae} form.
func (self *Program) VMINPD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate for 3 or 4 operands; anything else is a caller error.
    switch len(vv) {
        case 0  : p = self.alloc("VMINPD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VMINPD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VMINPD takes 3 or 4 operands")
    }
    // VMINPD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Two-byte VEX prefix, opcode, register-direct ModRM
            // (mod=11, reg=v[2], rm=v[0]).
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMINPD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5d)
            // ModRM/SIB/displacement bytes for the memory operand.
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMINPD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMINPD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5d)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMINPD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix; bcode(v[0]) carries the broadcast bit for the
            // m64bcst variant.
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5d)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VMINPD {sae}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix emitted byte-by-byte: 0x62 escape + three
            // payload bytes, then opcode and register-direct ModRM.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VMINPD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMINPD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5d)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VMINPD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMINPD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5d)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VMINPD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder was registered: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VMINPD")
    }
    return p
}
 56557  
// VMINPS performs "Return Minimum Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VMINPS
// Supported forms : (11 forms)
//
//    * VMINPS xmm, xmm, xmm                   [AVX]
//    * VMINPS m128, xmm, xmm                  [AVX]
//    * VMINPS ymm, ymm, ymm                   [AVX]
//    * VMINPS m256, ymm, ymm                  [AVX]
//    * VMINPS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VMINPS {sae}, zmm, zmm, zmm{k}{z}      [AVX512F]
//    * VMINPS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VMINPS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VMINPS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VMINPS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VMINPS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Operands are given in AT&T order (source first, destination last); the
// optional trailing operand is only used by the 4-operand {sae} form. Each
// matching form below registers one candidate byte encoding on the
// instruction; if no form matches, the function panics.
func (self *Program) VMINPS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate a 3- or 4-operand instruction depending on whether the
    // {sae} broadcast/rounding specifier was supplied.
    switch len(vv) {
        case 0  : p = self.alloc("VMINPS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VMINPS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VMINPS takes 3 or 4 operands")
    }
    // VMINPS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix, opcode 0x5D, then ModRM reg-reg byte
            // (0xC0 | reg<<3 | rm).
            m.vex2(0, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMINPS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5d)
            // mrsd emits the ModRM/SIB/displacement for the memory operand;
            // scale 1 means no compressed-displacement scaling (VEX form).
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMINPS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // First vex2 argument 4 selects the 256-bit (VEX.L=1) variant.
            m.vex2(4, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMINPS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5d)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMINPS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded memory form; the final mrsd argument (64) is the
            // compressed-displacement scale for a full 512-bit operand.
            m.evex(0b01, 0x04, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5d)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VMINPS {sae}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62 escape byte followed by the
            // three payload bytes (register-extension bits, vvvv, then
            // mask/z/L'L bits — 0x10 here sets EVEX.b for {sae}).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7c ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VMINPS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same manual EVEX layout; 0x40 in the third payload byte
            // selects the 512-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMINPS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5d)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VMINPS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit EVEX variant (vector-length bits zero: the `| 0x00`).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMINPS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5d)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VMINPS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit EVEX variant (`| 0x20` sets the vector-length bit).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types: reject at runtime.
    if p.len == 0 {
        panic("invalid operands for VMINPS")
    }
    return p
}
 56709  
// VMINSD performs "Return Minimum Scalar Double-Precision Floating-Point Value".
//
// Mnemonic        : VMINSD
// Supported forms : (5 forms)
//
//    * VMINSD xmm, xmm, xmm                 [AVX]
//    * VMINSD m64, xmm, xmm                 [AVX]
//    * VMINSD m64, xmm, xmm{k}{z}           [AVX512F]
//    * VMINSD {sae}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VMINSD xmm, xmm, xmm{k}{z}           [AVX512F]
//
// Operands are in AT&T order (source first, destination last); the optional
// trailing operand is only used by the 4-operand {sae} form. Each matching
// form registers one candidate encoding; no match panics.
func (self *Program) VMINSD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate a 3- or 4-operand instruction depending on whether {sae}
    // was supplied.
    switch len(vv) {
        case 0  : p = self.alloc("VMINSD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VMINSD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VMINSD takes 3 or 4 operands")
    }
    // VMINSD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix (first argument 3 selects the scalar-double
            // F2 prefix encoding), opcode 0x5D, ModRM reg-reg byte.
            m.vex2(3, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMINSD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5d)
            // mrsd writes ModRM/SIB/displacement; scale 1 = no
            // compressed-displacement scaling (VEX form).
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMINSD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; disp8 compression scale is 8 for a 64-bit
            // scalar memory operand.
            m.evex(0b01, 0x87, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x5d)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VMINSD {sae}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62 escape plus three payload bytes
            // (register-extension bits, vvvv, then mask/z bits; the `| 0x10`
            // sets EVEX.b to request suppress-all-exceptions).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xff ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VMINSD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xff ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types: reject at runtime.
    if p.len == 0 {
        panic("invalid operands for VMINSD")
    }
    return p
}
 56789  
// VMINSS performs "Return Minimum Scalar Single-Precision Floating-Point Value".
//
// Mnemonic        : VMINSS
// Supported forms : (5 forms)
//
//    * VMINSS xmm, xmm, xmm                 [AVX]
//    * VMINSS m32, xmm, xmm                 [AVX]
//    * VMINSS m32, xmm, xmm{k}{z}           [AVX512F]
//    * VMINSS {sae}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VMINSS xmm, xmm, xmm{k}{z}           [AVX512F]
//
// Operands are in AT&T order (source first, destination last); the optional
// trailing operand is only used by the 4-operand {sae} form. Each matching
// form registers one candidate encoding; no match panics.
func (self *Program) VMINSS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate a 3- or 4-operand instruction depending on whether {sae}
    // was supplied.
    switch len(vv) {
        case 0  : p = self.alloc("VMINSS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VMINSS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VMINSS takes 3 or 4 operands")
    }
    // VMINSS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix (first argument 2 selects the scalar-single
            // F3 prefix encoding), opcode 0x5D, ModRM reg-reg byte.
            m.vex2(2, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMINSS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5d)
            // mrsd writes ModRM/SIB/displacement; scale 1 = no
            // compressed-displacement scaling (VEX form).
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMINSS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; disp8 compression scale is 4 for a 32-bit
            // scalar memory operand.
            m.evex(0b01, 0x06, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x5d)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VMINSS {sae}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62 escape plus three payload bytes
            // (register-extension bits, vvvv, then mask/z bits; the `| 0x10`
            // sets EVEX.b to request suppress-all-exceptions).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7e ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VMINSS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x5d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types: reject at runtime.
    if p.len == 0 {
        panic("invalid operands for VMINSS")
    }
    return p
}
 56869  
// VMOVAPD performs "Move Aligned Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VMOVAPD
// Supported forms : (15 forms)
//
//    * VMOVAPD xmm, xmm           [AVX]
//    * VMOVAPD m128, xmm          [AVX]
//    * VMOVAPD ymm, ymm           [AVX]
//    * VMOVAPD m256, ymm          [AVX]
//    * VMOVAPD xmm, m128          [AVX]
//    * VMOVAPD ymm, m256          [AVX]
//    * VMOVAPD zmm, m512{k}{z}    [AVX512F]
//    * VMOVAPD zmm, zmm{k}{z}     [AVX512F]
//    * VMOVAPD m512, zmm{k}{z}    [AVX512F]
//    * VMOVAPD xmm, m128{k}{z}    [AVX512F,AVX512VL]
//    * VMOVAPD xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVAPD ymm, m256{k}{z}    [AVX512F,AVX512VL]
//    * VMOVAPD ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVAPD m128, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VMOVAPD m256, ymm{k}{z}    [AVX512F,AVX512VL]
//
// Operands are in AT&T order (source v0, destination v1). Register-to-register
// moves register TWO candidate encodings — the load form (opcode 0x28, dest in
// ModRM.reg) and the store form (opcode 0x29, source in ModRM.reg) — letting
// the encoder pick between the equivalent byte sequences.
func (self *Program) VMOVAPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVAPD", 2, Operands { v0, v1 })
    // VMOVAPD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Load form: 0x28 with destination in ModRM.reg.
            m.vex2(1, hcode(v[1]), v[0], 0)
            m.emit(0x28)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Store form: 0x29 with source in ModRM.reg.
            m.vex2(1, hcode(v[0]), v[1], 0)
            m.emit(0x29)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVAPD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x28)
            // mrsd emits ModRM/SIB/displacement; scale 1 = no
            // compressed-displacement scaling (VEX form).
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVAPD ymm, ymm
    if isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // First vex2 argument 5 selects the 256-bit 66-prefixed variant.
            m.vex2(5, hcode(v[1]), v[0], 0)
            m.emit(0x28)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[0]), v[1], 0)
            m.emit(0x29)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVAPD m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x28)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVAPD xmm, m128
    if isXMM(v0) && isM128(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x29)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVAPD ymm, m256
    if isYMM(v0) && isM256(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x29)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVAPD zmm, m512{k}{z}
    if isZMM(v0) && isM512kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX store form; disp8 compression scale is 64 for a full
            // 512-bit memory operand.
            m.evex(0b01, 0x85, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x29)
            m.mrsd(lcode(v[0]), addr(v[1]), 64)
        })
    }
    // VMOVAPD zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62 escape plus three payload bytes;
            // 0x48 sets the 512-bit vector-length bits. Load form (0x28).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x28)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Equivalent store form (0x29) with operand roles swapped.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x29)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVAPD m512, zmm{k}{z}
    if isM512(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x28)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VMOVAPD xmm, m128{k}{z}
    if isEVEXXMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x29)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)
        })
    }
    // VMOVAPD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit EVEX load form (vector-length bits zero in 0x08).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x28)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x29)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVAPD ymm, m256{k}{z}
    if isEVEXYMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x29)
            m.mrsd(lcode(v[0]), addr(v[1]), 32)
        })
    }
    // VMOVAPD ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit EVEX load form (0x28 here sets the vector-length bit).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x28)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x29)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVAPD m128, xmm{k}{z}
    if isM128(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x28)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VMOVAPD m256, ymm{k}{z}
    if isM256(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x28)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // No form matched the supplied operand types: reject at runtime.
    if p.len == 0 {
        panic("invalid operands for VMOVAPD")
    }
    return p
}
 57091  
 57092  // VMOVAPS performs "Move Aligned Packed Single-Precision Floating-Point Values".
 57093  //
 57094  // Mnemonic        : VMOVAPS
 57095  // Supported forms : (15 forms)
 57096  //
 57097  //    * VMOVAPS xmm, xmm           [AVX]
 57098  //    * VMOVAPS m128, xmm          [AVX]
 57099  //    * VMOVAPS ymm, ymm           [AVX]
 57100  //    * VMOVAPS m256, ymm          [AVX]
 57101  //    * VMOVAPS xmm, m128          [AVX]
 57102  //    * VMOVAPS ymm, m256          [AVX]
 57103  //    * VMOVAPS zmm, m512{k}{z}    [AVX512F]
 57104  //    * VMOVAPS zmm, zmm{k}{z}     [AVX512F]
 57105  //    * VMOVAPS m512, zmm{k}{z}    [AVX512F]
 57106  //    * VMOVAPS xmm, m128{k}{z}    [AVX512F,AVX512VL]
 57107  //    * VMOVAPS xmm, xmm{k}{z}     [AVX512F,AVX512VL]
 57108  //    * VMOVAPS ymm, m256{k}{z}    [AVX512F,AVX512VL]
 57109  //    * VMOVAPS ymm, ymm{k}{z}     [AVX512F,AVX512VL]
 57110  //    * VMOVAPS m128, xmm{k}{z}    [AVX512F,AVX512VL]
 57111  //    * VMOVAPS m256, ymm{k}{z}    [AVX512F,AVX512VL]
 57112  //
 57113  func (self *Program) VMOVAPS(v0 interface{}, v1 interface{}) *Instruction {
 57114      p := self.alloc("VMOVAPS", 2, Operands { v0, v1 })
 57115      // VMOVAPS xmm, xmm
 57116      if isXMM(v0) && isXMM(v1) {
 57117          self.require(ISA_AVX)
 57118          p.domain = DomainAVX
 57119          p.add(0, func(m *_Encoding, v []interface{}) {
 57120              m.vex2(0, hcode(v[1]), v[0], 0)
 57121              m.emit(0x28)
 57122              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 57123          })
 57124          p.add(0, func(m *_Encoding, v []interface{}) {
 57125              m.vex2(0, hcode(v[0]), v[1], 0)
 57126              m.emit(0x29)
 57127              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 57128          })
 57129      }
 57130      // VMOVAPS m128, xmm
 57131      if isM128(v0) && isXMM(v1) {
 57132          self.require(ISA_AVX)
 57133          p.domain = DomainAVX
 57134          p.add(0, func(m *_Encoding, v []interface{}) {
 57135              m.vex2(0, hcode(v[1]), addr(v[0]), 0)
 57136              m.emit(0x28)
 57137              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 57138          })
 57139      }
 57140      // VMOVAPS ymm, ymm
 57141      if isYMM(v0) && isYMM(v1) {
 57142          self.require(ISA_AVX)
 57143          p.domain = DomainAVX
 57144          p.add(0, func(m *_Encoding, v []interface{}) {
 57145              m.vex2(4, hcode(v[1]), v[0], 0)
 57146              m.emit(0x28)
 57147              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 57148          })
 57149          p.add(0, func(m *_Encoding, v []interface{}) {
 57150              m.vex2(4, hcode(v[0]), v[1], 0)
 57151              m.emit(0x29)
 57152              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 57153          })
 57154      }
 57155      // VMOVAPS m256, ymm
 57156      if isM256(v0) && isYMM(v1) {
 57157          self.require(ISA_AVX)
 57158          p.domain = DomainAVX
 57159          p.add(0, func(m *_Encoding, v []interface{}) {
 57160              m.vex2(4, hcode(v[1]), addr(v[0]), 0)
 57161              m.emit(0x28)
 57162              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 57163          })
 57164      }
 57165      // VMOVAPS xmm, m128
 57166      if isXMM(v0) && isM128(v1) {
 57167          self.require(ISA_AVX)
 57168          p.domain = DomainAVX
 57169          p.add(0, func(m *_Encoding, v []interface{}) {
 57170              m.vex2(0, hcode(v[0]), addr(v[1]), 0)
 57171              m.emit(0x29)
 57172              m.mrsd(lcode(v[0]), addr(v[1]), 1)
 57173          })
 57174      }
 57175      // VMOVAPS ymm, m256
 57176      if isYMM(v0) && isM256(v1) {
 57177          self.require(ISA_AVX)
 57178          p.domain = DomainAVX
 57179          p.add(0, func(m *_Encoding, v []interface{}) {
 57180              m.vex2(4, hcode(v[0]), addr(v[1]), 0)
 57181              m.emit(0x29)
 57182              m.mrsd(lcode(v[0]), addr(v[1]), 1)
 57183          })
 57184      }
 57185      // VMOVAPS zmm, m512{k}{z}
 57186      if isZMM(v0) && isM512kz(v1) {
 57187          self.require(ISA_AVX512F)
 57188          p.domain = DomainAVX
 57189          p.add(0, func(m *_Encoding, v []interface{}) {
 57190              m.evex(0b01, 0x04, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
 57191              m.emit(0x29)
 57192              m.mrsd(lcode(v[0]), addr(v[1]), 64)
 57193          })
 57194      }
 57195      // VMOVAPS zmm, zmm{k}{z}
 57196      if isZMM(v0) && isZMMkz(v1) {
 57197          self.require(ISA_AVX512F)
 57198          p.domain = DomainAVX
 57199          p.add(0, func(m *_Encoding, v []interface{}) {
 57200              m.emit(0x62)
 57201              m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
 57202              m.emit(0x7c)
 57203              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
 57204              m.emit(0x28)
 57205              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 57206          })
 57207          p.add(0, func(m *_Encoding, v []interface{}) {
 57208              m.emit(0x62)
 57209              m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
 57210              m.emit(0x7c)
 57211              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
 57212              m.emit(0x29)
 57213              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 57214          })
 57215      }
 57216      // VMOVAPS m512, zmm{k}{z}
 57217      if isM512(v0) && isZMMkz(v1) {
 57218          self.require(ISA_AVX512F)
 57219          p.domain = DomainAVX
 57220          p.add(0, func(m *_Encoding, v []interface{}) {
 57221              m.evex(0b01, 0x04, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
 57222              m.emit(0x28)
 57223              m.mrsd(lcode(v[1]), addr(v[0]), 64)
 57224          })
 57225      }
 57226      // VMOVAPS xmm, m128{k}{z}
 57227      if isEVEXXMM(v0) && isM128kz(v1) {
 57228          self.require(ISA_AVX512VL | ISA_AVX512F)
 57229          p.domain = DomainAVX
 57230          p.add(0, func(m *_Encoding, v []interface{}) {
 57231              m.evex(0b01, 0x04, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
 57232              m.emit(0x29)
 57233              m.mrsd(lcode(v[0]), addr(v[1]), 16)
 57234          })
 57235      }
 57236      // VMOVAPS xmm, xmm{k}{z}
 57237      if isEVEXXMM(v0) && isXMMkz(v1) {
 57238          self.require(ISA_AVX512VL | ISA_AVX512F)
 57239          p.domain = DomainAVX
 57240          p.add(0, func(m *_Encoding, v []interface{}) {
 57241              m.emit(0x62)
 57242              m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
 57243              m.emit(0x7c)
 57244              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
 57245              m.emit(0x28)
 57246              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 57247          })
 57248          p.add(0, func(m *_Encoding, v []interface{}) {
 57249              m.emit(0x62)
 57250              m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
 57251              m.emit(0x7c)
 57252              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
 57253              m.emit(0x29)
 57254              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 57255          })
 57256      }
 57257      // VMOVAPS ymm, m256{k}{z}
 57258      if isEVEXYMM(v0) && isM256kz(v1) {
 57259          self.require(ISA_AVX512VL | ISA_AVX512F)
 57260          p.domain = DomainAVX
 57261          p.add(0, func(m *_Encoding, v []interface{}) {
 57262              m.evex(0b01, 0x04, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
 57263              m.emit(0x29)
 57264              m.mrsd(lcode(v[0]), addr(v[1]), 32)
 57265          })
 57266      }
 57267      // VMOVAPS ymm, ymm{k}{z}
 57268      if isEVEXYMM(v0) && isYMMkz(v1) {
 57269          self.require(ISA_AVX512VL | ISA_AVX512F)
 57270          p.domain = DomainAVX
 57271          p.add(0, func(m *_Encoding, v []interface{}) {
 57272              m.emit(0x62)
 57273              m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
 57274              m.emit(0x7c)
 57275              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
 57276              m.emit(0x28)
 57277              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 57278          })
 57279          p.add(0, func(m *_Encoding, v []interface{}) {
 57280              m.emit(0x62)
 57281              m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
 57282              m.emit(0x7c)
 57283              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
 57284              m.emit(0x29)
 57285              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 57286          })
 57287      }
 57288      // VMOVAPS m128, xmm{k}{z}
 57289      if isM128(v0) && isXMMkz(v1) {
 57290          self.require(ISA_AVX512VL | ISA_AVX512F)
 57291          p.domain = DomainAVX
 57292          p.add(0, func(m *_Encoding, v []interface{}) {
 57293              m.evex(0b01, 0x04, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
 57294              m.emit(0x28)
 57295              m.mrsd(lcode(v[1]), addr(v[0]), 16)
 57296          })
 57297      }
 57298      // VMOVAPS m256, ymm{k}{z}
 57299      if isM256(v0) && isYMMkz(v1) {
 57300          self.require(ISA_AVX512VL | ISA_AVX512F)
 57301          p.domain = DomainAVX
 57302          p.add(0, func(m *_Encoding, v []interface{}) {
 57303              m.evex(0b01, 0x04, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
 57304              m.emit(0x28)
 57305              m.mrsd(lcode(v[1]), addr(v[0]), 32)
 57306          })
 57307      }
 57308      if p.len == 0 {
 57309          panic("invalid operands for VMOVAPS")
 57310      }
 57311      return p
 57312  }
 57313  
// VMOVD performs "Move Doubleword".
//
// Mnemonic        : VMOVD
// Supported forms : (8 forms)
//
//    * VMOVD xmm, r32    [AVX]
//    * VMOVD r32, xmm    [AVX]
//    * VMOVD m32, xmm    [AVX]
//    * VMOVD xmm, m32    [AVX]
//    * VMOVD xmm, r32    [AVX512F]
//    * VMOVD r32, xmm    [AVX512F]
//    * VMOVD m32, xmm    [AVX512F]
//    * VMOVD xmm, m32    [AVX512F]
//
// Operands are given in (source, destination) order, matching the other
// generated encoders in this file. Every form whose operand types match
// registers a candidate byte encoding on the Instruction; the function
// panics if no form matched at all.
func (self *Program) VMOVD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVD", 2, Operands { v0, v1 })
    // VMOVD xmm, r32
    if isXMM(v0) && isReg32(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[0]), v[1], 0)                 // two-byte VEX prefix
            m.emit(0x7e)                                    // opcode: store direction (xmm -> r/m32)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))   // ModRM: mod=11 register-direct, reg=v0, rm=v1
        })
    }
    // VMOVD r32, xmm
    if isReg32(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[1]), v[0], 0)
            m.emit(0x6e)                                    // opcode: load direction (r/m32 -> xmm)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVD m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x6e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/displacement for the memory operand
        })
    }
    // VMOVD xmm, m32
    if isXMM(v0) && isM32(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x7e)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVD xmm, r32 -- EVEX-encoded form, reaches xmm16-xmm31
    if isEVEXXMM(v0) && isReg32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                        // EVEX prefix marker
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))      // P0: inverted register-extension bits (R/X/B/R'), opcode map 0F
            m.emit(0x7d)                                                                        // P1: W=0, vvvv unused (1111), pp=01 (66 prefix)
            m.emit(0x08)                                                                        // P2: 128-bit vector length, no masking
            m.emit(0x7e)                                                                        // opcode: store direction
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVD r32, xmm -- EVEX-encoded form
    if isReg32(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit(0x08)
            m.emit(0x6e)                                                                        // opcode: load direction
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVD m32, xmm -- EVEX-encoded form
    if isM32(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x6e)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)              // trailing 4: disp8 compression scale (4-byte element)
        })
    }
    // VMOVD xmm, m32 -- EVEX-encoded form
    if isEVEXXMM(v0) && isM32(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[0]), addr(v[1]), 0, 0, 0, 0)
            m.emit(0x7e)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VMOVD")
    }
    return p
}
 57421  
// VMOVDDUP performs "Move One Double-FP and Duplicate".
//
// Mnemonic        : VMOVDDUP
// Supported forms : (10 forms)
//
//    * VMOVDDUP xmm, xmm           [AVX]
//    * VMOVDDUP m64, xmm           [AVX]
//    * VMOVDDUP ymm, ymm           [AVX]
//    * VMOVDDUP m256, ymm          [AVX]
//    * VMOVDDUP zmm, zmm{k}{z}     [AVX512F]
//    * VMOVDDUP m512, zmm{k}{z}    [AVX512F]
//    * VMOVDDUP xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVDDUP ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVDDUP m64, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVDDUP m256, ymm{k}{z}    [AVX512F,AVX512VL]
//
// Operands are in (source, destination) order. All forms share opcode 0x12;
// the prefixes select vector length, masking and the F2 prefix group.
// Panics if the operand combination matches no form.
func (self *Program) VMOVDDUP(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVDDUP", 2, Operands { v0, v1 })
    // VMOVDDUP xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[1]), v[0], 0)                 // two-byte VEX prefix, 128-bit
            m.emit(0x12)                                    // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11 register-direct, reg=dst, rm=src
        })
    }
    // VMOVDDUP m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x12)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/displacement for the memory source
        })
    }
    // VMOVDDUP ymm, ymm
    if isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(7, hcode(v[1]), v[0], 0)                 // two-byte VEX prefix, 256-bit
            m.emit(0x12)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVDDUP m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(7, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x12)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVDDUP zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                        // EVEX prefix marker
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))      // P0: inverted register-extension bits, map 0F
            m.emit(0xff)                                                                        // P1: W=1, vvvv unused, pp=11 (F2 prefix)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                                     // P2: z bit + mask reg + 512-bit length
            m.emit(0x12)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVDDUP m512, zmm{k}{z}
    if isM512(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x12)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)             // trailing 64: disp8 compression scale for a full zmm load
        })
    }
    // VMOVDDUP xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xff)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)                                     // P2: 128-bit length
            m.emit(0x12)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVDDUP ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xff)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)                                     // P2: 256-bit length
            m.emit(0x12)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVDDUP m64, xmm{k}{z}
    if isM64(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x12)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)              // 8-byte disp8 scale (single double-FP source)
        })
    }
    // VMOVDDUP m256, ymm{k}{z}
    if isM256(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x12)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VMOVDDUP")
    }
    return p
}
 57554  
// VMOVDQA performs "Move Aligned Double Quadword".
//
// Mnemonic        : VMOVDQA
// Supported forms : (6 forms)
//
//    * VMOVDQA xmm, xmm     [AVX]
//    * VMOVDQA m128, xmm    [AVX]
//    * VMOVDQA ymm, ymm     [AVX]
//    * VMOVDQA m256, ymm    [AVX]
//    * VMOVDQA xmm, m128    [AVX]
//    * VMOVDQA ymm, m256    [AVX]
//
// Operands are in (source, destination) order. Register-to-register moves
// register two candidate encodings (load form 0x6f and store form 0x7f);
// the encoder later picks one. Panics when no form matches.
func (self *Program) VMOVDQA(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVDQA", 2, Operands { v0, v1 })
    // VMOVDQA xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[1]), v[0], 0)                 // two-byte VEX prefix, 128-bit
            m.emit(0x6f)                                    // opcode: load direction (src in rm)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))   // ModRM: mod=11 register-direct
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[0]), v[1], 0)
            m.emit(0x7f)                                    // opcode: store direction (dst in rm)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQA m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)              // ModRM/SIB/displacement for the memory operand
        })
    }
    // VMOVDQA ymm, ymm
    if isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[1]), v[0], 0)                 // two-byte VEX prefix, 256-bit
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[0]), v[1], 0)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQA m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVDQA xmm, m128
    if isXMM(v0) && isM128(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVDQA ymm, m256
    if isYMM(v0) && isM256(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VMOVDQA")
    }
    return p
}
 57644  
// VMOVDQA32 performs "Move Aligned Doubleword Values".
//
// Mnemonic        : VMOVDQA32
// Supported forms : (9 forms)
//
//    * VMOVDQA32 zmm, m512{k}{z}    [AVX512F]
//    * VMOVDQA32 zmm, zmm{k}{z}     [AVX512F]
//    * VMOVDQA32 m512, zmm{k}{z}    [AVX512F]
//    * VMOVDQA32 xmm, m128{k}{z}    [AVX512F,AVX512VL]
//    * VMOVDQA32 xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVDQA32 ymm, m256{k}{z}    [AVX512F,AVX512VL]
//    * VMOVDQA32 ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVDQA32 m128, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VMOVDQA32 m256, ymm{k}{z}    [AVX512F,AVX512VL]
//
// Operands are in (source, destination) order; all forms are EVEX-encoded
// with optional write-masking {k} and zeroing {z}. Register-to-register
// moves register both the load-form (0x6f) and store-form (0x7f) candidate
// encodings. Panics when no form matches.
func (self *Program) VMOVDQA32(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVDQA32", 2, Operands { v0, v1 })
    // VMOVDQA32 zmm, m512{k}{z}
    if isZMM(v0) && isM512kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x7f)                                    // opcode: store direction (register -> memory)
            m.mrsd(lcode(v[0]), addr(v[1]), 64)             // trailing 64: disp8 compression scale (full zmm)
        })
    }
    // VMOVDQA32 zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                        // EVEX prefix marker
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))      // P0: inverted register-extension bits, map 0F
            m.emit(0x7d)                                                                        // P1: W=0 (32-bit elements), pp=01 (66 prefix)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                                     // P2: z bit + mask reg + 512-bit length
            m.emit(0x6f)                                                                        // load form
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                                       // ModRM: mod=11 register-direct
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x7f)                                                                        // store form
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQA32 m512, zmm{k}{z}
    if isM512(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x6f)                                    // opcode: load direction (memory -> register)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VMOVDQA32 xmm, m128{k}{z}
    if isEVEXXMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)
        })
    }
    // VMOVDQA32 xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)                                     // P2: 128-bit length
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQA32 ymm, m256{k}{z}
    if isEVEXYMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 32)
        })
    }
    // VMOVDQA32 ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)                                     // P2: 256-bit length
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQA32 m128, xmm{k}{z}
    if isM128(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VMOVDQA32 m256, ymm{k}{z}
    if isM256(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VMOVDQA32")
    }
    return p
}
 57790  
// VMOVDQA64 performs "Move Aligned Quadword Values".
//
// Mnemonic        : VMOVDQA64
// Supported forms : (9 forms)
//
//    * VMOVDQA64 zmm, m512{k}{z}    [AVX512F]
//    * VMOVDQA64 zmm, zmm{k}{z}     [AVX512F]
//    * VMOVDQA64 m512, zmm{k}{z}    [AVX512F]
//    * VMOVDQA64 xmm, m128{k}{z}    [AVX512F,AVX512VL]
//    * VMOVDQA64 xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVDQA64 ymm, m256{k}{z}    [AVX512F,AVX512VL]
//    * VMOVDQA64 ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVDQA64 m128, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VMOVDQA64 m256, ymm{k}{z}    [AVX512F,AVX512VL]
//
// Identical in structure to VMOVDQA32 in this file except for the EVEX W
// bit (P1 byte 0xfd vs 0x7d, evex() arg 0x85 vs 0x05), selecting 64-bit
// element masking. Operands are in (source, destination) order; panics
// when no form matches.
func (self *Program) VMOVDQA64(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVDQA64", 2, Operands { v0, v1 })
    // VMOVDQA64 zmm, m512{k}{z}
    if isZMM(v0) && isM512kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x7f)                                    // opcode: store direction (register -> memory)
            m.mrsd(lcode(v[0]), addr(v[1]), 64)             // trailing 64: disp8 compression scale (full zmm)
        })
    }
    // VMOVDQA64 zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                        // EVEX prefix marker
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))      // P0: inverted register-extension bits, map 0F
            m.emit(0xfd)                                                                        // P1: W=1 (64-bit elements), pp=01 (66 prefix)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                                     // P2: z bit + mask reg + 512-bit length
            m.emit(0x6f)                                                                        // load form
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                                       // ModRM: mod=11 register-direct
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x7f)                                                                        // store form
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQA64 m512, zmm{k}{z}
    if isM512(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x6f)                                    // opcode: load direction (memory -> register)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VMOVDQA64 xmm, m128{k}{z}
    if isEVEXXMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)
        })
    }
    // VMOVDQA64 xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)                                     // P2: 128-bit length
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQA64 ymm, m256{k}{z}
    if isEVEXYMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 32)
        })
    }
    // VMOVDQA64 ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)                                     // P2: 256-bit length
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQA64 m128, xmm{k}{z}
    if isM128(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VMOVDQA64 m256, ymm{k}{z}
    if isM256(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VMOVDQA64")
    }
    return p
}
 57936  
// VMOVDQU performs "Move Unaligned Double Quadword".
//
// Mnemonic        : VMOVDQU
// Supported forms : (6 forms)
//
//    * VMOVDQU xmm, xmm     [AVX]
//    * VMOVDQU m128, xmm    [AVX]
//    * VMOVDQU ymm, ymm     [AVX]
//    * VMOVDQU m256, ymm    [AVX]
//    * VMOVDQU xmm, m128    [AVX]
//    * VMOVDQU ymm, m256    [AVX]
//
// For register-to-register forms two equivalent encodings are registered:
// the "load" opcode 0x6f (destination in ModRM.reg) and the "store" opcode
// 0x7f (source in ModRM.reg); the encoder may select either one.
func (self *Program) VMOVDQU(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVDQU", 2, Operands { v0, v1 })
    // VMOVDQU xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // load form (opcode 0x6f): v1 in ModRM.reg, v0 in ModRM.rm
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[1]), v[0], 0)
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        // store form (opcode 0x7f): v0 in ModRM.reg, v1 in ModRM.rm
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[0]), v[1], 0)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQU m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x6f)
            // NOTE(review): trailing 1 appears to be the disp8 scale factor
            // (VEX has no disp8*N compression) — confirm against m.mrsd.
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVDQU ymm, ymm
    if isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // load form (opcode 0x6f)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(6, hcode(v[1]), v[0], 0)
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        // store form (opcode 0x7f)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(6, hcode(v[0]), v[1], 0)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQU m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(6, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVDQU xmm, m128
    if isXMM(v0) && isM128(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVDQU ymm, m256
    if isYMM(v0) && isM256(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(6, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No form matched the operand types: reject at build time.
    if p.len == 0 {
        panic("invalid operands for VMOVDQU")
    }
    return p
}
 58026  
// VMOVDQU16 performs "Move Unaligned Word Values".
//
// Mnemonic        : VMOVDQU16
// Supported forms : (9 forms)
//
//    * VMOVDQU16 zmm, m512{k}{z}    [AVX512BW]
//    * VMOVDQU16 zmm, zmm{k}{z}     [AVX512BW]
//    * VMOVDQU16 m512, zmm{k}{z}    [AVX512BW]
//    * VMOVDQU16 xmm, m128{k}{z}    [AVX512BW,AVX512VL]
//    * VMOVDQU16 xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VMOVDQU16 ymm, m256{k}{z}    [AVX512BW,AVX512VL]
//    * VMOVDQU16 ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VMOVDQU16 m128, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VMOVDQU16 m256, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Register-to-register forms register two equivalent encodings (load
// opcode 0x6f and store opcode 0x7f); memory forms pass the vector width
// in bytes (16/32/64) to m.mrsd — presumably the EVEX disp8*N scale.
func (self *Program) VMOVDQU16(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVDQU16", 2, Operands { v0, v1 })
    // VMOVDQU16 zmm, m512{k}{z}
    if isZMM(v0) && isM512kz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 64)
        })
    }
    // VMOVDQU16 zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        // load form (opcode 0x6f); the 4-byte EVEX prefix is assembled by
        // hand: 0x62, then a byte carrying the register extension bits,
        // then map/prefix bits (0xff), then mask/zeroing/length bits.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xff)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        // store form (opcode 0x7f) with the operand roles swapped
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xff)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQU16 m512, zmm{k}{z}
    if isM512(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VMOVDQU16 xmm, m128{k}{z}
    if isEVEXXMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)
        })
    }
    // VMOVDQU16 xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        // load form (opcode 0x6f); 0x08 selects the 128-bit vector length
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xff)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        // store form (opcode 0x7f)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xff)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQU16 ymm, m256{k}{z}
    if isEVEXYMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 32)
        })
    }
    // VMOVDQU16 ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        // load form (opcode 0x6f); 0x28 selects the 256-bit vector length
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xff)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        // store form (opcode 0x7f)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xff)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQU16 m128, xmm{k}{z}
    if isM128(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VMOVDQU16 m256, ymm{k}{z}
    if isM256(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // No form matched the operand types: reject at build time.
    if p.len == 0 {
        panic("invalid operands for VMOVDQU16")
    }
    return p
}
 58172  
// VMOVDQU32 performs "Move Unaligned Doubleword Values".
//
// Mnemonic        : VMOVDQU32
// Supported forms : (9 forms)
//
//    * VMOVDQU32 zmm, m512{k}{z}    [AVX512F]
//    * VMOVDQU32 zmm, zmm{k}{z}     [AVX512F]
//    * VMOVDQU32 m512, zmm{k}{z}    [AVX512F]
//    * VMOVDQU32 xmm, m128{k}{z}    [AVX512F,AVX512VL]
//    * VMOVDQU32 xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVDQU32 ymm, m256{k}{z}    [AVX512F,AVX512VL]
//    * VMOVDQU32 ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVDQU32 m128, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VMOVDQU32 m256, ymm{k}{z}    [AVX512F,AVX512VL]
//
// Structure mirrors VMOVDQU16: register-to-register forms register both the
// load (0x6f) and store (0x7f) encodings; memory forms pass the vector
// width in bytes (16/32/64) to m.mrsd — presumably the EVEX disp8*N scale.
func (self *Program) VMOVDQU32(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVDQU32", 2, Operands { v0, v1 })
    // VMOVDQU32 zmm, m512{k}{z}
    if isZMM(v0) && isM512kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 64)
        })
    }
    // VMOVDQU32 zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // load form (opcode 0x6f); hand-assembled EVEX prefix, map/prefix
        // byte 0x7e (vs 0xff for VMOVDQU16 — the W/pp bits differ per width)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        // store form (opcode 0x7f) with the operand roles swapped
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQU32 m512, zmm{k}{z}
    if isM512(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VMOVDQU32 xmm, m128{k}{z}
    if isEVEXXMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)
        })
    }
    // VMOVDQU32 xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // load form (opcode 0x6f); 0x08 selects the 128-bit vector length
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        // store form (opcode 0x7f)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQU32 ymm, m256{k}{z}
    if isEVEXYMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 32)
        })
    }
    // VMOVDQU32 ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // load form (opcode 0x6f); 0x28 selects the 256-bit vector length
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        // store form (opcode 0x7f)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQU32 m128, xmm{k}{z}
    if isM128(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VMOVDQU32 m256, ymm{k}{z}
    if isM256(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // No form matched the operand types: reject at build time.
    if p.len == 0 {
        panic("invalid operands for VMOVDQU32")
    }
    return p
}
 58318  
// VMOVDQU64 performs "Move Unaligned Quadword Values".
//
// Mnemonic        : VMOVDQU64
// Supported forms : (9 forms)
//
//    * VMOVDQU64 zmm, m512{k}{z}    [AVX512F]
//    * VMOVDQU64 zmm, zmm{k}{z}     [AVX512F]
//    * VMOVDQU64 m512, zmm{k}{z}    [AVX512F]
//    * VMOVDQU64 xmm, m128{k}{z}    [AVX512F,AVX512VL]
//    * VMOVDQU64 xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVDQU64 ymm, m256{k}{z}    [AVX512F,AVX512VL]
//    * VMOVDQU64 ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVDQU64 m128, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VMOVDQU64 m256, ymm{k}{z}    [AVX512F,AVX512VL]
//
// Structure mirrors VMOVDQU32; only the EVEX map/prefix constants differ
// (0x86 to m.evex, hand-assembled prefix byte 0xfe), which presumably sets
// EVEX.W=1 for the 64-bit element variant. Register-to-register forms
// register both the load (0x6f) and store (0x7f) encodings.
func (self *Program) VMOVDQU64(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVDQU64", 2, Operands { v0, v1 })
    // VMOVDQU64 zmm, m512{k}{z}
    if isZMM(v0) && isM512kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x86, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 64)
        })
    }
    // VMOVDQU64 zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // load form (opcode 0x6f) with hand-assembled EVEX prefix
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        // store form (opcode 0x7f) with the operand roles swapped
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfe)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQU64 m512, zmm{k}{z}
    if isM512(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x86, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VMOVDQU64 xmm, m128{k}{z}
    if isEVEXXMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x86, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)
        })
    }
    // VMOVDQU64 xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // load form (opcode 0x6f); 0x08 selects the 128-bit vector length
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        // store form (opcode 0x7f)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfe)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQU64 ymm, m256{k}{z}
    if isEVEXYMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x86, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 32)
        })
    }
    // VMOVDQU64 ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // load form (opcode 0x6f); 0x28 selects the 256-bit vector length
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        // store form (opcode 0x7f)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfe)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQU64 m128, xmm{k}{z}
    if isM128(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x86, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VMOVDQU64 m256, ymm{k}{z}
    if isM256(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x86, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // No form matched the operand types: reject at build time.
    if p.len == 0 {
        panic("invalid operands for VMOVDQU64")
    }
    return p
}
 58464  
// VMOVDQU8 performs "Move Unaligned Byte Values".
//
// Mnemonic        : VMOVDQU8
// Supported forms : (9 forms)
//
//    * VMOVDQU8 zmm, m512{k}{z}    [AVX512BW]
//    * VMOVDQU8 zmm, zmm{k}{z}     [AVX512BW]
//    * VMOVDQU8 m512, zmm{k}{z}    [AVX512BW]
//    * VMOVDQU8 xmm, m128{k}{z}    [AVX512BW,AVX512VL]
//    * VMOVDQU8 xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VMOVDQU8 ymm, m256{k}{z}    [AVX512BW,AVX512VL]
//    * VMOVDQU8 ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VMOVDQU8 m128, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VMOVDQU8 m256, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Structure mirrors VMOVDQU16 with different EVEX map/prefix constants
// (0x07 to m.evex, hand-assembled prefix byte 0x7f — note this 0x7f is a
// prefix byte, distinct from the 0x7f store opcode emitted later).
// Register-to-register forms register both load (0x6f) and store (0x7f)
// encodings.
func (self *Program) VMOVDQU8(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVDQU8", 2, Operands { v0, v1 })
    // VMOVDQU8 zmm, m512{k}{z}
    if isZMM(v0) && isM512kz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x07, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 64)
        })
    }
    // VMOVDQU8 zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        // load form (opcode 0x6f) with hand-assembled EVEX prefix
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7f)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        // store form (opcode 0x7f) with the operand roles swapped
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7f)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQU8 m512, zmm{k}{z}
    if isM512(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x07, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VMOVDQU8 xmm, m128{k}{z}
    if isEVEXXMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x07, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)
        })
    }
    // VMOVDQU8 xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        // load form (opcode 0x6f); 0x08 selects the 128-bit vector length
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7f)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        // store form (opcode 0x7f)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7f)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQU8 ymm, m256{k}{z}
    if isEVEXYMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x07, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x7f)
            m.mrsd(lcode(v[0]), addr(v[1]), 32)
        })
    }
    // VMOVDQU8 ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        // load form (opcode 0x6f); 0x28 selects the 256-bit vector length
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7f)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x6f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        // store form (opcode 0x7f)
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7f)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVDQU8 m128, xmm{k}{z}
    if isM128(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x07, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VMOVDQU8 m256, ymm{k}{z}
    if isM256(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x07, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x6f)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // No form matched the operand types: reject at build time.
    if p.len == 0 {
        panic("invalid operands for VMOVDQU8")
    }
    return p
}
 58610  
// VMOVHLPS performs "Move Packed Single-Precision Floating-Point Values High to Low".
//
// Mnemonic        : VMOVHLPS
// Supported forms : (2 forms)
//
//    * VMOVHLPS xmm, xmm, xmm    [AVX]
//    * VMOVHLPS xmm, xmm, xmm    [AVX512F]
//
// Both forms take the same operand shapes; the predicate pair
// isXMM/isEVEXXMM distinguishes plain AVX registers from EVEX-extended
// ones (xmm16-xmm31), selecting the VEX or EVEX encoding respectively.
func (self *Program) VMOVHLPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VMOVHLPS", 3, Operands { v0, v1, v2 })
    // VMOVHLPS xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX encoding: opcode 0x12, v1 carried in VEX.vvvv via hlcode
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x12)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMOVHLPS xmm, xmm, xmm
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX encoding assembled by hand: 0x62 escape, register extension
        // bits, vvvv byte, mask/length byte, then opcode 0x12 and ModRM
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | 0x00)
            m.emit(0x12)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the operand types: reject at build time.
    if p.len == 0 {
        panic("invalid operands for VMOVHLPS")
    }
    return p
}
 58649  
// VMOVHPD performs "Move High Packed Double-Precision Floating-Point Value".
//
// Mnemonic        : VMOVHPD
// Supported forms : (4 forms)
//
//    * VMOVHPD xmm, m64         [AVX]
//    * VMOVHPD m64, xmm, xmm    [AVX]
//    * VMOVHPD xmm, m64         [AVX512F]
//    * VMOVHPD m64, xmm, xmm    [AVX512F]
//
// The instruction takes either 2 operands (store form, opcode 0x17) or
// 3 operands (merge/load form, opcode 0x16), hence the variadic trailing
// parameter; any other arity panics.
func (self *Program) VMOVHPD(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VMOVHPD", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VMOVHPD", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VMOVHPD takes 2 or 3 operands")
    }
    // VMOVHPD xmm, m64 (VEX store form)
    if len(vv) == 0 && isXMM(v0) && isM64(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x17)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVHPD m64, xmm, xmm (VEX load/merge form)
    if len(vv) == 1 && isM64(v0) && isXMM(v1) && isXMM(vv[0]) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x16)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMOVHPD xmm, m64 (EVEX store form; mrsd scale 8 — presumably the
    // disp8*8 compression unit for a 64-bit memory operand)
    if len(vv) == 0 && isEVEXXMM(v0) && isM64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[0]), addr(v[1]), 0, 0, 0, 0)
            m.emit(0x17)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VMOVHPD m64, xmm, xmm (EVEX load/merge form)
    if len(vv) == 1 && isM64(v0) && isEVEXXMM(v1) && isEVEXXMM(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), 0, 0, 0)
            m.emit(0x16)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // No form matched the operand types: reject at build time.
    if p.len == 0 {
        panic("invalid operands for VMOVHPD")
    }
    return p
}
 58712  
// VMOVHPS performs "Move High Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VMOVHPS
// Supported forms : (4 forms)
//
//    * VMOVHPS xmm, m64         [AVX]
//    * VMOVHPS m64, xmm, xmm    [AVX]
//    * VMOVHPS xmm, m64         [AVX512F]
//    * VMOVHPS m64, xmm, xmm    [AVX512F]
//
// The 2-operand forms store to memory (opcode 0x17); the 3-operand forms
// load from memory (opcode 0x16). Unlike VMOVHPD, the prefix selector here
// is 0/0x04 (no 66 prefix) since this is the single-precision variant.
func (self *Program) VMOVHPS(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // VMOVHPS accepts either 2 operands (store form) or 3 operands (load form).
    switch len(vv) {
        case 0  : p = self.alloc("VMOVHPS", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VMOVHPS", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VMOVHPS takes 2 or 3 operands")
    }
    // VMOVHPS xmm, m64
    // AVX store form, 2-byte VEX prefix.
    if len(vv) == 0 && isXMM(v0) && isM64(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x17)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVHPS m64, xmm, xmm
    // AVX load form; v[1] supplies the VEX.vvvv source register.
    if len(vv) == 1 && isM64(v0) && isXMM(v1) && isXMM(vv[0]) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x16)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMOVHPS xmm, m64
    // AVX-512 store form, EVEX-encoded; memory scale 8 (EVEX disp8*N compression).
    if len(vv) == 0 && isEVEXXMM(v0) && isM64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[0]), addr(v[1]), 0, 0, 0, 0)
            m.emit(0x17)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VMOVHPS m64, xmm, xmm
    // AVX-512 load form, EVEX-encoded.
    if len(vv) == 1 && isM64(v0) && isEVEXXMM(v1) && isEVEXXMM(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), 0, 0, 0)
            m.emit(0x16)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // No encoder matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VMOVHPS")
    }
    return p
}
 58775  
// VMOVLHPS performs "Move Packed Single-Precision Floating-Point Values Low to High".
//
// Mnemonic        : VMOVLHPS
// Supported forms : (2 forms)
//
//    * VMOVLHPS xmm, xmm, xmm    [AVX]
//    * VMOVLHPS xmm, xmm, xmm    [AVX512F]
//
// Register-only instruction (opcode 0x16 with a register ModRM byte);
// the two forms differ only in prefix encoding (VEX vs EVEX).
func (self *Program) VMOVLHPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VMOVLHPS", 3, Operands { v0, v1, v2 })
    // VMOVLHPS xmm, xmm, xmm
    // AVX form, 2-byte VEX prefix; 0xc0|... builds the reg-reg ModRM byte.
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x16)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMOVLHPS xmm, xmm, xmm
    // AVX-512 form: the 4-byte EVEX prefix is emitted by hand here
    // (register-only, so no compressed-displacement handling is needed).
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | 0x00)
            m.emit(0x16)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VMOVLHPS")
    }
    return p
}
 58814  
// VMOVLPD performs "Move Low Packed Double-Precision Floating-Point Value".
//
// Mnemonic        : VMOVLPD
// Supported forms : (4 forms)
//
//    * VMOVLPD xmm, m64         [AVX]
//    * VMOVLPD m64, xmm, xmm    [AVX]
//    * VMOVLPD xmm, m64         [AVX512F]
//    * VMOVLPD m64, xmm, xmm    [AVX512F]
//
// Mirrors VMOVHPD but for the low quadword: the store form uses opcode
// 0x13 and the load form uses opcode 0x12.
func (self *Program) VMOVLPD(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // VMOVLPD accepts either 2 operands (store form) or 3 operands (load form).
    switch len(vv) {
        case 0  : p = self.alloc("VMOVLPD", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VMOVLPD", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VMOVLPD takes 2 or 3 operands")
    }
    // VMOVLPD xmm, m64
    // AVX store form, 2-byte VEX prefix.
    if len(vv) == 0 && isXMM(v0) && isM64(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x13)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVLPD m64, xmm, xmm
    // AVX load form; v[1] supplies the VEX.vvvv source register.
    if len(vv) == 1 && isM64(v0) && isXMM(v1) && isXMM(vv[0]) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x12)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMOVLPD xmm, m64
    // AVX-512 store form, EVEX-encoded; memory scale 8 (EVEX disp8*N compression).
    if len(vv) == 0 && isEVEXXMM(v0) && isM64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[0]), addr(v[1]), 0, 0, 0, 0)
            m.emit(0x13)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VMOVLPD m64, xmm, xmm
    // AVX-512 load form, EVEX-encoded.
    if len(vv) == 1 && isM64(v0) && isEVEXXMM(v1) && isEVEXXMM(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), 0, 0, 0)
            m.emit(0x12)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // No encoder matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VMOVLPD")
    }
    return p
}
 58877  
// VMOVLPS performs "Move Low Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VMOVLPS
// Supported forms : (4 forms)
//
//    * VMOVLPS xmm, m64         [AVX]
//    * VMOVLPS m64, xmm, xmm    [AVX]
//    * VMOVLPS xmm, m64         [AVX512F]
//    * VMOVLPS m64, xmm, xmm    [AVX512F]
//
// Single-precision sibling of VMOVLPD (prefix selector 0/0x04 instead of
// 1/0x85): store form is opcode 0x13, load form is opcode 0x12.
func (self *Program) VMOVLPS(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // VMOVLPS accepts either 2 operands (store form) or 3 operands (load form).
    switch len(vv) {
        case 0  : p = self.alloc("VMOVLPS", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VMOVLPS", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VMOVLPS takes 2 or 3 operands")
    }
    // VMOVLPS xmm, m64
    // AVX store form, 2-byte VEX prefix.
    if len(vv) == 0 && isXMM(v0) && isM64(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x13)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVLPS m64, xmm, xmm
    // AVX load form; v[1] supplies the VEX.vvvv source register.
    if len(vv) == 1 && isM64(v0) && isXMM(v1) && isXMM(vv[0]) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x12)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMOVLPS xmm, m64
    // AVX-512 store form, EVEX-encoded; memory scale 8 (EVEX disp8*N compression).
    if len(vv) == 0 && isEVEXXMM(v0) && isM64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[0]), addr(v[1]), 0, 0, 0, 0)
            m.emit(0x13)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VMOVLPS m64, xmm, xmm
    // AVX-512 load form, EVEX-encoded.
    if len(vv) == 1 && isM64(v0) && isEVEXXMM(v1) && isEVEXXMM(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), 0, 0, 0)
            m.emit(0x12)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // No encoder matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VMOVLPS")
    }
    return p
}
 58940  
 58941  // VMOVMSKPD performs "Extract Packed Double-Precision Floating-Point Sign Mask".
 58942  //
 58943  // Mnemonic        : VMOVMSKPD
 58944  // Supported forms : (2 forms)
 58945  //
 58946  //    * VMOVMSKPD xmm, r32    [AVX]
 58947  //    * VMOVMSKPD ymm, r32    [AVX]
 58948  //
 58949  func (self *Program) VMOVMSKPD(v0 interface{}, v1 interface{}) *Instruction {
 58950      p := self.alloc("VMOVMSKPD", 2, Operands { v0, v1 })
 58951      // VMOVMSKPD xmm, r32
 58952      if isXMM(v0) && isReg32(v1) {
 58953          self.require(ISA_AVX)
 58954          p.domain = DomainAVX
 58955          p.add(0, func(m *_Encoding, v []interface{}) {
 58956              m.vex2(1, hcode(v[1]), v[0], 0)
 58957              m.emit(0x50)
 58958              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 58959          })
 58960      }
 58961      // VMOVMSKPD ymm, r32
 58962      if isYMM(v0) && isReg32(v1) {
 58963          self.require(ISA_AVX)
 58964          p.domain = DomainAVX
 58965          p.add(0, func(m *_Encoding, v []interface{}) {
 58966              m.vex2(5, hcode(v[1]), v[0], 0)
 58967              m.emit(0x50)
 58968              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 58969          })
 58970      }
 58971      if p.len == 0 {
 58972          panic("invalid operands for VMOVMSKPD")
 58973      }
 58974      return p
 58975  }
 58976  
 58977  // VMOVMSKPS performs "Extract Packed Single-Precision Floating-Point Sign Mask".
 58978  //
 58979  // Mnemonic        : VMOVMSKPS
 58980  // Supported forms : (2 forms)
 58981  //
 58982  //    * VMOVMSKPS xmm, r32    [AVX]
 58983  //    * VMOVMSKPS ymm, r32    [AVX]
 58984  //
 58985  func (self *Program) VMOVMSKPS(v0 interface{}, v1 interface{}) *Instruction {
 58986      p := self.alloc("VMOVMSKPS", 2, Operands { v0, v1 })
 58987      // VMOVMSKPS xmm, r32
 58988      if isXMM(v0) && isReg32(v1) {
 58989          self.require(ISA_AVX)
 58990          p.domain = DomainAVX
 58991          p.add(0, func(m *_Encoding, v []interface{}) {
 58992              m.vex2(0, hcode(v[1]), v[0], 0)
 58993              m.emit(0x50)
 58994              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 58995          })
 58996      }
 58997      // VMOVMSKPS ymm, r32
 58998      if isYMM(v0) && isReg32(v1) {
 58999          self.require(ISA_AVX)
 59000          p.domain = DomainAVX
 59001          p.add(0, func(m *_Encoding, v []interface{}) {
 59002              m.vex2(4, hcode(v[1]), v[0], 0)
 59003              m.emit(0x50)
 59004              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 59005          })
 59006      }
 59007      if p.len == 0 {
 59008          panic("invalid operands for VMOVMSKPS")
 59009      }
 59010      return p
 59011  }
 59012  
// VMOVNTDQ performs "Store Double Quadword Using Non-Temporal Hint".
//
// Mnemonic        : VMOVNTDQ
// Supported forms : (5 forms)
//
//    * VMOVNTDQ xmm, m128    [AVX]
//    * VMOVNTDQ ymm, m256    [AVX]
//    * VMOVNTDQ zmm, m512    [AVX512F]
//    * VMOVNTDQ xmm, m128    [AVX512F,AVX512VL]
//    * VMOVNTDQ ymm, m256    [AVX512F,AVX512VL]
//
// Store-only instruction (opcode 0xe7); the EVEX forms use memory scale
// factors of 16/32/64 matching the vector width (disp8*N compression).
func (self *Program) VMOVNTDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVNTDQ", 2, Operands { v0, v1 })
    // VMOVNTDQ xmm, m128
    // AVX 128-bit form, 2-byte VEX prefix.
    if isXMM(v0) && isM128(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[0]), addr(v[1]), 0)
            m.emit(0xe7)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVNTDQ ymm, m256
    // AVX 256-bit form (VEX.L set via prefix selector 5).
    if isYMM(v0) && isM256(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[0]), addr(v[1]), 0)
            m.emit(0xe7)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVNTDQ zmm, m512
    // AVX-512 512-bit form (EVEX.L'L = 0b10).
    if isZMM(v0) && isM512(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[0]), addr(v[1]), 0, 0, 0, 0)
            m.emit(0xe7)
            m.mrsd(lcode(v[0]), addr(v[1]), 64)
        })
    }
    // VMOVNTDQ xmm, m128
    // AVX-512VL 128-bit form (EVEX.L'L = 0b00).
    if isEVEXXMM(v0) && isM128(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[0]), addr(v[1]), 0, 0, 0, 0)
            m.emit(0xe7)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)
        })
    }
    // VMOVNTDQ ymm, m256
    // AVX-512VL 256-bit form (EVEX.L'L = 0b01).
    if isEVEXYMM(v0) && isM256(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[0]), addr(v[1]), 0, 0, 0, 0)
            m.emit(0xe7)
            m.mrsd(lcode(v[0]), addr(v[1]), 32)
        })
    }
    // No encoder matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VMOVNTDQ")
    }
    return p
}
 59081  
// VMOVNTDQA performs "Load Double Quadword Non-Temporal Aligned Hint".
//
// Mnemonic        : VMOVNTDQA
// Supported forms : (5 forms)
//
//    * VMOVNTDQA m128, xmm    [AVX]
//    * VMOVNTDQA m256, ymm    [AVX2]
//    * VMOVNTDQA m512, zmm    [AVX512F]
//    * VMOVNTDQA m128, xmm    [AVX512F,AVX512VL]
//    * VMOVNTDQA m256, ymm    [AVX512F,AVX512VL]
//
// Load-only instruction (opcode 0x2a). The VEX forms need the 3-byte VEX
// prefix because the opcode map selector is 0b10 (0F38 map); note the
// 256-bit VEX form requires AVX2, not AVX.
func (self *Program) VMOVNTDQA(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVNTDQA", 2, Operands { v0, v1 })
    // VMOVNTDQA m128, xmm
    // AVX 128-bit form, 3-byte VEX prefix.
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x2a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVNTDQA m256, ymm
    // AVX2 256-bit form, 3-byte VEX prefix.
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x2a)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVNTDQA m512, zmm
    // AVX-512 512-bit form; memory scale 64 (disp8*N compression).
    if isM512(v0) && isZMM(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x2a)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VMOVNTDQA m128, xmm
    // AVX-512VL 128-bit form.
    if isM128(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x2a)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VMOVNTDQA m256, ymm
    // AVX-512VL 256-bit form.
    if isM256(v0) && isEVEXYMM(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x2a)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // No encoder matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VMOVNTDQA")
    }
    return p
}
 59150  
// VMOVNTPD performs "Store Packed Double-Precision Floating-Point Values Using Non-Temporal Hint".
//
// Mnemonic        : VMOVNTPD
// Supported forms : (5 forms)
//
//    * VMOVNTPD xmm, m128    [AVX]
//    * VMOVNTPD ymm, m256    [AVX]
//    * VMOVNTPD zmm, m512    [AVX512F]
//    * VMOVNTPD xmm, m128    [AVX512F,AVX512VL]
//    * VMOVNTPD ymm, m256    [AVX512F,AVX512VL]
//
// Store-only instruction (opcode 0x2b); all EVEX forms use prefix selector
// 0x85 (double-precision variant), with the vector length selected by the
// third evex() argument.
func (self *Program) VMOVNTPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVNTPD", 2, Operands { v0, v1 })
    // VMOVNTPD xmm, m128
    // AVX 128-bit form, 2-byte VEX prefix.
    if isXMM(v0) && isM128(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x2b)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVNTPD ymm, m256
    // AVX 256-bit form.
    if isYMM(v0) && isM256(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x2b)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVNTPD zmm, m512
    // AVX-512 512-bit form; memory scale 64 (disp8*N compression).
    if isZMM(v0) && isM512(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b10, ehcode(v[0]), addr(v[1]), 0, 0, 0, 0)
            m.emit(0x2b)
            m.mrsd(lcode(v[0]), addr(v[1]), 64)
        })
    }
    // VMOVNTPD xmm, m128
    // AVX-512VL 128-bit form.
    if isEVEXXMM(v0) && isM128(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[0]), addr(v[1]), 0, 0, 0, 0)
            m.emit(0x2b)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)
        })
    }
    // VMOVNTPD ymm, m256
    // AVX-512VL 256-bit form.
    if isEVEXYMM(v0) && isM256(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[0]), addr(v[1]), 0, 0, 0, 0)
            m.emit(0x2b)
            m.mrsd(lcode(v[0]), addr(v[1]), 32)
        })
    }
    // No encoder matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VMOVNTPD")
    }
    return p
}
 59219  
 59220  // VMOVNTPS performs "Store Packed Single-Precision Floating-Point Values Using Non-Temporal Hint".
 59221  //
 59222  // Mnemonic        : VMOVNTPS
 59223  // Supported forms : (5 forms)
 59224  //
 59225  //    * VMOVNTPS xmm, m128    [AVX]
 59226  //    * VMOVNTPS ymm, m256    [AVX]
 59227  //    * VMOVNTPS zmm, m512    [AVX512F]
 59228  //    * VMOVNTPS xmm, m128    [AVX512F,AVX512VL]
 59229  //    * VMOVNTPS ymm, m256    [AVX512F,AVX512VL]
 59230  //
 59231  func (self *Program) VMOVNTPS(v0 interface{}, v1 interface{}) *Instruction {
 59232      p := self.alloc("VMOVNTPS", 2, Operands { v0, v1 })
 59233      // VMOVNTPS xmm, m128
 59234      if isXMM(v0) && isM128(v1) {
 59235          self.require(ISA_AVX)
 59236          p.domain = DomainAVX
 59237          p.add(0, func(m *_Encoding, v []interface{}) {
 59238              m.vex2(0, hcode(v[0]), addr(v[1]), 0)
 59239              m.emit(0x2b)
 59240              m.mrsd(lcode(v[0]), addr(v[1]), 1)
 59241          })
 59242      }
 59243      // VMOVNTPS ymm, m256
 59244      if isYMM(v0) && isM256(v1) {
 59245          self.require(ISA_AVX)
 59246          p.domain = DomainAVX
 59247          p.add(0, func(m *_Encoding, v []interface{}) {
 59248              m.vex2(4, hcode(v[0]), addr(v[1]), 0)
 59249              m.emit(0x2b)
 59250              m.mrsd(lcode(v[0]), addr(v[1]), 1)
 59251          })
 59252      }
 59253      // VMOVNTPS zmm, m512
 59254      if isZMM(v0) && isM512(v1) {
 59255          self.require(ISA_AVX512F)
 59256          p.domain = DomainAVX
 59257          p.add(0, func(m *_Encoding, v []interface{}) {
 59258              m.evex(0b01, 0x04, 0b10, ehcode(v[0]), addr(v[1]), 0, 0, 0, 0)
 59259              m.emit(0x2b)
 59260              m.mrsd(lcode(v[0]), addr(v[1]), 64)
 59261          })
 59262      }
 59263      // VMOVNTPS xmm, m128
 59264      if isEVEXXMM(v0) && isM128(v1) {
 59265          self.require(ISA_AVX512VL | ISA_AVX512F)
 59266          p.domain = DomainAVX
 59267          p.add(0, func(m *_Encoding, v []interface{}) {
 59268              m.evex(0b01, 0x05, 0b00, ehcode(v[0]), addr(v[1]), 0, 0, 0, 0)
 59269              m.emit(0x2b)
 59270              m.mrsd(lcode(v[0]), addr(v[1]), 16)
 59271          })
 59272      }
 59273      // VMOVNTPS ymm, m256
 59274      if isEVEXYMM(v0) && isM256(v1) {
 59275          self.require(ISA_AVX512VL | ISA_AVX512F)
 59276          p.domain = DomainAVX
 59277          p.add(0, func(m *_Encoding, v []interface{}) {
 59278              m.evex(0b01, 0x05, 0b01, ehcode(v[0]), addr(v[1]), 0, 0, 0, 0)
 59279              m.emit(0x2b)
 59280              m.mrsd(lcode(v[0]), addr(v[1]), 32)
 59281          })
 59282      }
 59283      if p.len == 0 {
 59284          panic("invalid operands for VMOVNTPS")
 59285      }
 59286      return p
 59287  }
 59288  
// VMOVQ performs "Move Quadword".
//
// Mnemonic        : VMOVQ
// Supported forms : (10 forms)
//
//    * VMOVQ xmm, r64    [AVX]
//    * VMOVQ r64, xmm    [AVX]
//    * VMOVQ xmm, xmm    [AVX]
//    * VMOVQ m64, xmm    [AVX]
//    * VMOVQ xmm, m64    [AVX]
//    * VMOVQ xmm, r64    [AVX512F]
//    * VMOVQ r64, xmm    [AVX512F]
//    * VMOVQ xmm, xmm    [AVX512F]
//    * VMOVQ m64, xmm    [AVX512F]
//    * VMOVQ xmm, m64    [AVX512F]
//
// Several forms register two alternative byte sequences via p.add (e.g.
// the 0x7e and 0xd6 opcode variants, which move in opposite ModRM
// directions); choosing between the alternatives is left to the encoder.
func (self *Program) VMOVQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVQ", 2, Operands { v0, v1 })
    // VMOVQ xmm, r64
    // AVX form, hand-emitted 3-byte VEX prefix (VEX.W is needed for r64).
    if isXMM(v0) && isReg64(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe1 ^ (hcode(v[0]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0xf9)
            m.emit(0x7e)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVQ r64, xmm
    // AVX form, hand-emitted 3-byte VEX prefix, opposite direction (0x6e).
    if isReg64(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe1 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9)
            m.emit(0x6e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVQ xmm, xmm
    // AVX register-register move: two equivalent encodings are registered
    // (0x7e load-direction and 0xd6 store-direction variants).
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[1]), v[0], 0)
            m.emit(0x7e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[0]), v[1], 0)
            m.emit(0xd6)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVQ m64, xmm
    // AVX load: two equivalent encodings (VEX2 0x7e and VEX3 0x6e).
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x7e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b1, 0x81, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x6e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVQ xmm, m64
    // AVX store: two equivalent encodings (VEX2 0xd6 and VEX3 0x7e).
    if isXMM(v0) && isM64(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[0]), addr(v[1]), 0)
            m.emit(0xd6)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b1, 0x81, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x7e)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVQ xmm, r64
    // AVX-512 form, hand-emitted 4-byte EVEX prefix.
    if isEVEXXMM(v0) && isReg64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfd)
            m.emit(0x08)
            m.emit(0x7e)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVQ r64, xmm
    // AVX-512 form, opposite direction (0x6e).
    if isReg64(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit(0x08)
            m.emit(0x6e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVQ xmm, xmm
    // AVX-512 register-register move: two equivalent hand-emitted EVEX
    // encodings (0x7e and 0xd6 variants).
    if isEVEXXMM(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit(0x08)
            m.emit(0x7e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfd)
            m.emit(0x08)
            m.emit(0xd6)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVQ m64, xmm
    // AVX-512 load: two equivalent EVEX encodings; memory scale 8
    // (disp8*N compression).
    if isM64(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x6e)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x86, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x7e)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VMOVQ xmm, m64
    // AVX-512 store: two equivalent EVEX encodings (0x7e and 0xd6).
    if isEVEXXMM(v0) && isM64(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[0]), addr(v[1]), 0, 0, 0, 0)
            m.emit(0x7e)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[0]), addr(v[1]), 0, 0, 0, 0)
            m.emit(0xd6)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // No encoder matched the supplied operand combination.
    if p.len == 0 {
        panic("invalid operands for VMOVQ")
    }
    return p
}
 59458  
// VMOVSD performs "Move Scalar Double-Precision Floating-Point Value".
//
// Mnemonic        : VMOVSD
// Supported forms : (6 forms)
//
//    * VMOVSD m64, xmm               [AVX]
//    * VMOVSD xmm, m64               [AVX]
//    * VMOVSD xmm, xmm, xmm          [AVX]
//    * VMOVSD xmm, m64{k}            [AVX512F]
//    * VMOVSD m64, xmm{k}{z}         [AVX512F]
//    * VMOVSD xmm, xmm, xmm{k}{z}    [AVX512F]
//
// The operands are matched against every supported form in turn; each form
// that matches registers one or more candidate encodings on the returned
// instruction, and the function panics if no form matched at all.
func (self *Program) VMOVSD(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The load/store forms take 2 operands; the register-merge form takes 3
    // (the extra operand arrives through the variadic tail).
    switch len(vv) {
        case 0  : p = self.alloc("VMOVSD", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VMOVSD", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VMOVSD takes 2 or 3 operands")
    }
    // VMOVSD m64, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX prefix, then opcode 0x10 (register destination in ModRM.reg,
            // memory source as r/m).
            m.vex2(3, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVSD xmm, m64
    if len(vv) == 0 && isXMM(v0) && isM64(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Opcode 0x11 is the store direction: register source, memory r/m.
            m.vex2(3, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVSD xmm, xmm, xmm
    if len(vv) == 1 && isXMM(v0) && isXMM(v1) && isXMM(vv[0]) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // Two equivalent register-register encodings are registered (opcode
        // 0x10 and its swapped-operand twin 0x11); the encoder is free to
        // choose between them when the instruction is assembled.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[0]), v[2], hlcode(v[1]))
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[2]))
        })
    }
    // VMOVSD xmm, m64{k}
    if len(vv) == 0 && isEVEXXMM(v0) && isM64k(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded masked store; disp8 is scaled by 8 (64-bit element).
            m.evex(0b01, 0x87, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VMOVSD m64, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded masked (optionally zeroing) load.
            m.evex(0b01, 0x87, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VMOVSD xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // Hand-rolled EVEX prefix: 0x62 followed by three payload bytes built
        // from the operands' register/mask codes, then opcode and ModRM.
        // As with the VEX form, both the 0x10 and swapped 0x11 encodings are
        // registered as alternatives.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xff ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xff ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[2]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VMOVSD")
    }
    return p
}
 59559  
// VMOVSHDUP performs "Move Packed Single-FP High and Duplicate".
//
// Mnemonic        : VMOVSHDUP
// Supported forms : (10 forms)
//
//    * VMOVSHDUP xmm, xmm           [AVX]
//    * VMOVSHDUP m128, xmm          [AVX]
//    * VMOVSHDUP ymm, ymm           [AVX]
//    * VMOVSHDUP m256, ymm          [AVX]
//    * VMOVSHDUP zmm, zmm{k}{z}     [AVX512F]
//    * VMOVSHDUP m512, zmm{k}{z}    [AVX512F]
//    * VMOVSHDUP xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVSHDUP ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVSHDUP m128, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VMOVSHDUP m256, ymm{k}{z}    [AVX512F,AVX512VL]
//
// The operands are matched against every supported form in turn; each form
// that matches registers a candidate encoding on the returned instruction,
// and the function panics if no form matched at all. All forms share the
// same opcode byte (0x16) and differ only in prefix encoding and operand
// size.
func (self *Program) VMOVSHDUP(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVSHDUP", 2, Operands { v0, v1 })
    // VMOVSHDUP xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[1]), v[0], 0)
            m.emit(0x16)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVSHDUP m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x16)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVSHDUP ymm, ymm
    if isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // vex2 selector 6 (vs. 2 for the xmm forms) presumably sets the
            // 256-bit vector length bit -- see m.vex2.
            m.vex2(6, hcode(v[1]), v[0], 0)
            m.emit(0x16)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVSHDUP m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(6, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x16)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVSHDUP zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62 plus three payload bytes built
            // from the operands' register/mask codes (0x48 selects 512-bit
            // length in the third payload byte).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x16)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVSHDUP m512, zmm{k}{z}
    if isM512(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // disp8 compression scale is 64 bytes for the 512-bit memory form.
            m.evex(0b01, 0x06, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x16)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VMOVSHDUP xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x16)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVSHDUP ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x16)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVSHDUP m128, xmm{k}{z}
    if isM128(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x16)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VMOVSHDUP m256, ymm{k}{z}
    if isM256(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x16)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VMOVSHDUP")
    }
    return p
}
 59692  
// VMOVSLDUP performs "Move Packed Single-FP Low and Duplicate".
//
// Mnemonic        : VMOVSLDUP
// Supported forms : (10 forms)
//
//    * VMOVSLDUP xmm, xmm           [AVX]
//    * VMOVSLDUP m128, xmm          [AVX]
//    * VMOVSLDUP ymm, ymm           [AVX]
//    * VMOVSLDUP m256, ymm          [AVX]
//    * VMOVSLDUP zmm, zmm{k}{z}     [AVX512F]
//    * VMOVSLDUP m512, zmm{k}{z}    [AVX512F]
//    * VMOVSLDUP xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVSLDUP ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVSLDUP m128, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VMOVSLDUP m256, ymm{k}{z}    [AVX512F,AVX512VL]
//
// Structurally identical to VMOVSHDUP; the only difference is the opcode
// byte (0x12 instead of 0x16). Operands are matched against every supported
// form in turn, each match registers a candidate encoding, and the function
// panics if nothing matched.
func (self *Program) VMOVSLDUP(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVSLDUP", 2, Operands { v0, v1 })
    // VMOVSLDUP xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[1]), v[0], 0)
            m.emit(0x12)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVSLDUP m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x12)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVSLDUP ymm, ymm
    if isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // vex2 selector 6 (vs. 2 for the xmm forms) presumably sets the
            // 256-bit vector length bit -- see m.vex2.
            m.vex2(6, hcode(v[1]), v[0], 0)
            m.emit(0x12)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVSLDUP m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(6, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x12)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVSLDUP zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62 plus three payload bytes built
            // from the operands' register/mask codes (0x48 selects 512-bit
            // length in the third payload byte).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x12)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVSLDUP m512, zmm{k}{z}
    if isM512(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // disp8 compression scale is 64 bytes for the 512-bit memory form.
            m.evex(0b01, 0x06, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x12)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VMOVSLDUP xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x12)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVSLDUP ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x12)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VMOVSLDUP m128, xmm{k}{z}
    if isM128(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x12)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VMOVSLDUP m256, ymm{k}{z}
    if isM256(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x12)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VMOVSLDUP")
    }
    return p
}
 59825  
// VMOVSS performs "Move Scalar Single-Precision Floating-Point Values".
//
// Mnemonic        : VMOVSS
// Supported forms : (6 forms)
//
//    * VMOVSS m32, xmm               [AVX]
//    * VMOVSS xmm, m32               [AVX]
//    * VMOVSS xmm, xmm, xmm          [AVX]
//    * VMOVSS xmm, m32{k}            [AVX512F]
//    * VMOVSS m32, xmm{k}{z}         [AVX512F]
//    * VMOVSS xmm, xmm, xmm{k}{z}    [AVX512F]
//
// Single-precision counterpart of VMOVSD (32-bit element, so memory operands
// are m32 and the EVEX disp8 scale is 4). The operands are matched against
// every supported form in turn; each match registers a candidate encoding,
// and the function panics if nothing matched.
func (self *Program) VMOVSS(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The load/store forms take 2 operands; the register-merge form takes 3
    // (the extra operand arrives through the variadic tail).
    switch len(vv) {
        case 0  : p = self.alloc("VMOVSS", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VMOVSS", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VMOVSS takes 2 or 3 operands")
    }
    // VMOVSS m32, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Opcode 0x10 is the load direction (register destination in
            // ModRM.reg, memory source as r/m).
            m.vex2(2, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVSS xmm, m32
    if len(vv) == 0 && isXMM(v0) && isM32(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Opcode 0x11 is the store direction: register source, memory r/m.
            m.vex2(2, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVSS xmm, xmm, xmm
    if len(vv) == 1 && isXMM(v0) && isXMM(v1) && isXMM(vv[0]) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // Two equivalent register-register encodings are registered (opcode
        // 0x10 and its swapped-operand twin 0x11); the encoder may choose
        // either form when assembling.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[0]), v[2], hlcode(v[1]))
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[2]))
        })
    }
    // VMOVSS xmm, m32{k}
    if len(vv) == 0 && isEVEXXMM(v0) && isM32k(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded masked store; disp8 is scaled by 4 (32-bit element).
            m.evex(0b01, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // VMOVSS m32, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded masked (optionally zeroing) load.
            m.evex(0b01, 0x06, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VMOVSS xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // Hand-rolled EVEX prefix: 0x62 followed by three payload bytes built
        // from the operands' register/mask codes, then opcode and ModRM. Both
        // the 0x10 and swapped 0x11 encodings are registered as alternatives.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[2]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VMOVSS")
    }
    return p
}
 59926  
// VMOVUPD performs "Move Unaligned Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VMOVUPD
// Supported forms : (15 forms)
//
//    * VMOVUPD xmm, xmm           [AVX]
//    * VMOVUPD m128, xmm          [AVX]
//    * VMOVUPD ymm, ymm           [AVX]
//    * VMOVUPD m256, ymm          [AVX]
//    * VMOVUPD xmm, m128          [AVX]
//    * VMOVUPD ymm, m256          [AVX]
//    * VMOVUPD zmm, m512{k}{z}    [AVX512F]
//    * VMOVUPD zmm, zmm{k}{z}     [AVX512F]
//    * VMOVUPD m512, zmm{k}{z}    [AVX512F]
//    * VMOVUPD xmm, m128{k}{z}    [AVX512F,AVX512VL]
//    * VMOVUPD xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVUPD ymm, m256{k}{z}    [AVX512F,AVX512VL]
//    * VMOVUPD ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVUPD m128, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VMOVUPD m256, ymm{k}{z}    [AVX512F,AVX512VL]
//
// The operands are matched against every supported form in turn; each form
// that matches registers one or more candidate encodings on the returned
// instruction, and the function panics if no form matched. Throughout,
// opcode 0x10 is the load direction (register destination in ModRM.reg) and
// 0x11 the store direction; register-register forms register both as
// equivalent alternatives.
func (self *Program) VMOVUPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVUPD", 2, Operands { v0, v1 })
    // VMOVUPD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // Both move directions encode this form; either may be emitted.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[1]), v[0], 0)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[0]), v[1], 0)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVUPD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVUPD ymm, ymm
    if isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // vex2 selector 5 (vs. 1 for the xmm forms) presumably sets the
        // 256-bit vector length bit -- see m.vex2.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[1]), v[0], 0)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[0]), v[1], 0)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVUPD m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVUPD xmm, m128
    if isXMM(v0) && isM128(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVUPD ymm, m256
    if isYMM(v0) && isM256(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVUPD zmm, m512{k}{z}
    if isZMM(v0) && isM512kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded masked 512-bit store; disp8 compression scale 64.
            m.evex(0b01, 0x85, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 64)
        })
    }
    // VMOVUPD zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // Hand-rolled EVEX prefix: 0x62 plus three payload bytes built from
        // the operands' register/mask codes (0x48 selects 512-bit length in
        // the third payload byte). Both move directions are registered.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVUPD m512, zmm{k}{z}
    if isM512(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VMOVUPD xmm, m128{k}{z}
    if isEVEXXMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit masked store; disp8 compression scale 16.
            m.evex(0b01, 0x85, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)
        })
    }
    // VMOVUPD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVUPD ymm, m256{k}{z}
    if isEVEXYMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit masked store; disp8 compression scale 32.
            m.evex(0b01, 0x85, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 32)
        })
    }
    // VMOVUPD ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVUPD m128, xmm{k}{z}
    if isM128(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VMOVUPD m256, ymm{k}{z}
    if isM256(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VMOVUPD")
    }
    return p
}
 60148  
// VMOVUPS performs "Move Unaligned Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VMOVUPS
// Supported forms : (15 forms)
//
//    * VMOVUPS xmm, xmm           [AVX]
//    * VMOVUPS m128, xmm          [AVX]
//    * VMOVUPS ymm, ymm           [AVX]
//    * VMOVUPS m256, ymm          [AVX]
//    * VMOVUPS xmm, m128          [AVX]
//    * VMOVUPS ymm, m256          [AVX]
//    * VMOVUPS zmm, m512{k}{z}    [AVX512F]
//    * VMOVUPS zmm, zmm{k}{z}     [AVX512F]
//    * VMOVUPS m512, zmm{k}{z}    [AVX512F]
//    * VMOVUPS xmm, m128{k}{z}    [AVX512F,AVX512VL]
//    * VMOVUPS xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVUPS ymm, m256{k}{z}    [AVX512F,AVX512VL]
//    * VMOVUPS ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VMOVUPS m128, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VMOVUPS m256, ymm{k}{z}    [AVX512F,AVX512VL]
//
// Operands are (source, destination): v0 is the source, v1 the destination.
// Every form whose operand types match registers one or more candidate byte
// encodings on the instruction via p.add. Register-to-register forms register
// both the load-form opcode (0x10, memory-to-register direction) and the
// store-form opcode (0x11) with the ModRM reg/rm roles swapped — both encode
// the same move, and the final pick is made elsewhere by the encoder.
func (self *Program) VMOVUPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VMOVUPS", 2, Operands { v0, v1 })
    // VMOVUPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // load-form: VEX.0F 10 /r, v1 in ModRM.reg, v0 in ModRM.rm
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[1]), v[0], 0)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        // equivalent store-form: VEX.0F 11 /r with reg/rm swapped
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[0]), v[1], 0)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVUPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVUPS ymm, ymm
    if isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // vex2 first argument 4 selects the 256-bit (VEX.L=1) variant here;
        // the 128-bit forms above pass 0.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[1]), v[0], 0)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        // equivalent store-form encoding with reg/rm swapped
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[0]), v[1], 0)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVUPS m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VMOVUPS xmm, m128 (register-to-memory store, opcode 0x11)
    if isXMM(v0) && isM128(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVUPS ymm, m256
    if isYMM(v0) && isM256(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[0]), addr(v[1]), 0)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // VMOVUPS zmm, m512{k}{z} — EVEX store with opmask/zeroing; the final
    // mrsd argument (64) matches the 64-byte operand width and scales the
    // compressed displacement.
    if isZMM(v0) && isM512kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 64)
        })
    }
    // VMOVUPS zmm, zmm{k}{z} — EVEX prefix (0x62) and payload bytes are
    // emitted manually for the register-register case.
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        // equivalent store-form (0x11) EVEX encoding with reg/rm swapped
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVUPS m512, zmm{k}{z}
    if isM512(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VMOVUPS xmm, m128{k}{z} — VL forms use disp8 scale 16 (operand bytes)
    if isEVEXXMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)
        })
    }
    // VMOVUPS xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        // equivalent store-form (0x11) encoding with reg/rm swapped
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVUPS ymm, m256{k}{z}
    if isEVEXYMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 32)
        })
    }
    // VMOVUPS ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
        // equivalent store-form (0x11) encoding with reg/rm swapped
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VMOVUPS m128, xmm{k}{z}
    if isM128(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VMOVUPS m256, ymm{k}{z}
    if isM256(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // No form matched the supplied operand types: reject at call time.
    if p.len == 0 {
        panic("invalid operands for VMOVUPS")
    }
    return p
}
 60370  
 60371  // VMPSADBW performs "Compute Multiple Packed Sums of Absolute Difference".
 60372  //
 60373  // Mnemonic        : VMPSADBW
 60374  // Supported forms : (4 forms)
 60375  //
 60376  //    * VMPSADBW imm8, xmm, xmm, xmm     [AVX]
 60377  //    * VMPSADBW imm8, m128, xmm, xmm    [AVX]
 60378  //    * VMPSADBW imm8, ymm, ymm, ymm     [AVX2]
 60379  //    * VMPSADBW imm8, m256, ymm, ymm    [AVX2]
 60380  //
 60381  func (self *Program) VMPSADBW(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
 60382      p := self.alloc("VMPSADBW", 4, Operands { v0, v1, v2, v3 })
 60383      // VMPSADBW imm8, xmm, xmm, xmm
 60384      if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
 60385          self.require(ISA_AVX)
 60386          p.domain = DomainAVX
 60387          p.add(0, func(m *_Encoding, v []interface{}) {
 60388              m.emit(0xc4)
 60389              m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
 60390              m.emit(0x79 ^ (hlcode(v[2]) << 3))
 60391              m.emit(0x42)
 60392              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
 60393              m.imm1(toImmAny(v[0]))
 60394          })
 60395      }
 60396      // VMPSADBW imm8, m128, xmm, xmm
 60397      if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
 60398          self.require(ISA_AVX)
 60399          p.domain = DomainAVX
 60400          p.add(0, func(m *_Encoding, v []interface{}) {
 60401              m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
 60402              m.emit(0x42)
 60403              m.mrsd(lcode(v[3]), addr(v[1]), 1)
 60404              m.imm1(toImmAny(v[0]))
 60405          })
 60406      }
 60407      // VMPSADBW imm8, ymm, ymm, ymm
 60408      if isImm8(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
 60409          self.require(ISA_AVX2)
 60410          p.domain = DomainAVX
 60411          p.add(0, func(m *_Encoding, v []interface{}) {
 60412              m.emit(0xc4)
 60413              m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
 60414              m.emit(0x7d ^ (hlcode(v[2]) << 3))
 60415              m.emit(0x42)
 60416              m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
 60417              m.imm1(toImmAny(v[0]))
 60418          })
 60419      }
 60420      // VMPSADBW imm8, m256, ymm, ymm
 60421      if isImm8(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
 60422          self.require(ISA_AVX2)
 60423          p.domain = DomainAVX
 60424          p.add(0, func(m *_Encoding, v []interface{}) {
 60425              m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
 60426              m.emit(0x42)
 60427              m.mrsd(lcode(v[3]), addr(v[1]), 1)
 60428              m.imm1(toImmAny(v[0]))
 60429          })
 60430      }
 60431      if p.len == 0 {
 60432          panic("invalid operands for VMPSADBW")
 60433      }
 60434      return p
 60435  }
 60436  
// VMULPD performs "Multiply Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VMULPD
// Supported forms : (11 forms)
//
//    * VMULPD xmm, xmm, xmm                   [AVX]
//    * VMULPD m128, xmm, xmm                  [AVX]
//    * VMULPD ymm, ymm, ymm                   [AVX]
//    * VMULPD m256, ymm, ymm                  [AVX]
//    * VMULPD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VMULPD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VMULPD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VMULPD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VMULPD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VMULPD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VMULPD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// The trailing variadic vv carries the optional fourth operand used only by
// the {er} (embedded-rounding) form; any arity other than 3 or 4 panics.
// All forms share opcode 0x59; the immediate byte constants differ from the
// VMULPS encodings only in the 66-prefix-selecting bytes (0x85/0xfd vs
// 0x04/0x7c).
func (self *Program) VMULPD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VMULPD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VMULPD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VMULPD takes 3 or 4 operands")
    }
    // VMULPD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMULPD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x59)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMULPD ymm, ymm, ymm — vex2 first argument 5 (vs 1 above) selects the
    // 256-bit variant.
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMULPD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x59)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMULPD m512/m64bcst, zmm, zmm{k}{z} — bcode(v[0]) sets the EVEX
    // broadcast bit when v0 is a broadcast memory operand; disp8 scale is
    // the 64-byte operand width.
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x59)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VMULPD {er}, zmm, zmm, zmm{k}{z} — v[0] is the rounding-control
    // operand; vcode(v[0]) lands in the EVEX byte that also carries the
    // static-rounding enable (the trailing 0x10).
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VMULPD zmm, zmm, zmm{k}{z} — EVEX prefix (0x62) and payload emitted
    // as raw bytes for the register-register case.
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMULPD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x59)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VMULPD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMULPD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x59)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VMULPD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types: reject at call time.
    if p.len == 0 {
        panic("invalid operands for VMULPD")
    }
    return p
}
 60588  
// VMULPS performs "Multiply Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VMULPS
// Supported forms : (11 forms)
//
//    * VMULPS xmm, xmm, xmm                   [AVX]
//    * VMULPS m128, xmm, xmm                  [AVX]
//    * VMULPS ymm, ymm, ymm                   [AVX]
//    * VMULPS m256, ymm, ymm                  [AVX]
//    * VMULPS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VMULPS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VMULPS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VMULPS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VMULPS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VMULPS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VMULPS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Structurally identical to VMULPD: the trailing variadic vv carries the
// optional fourth operand for the {er} (embedded-rounding) form, and any
// arity other than 3 or 4 panics. The PS variant uses the no-prefix
// encoding bytes (0x04 / 0x7c) where VMULPD uses the 66-prefixed ones.
func (self *Program) VMULPS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VMULPS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VMULPS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VMULPS takes 3 or 4 operands")
    }
    // VMULPS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMULPS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x59)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMULPS ymm, ymm, ymm — vex2 first argument 4 selects the 256-bit
    // variant.
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMULPS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x59)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMULPS m512/m32bcst, zmm, zmm{k}{z} — bcode(v[0]) sets the EVEX
    // broadcast bit when v0 is a broadcast memory operand.
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x59)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VMULPS {er}, zmm, zmm, zmm{k}{z} — v[0] is the rounding-control
    // operand (vcode(v[0]) plus the 0x10 static-rounding enable bit).
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7c ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VMULPS zmm, zmm, zmm{k}{z} — EVEX prefix (0x62) and payload emitted
    // as raw bytes for the register-register case.
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMULPS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x59)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VMULPS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMULPS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x59)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VMULPS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types: reject at call time.
    if p.len == 0 {
        panic("invalid operands for VMULPS")
    }
    return p
}
 60740  
// VMULSD performs "Multiply Scalar Double-Precision Floating-Point Values".
//
// Mnemonic        : VMULSD
// Supported forms : (5 forms)
//
//    * VMULSD xmm, xmm, xmm                [AVX]
//    * VMULSD m64, xmm, xmm                [AVX]
//    * VMULSD m64, xmm, xmm{k}{z}          [AVX512F]
//    * VMULSD {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VMULSD xmm, xmm, xmm{k}{z}          [AVX512F]
//
// The trailing variadic vv carries the optional fourth operand used only by
// the {er} (embedded-rounding) form; any arity other than 3 or 4 panics.
// The scalar-double variant uses the F2-prefix encoding bytes (0x87 / 0xff)
// where the packed forms use other prefixes; the opcode is 0x59 throughout.
func (self *Program) VMULSD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VMULSD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VMULSD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VMULSD takes 3 or 4 operands")
    }
    // VMULSD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMULSD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x59)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMULSD m64, xmm, xmm{k}{z} — EVEX memory form; disp8 scale 8 matches
    // the 8-byte scalar operand.
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x87, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x59)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VMULSD {er}, xmm, xmm, xmm{k}{z} — v[0] is the rounding-control
    // operand (vcode(v[0]) plus the 0x10 static-rounding enable bit); the
    // EVEX prefix (0x62) and payload are emitted as raw bytes.
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xff ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VMULSD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xff ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types: reject at call time.
    if p.len == 0 {
        panic("invalid operands for VMULSD")
    }
    return p
}
 60820  
// VMULSS performs "Multiply Scalar Single-Precision Floating-Point Values".
//
// Mnemonic        : VMULSS
// Supported forms : (5 forms)
//
//    * VMULSS xmm, xmm, xmm                [AVX]
//    * VMULSS m32, xmm, xmm                [AVX]
//    * VMULSS m32, xmm, xmm{k}{z}          [AVX512F]
//    * VMULSS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VMULSS xmm, xmm, xmm{k}{z}          [AVX512F]
//
func (self *Program) VMULSS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional trailing operand selects between the 3-operand forms and
    // the 4-operand {er} (embedded rounding-control) form.
    switch len(vv) {
        case 0  : p = self.alloc("VMULSS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VMULSS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VMULSS takes 3 or 4 operands")
    }
    // Each matched operand pattern below registers one candidate encoder
    // closure for this instruction.
    // VMULSS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[2]), v[0], hlcode(v[1]))
            // 0x59 is the opcode byte; the final byte is a register-direct
            // ModRM (mod = 0b11).
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VMULSS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x59)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VMULSS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded memory form; the last mrsd argument (4) is the
            // compressed-displacement scale for a 32-bit element.
            m.evex(0b01, 0x06, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x59)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VMULSS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix: 0x62 escape byte followed by three
            // payload bytes built from the operand register codes; the
            // rounding-control bits come from vcode(v[0]).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7e ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VMULSS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder matched: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VMULSS")
    }
    return p
}
 60900  
// VORPD performs "Bitwise Logical OR of Double-Precision Floating-Point Values".
//
// Mnemonic        : VORPD
// Supported forms : (10 forms)
//
//    * VORPD xmm, xmm, xmm                   [AVX]
//    * VORPD m128, xmm, xmm                  [AVX]
//    * VORPD ymm, ymm, ymm                   [AVX]
//    * VORPD m256, ymm, ymm                  [AVX]
//    * VORPD m512/m64bcst, zmm, zmm{k}{z}    [AVX512DQ]
//    * VORPD zmm, zmm, zmm{k}{z}             [AVX512DQ]
//    * VORPD m128/m64bcst, xmm, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VORPD xmm, xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VORPD m256/m64bcst, ymm, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VORPD ymm, ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
//
func (self *Program) VORPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VORPD", 3, Operands { v0, v1, v2 })
    // Each matched operand pattern below registers one candidate encoder
    // closure; 0x56 is the opcode byte in every form.
    // VORPD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x56)
            // Register-direct ModRM byte (mod = 0b11).
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VORPD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x56)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VORPD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x56)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VORPD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x56)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VORPD m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; the final mrsd argument is the
            // compressed-displacement scale (64 bytes for a full zmm load).
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x56)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VORPD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix: 0x62 escape byte followed by three
            // payload bytes built from the operand register codes.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x56)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VORPD m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x56)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VORPD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x56)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VORPD m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x56)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VORPD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x56)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder matched: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VORPD")
    }
    return p
}
 61033  
// VORPS performs "Bitwise Logical OR of Single-Precision Floating-Point Values".
//
// Mnemonic        : VORPS
// Supported forms : (10 forms)
//
//    * VORPS xmm, xmm, xmm                   [AVX]
//    * VORPS m128, xmm, xmm                  [AVX]
//    * VORPS ymm, ymm, ymm                   [AVX]
//    * VORPS m256, ymm, ymm                  [AVX]
//    * VORPS m512/m32bcst, zmm, zmm{k}{z}    [AVX512DQ]
//    * VORPS zmm, zmm, zmm{k}{z}             [AVX512DQ]
//    * VORPS m128/m32bcst, xmm, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VORPS xmm, xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VORPS m256/m32bcst, ymm, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VORPS ymm, ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
//
func (self *Program) VORPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VORPS", 3, Operands { v0, v1, v2 })
    // Each matched operand pattern below registers one candidate encoder
    // closure; 0x56 is the opcode byte in every form.
    // VORPS xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x56)
            // Register-direct ModRM byte (mod = 0b11).
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VORPS m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x56)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VORPS ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x56)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VORPS m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x56)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VORPS m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; the final mrsd argument is the
            // compressed-displacement scale (64 bytes for a full zmm load).
            m.evex(0b01, 0x04, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x56)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VORPS zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix: 0x62 escape byte followed by three
            // payload bytes built from the operand register codes.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x56)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VORPS m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x56)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VORPS xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x56)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VORPS m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x56)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VORPS ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x56)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder matched: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VORPS")
    }
    return p
}
 61166  
// VPABSB performs "Packed Absolute Value of Byte Integers".
//
// Mnemonic        : VPABSB
// Supported forms : (10 forms)
//
//    * VPABSB xmm, xmm           [AVX]
//    * VPABSB m128, xmm          [AVX]
//    * VPABSB ymm, ymm           [AVX2]
//    * VPABSB m256, ymm          [AVX2]
//    * VPABSB zmm, zmm{k}{z}     [AVX512BW]
//    * VPABSB m512, zmm{k}{z}    [AVX512BW]
//    * VPABSB xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPABSB ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPABSB m128, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPABSB m256, ymm{k}{z}    [AVX512BW,AVX512VL]
//
func (self *Program) VPABSB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPABSB", 2, Operands { v0, v1 })
    // Each matched operand pattern below registers one candidate encoder
    // closure; 0x1c is the opcode byte in every form.
    // VPABSB xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled three-byte VEX prefix (0xc4 escape).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0x1c)
            // Register-direct ModRM byte (mod = 0b11).
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPABSB m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x1c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPABSB ymm, ymm
    if isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)
            m.emit(0x1c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPABSB m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x1c)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPABSB zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix: 0x62 escape byte followed by three
            // payload bytes built from the operand register codes.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x1c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPABSB m512, zmm{k}{z}
    if isM512(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; the final mrsd argument is the
            // compressed-displacement scale (64 bytes for a full zmm load).
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x1c)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VPABSB xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x1c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPABSB ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x1c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPABSB m128, xmm{k}{z}
    if isM128(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x1c)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VPABSB m256, ymm{k}{z}
    if isM256(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x1c)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // No encoder matched: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VPABSB")
    }
    return p
}
 61303  
// VPABSD performs "Packed Absolute Value of Doubleword Integers".
//
// Mnemonic        : VPABSD
// Supported forms : (10 forms)
//
//    * VPABSD xmm, xmm                   [AVX]
//    * VPABSD m128, xmm                  [AVX]
//    * VPABSD ymm, ymm                   [AVX2]
//    * VPABSD m256, ymm                  [AVX2]
//    * VPABSD m512/m32bcst, zmm{k}{z}    [AVX512F]
//    * VPABSD zmm, zmm{k}{z}             [AVX512F]
//    * VPABSD m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPABSD m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPABSD xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPABSD ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPABSD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPABSD", 2, Operands { v0, v1 })
    // Each matched operand pattern below registers one candidate encoder
    // closure; 0x1e is the opcode byte in every form.
    // VPABSD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled three-byte VEX prefix (0xc4 escape).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0x1e)
            // Register-direct ModRM byte (mod = 0b11).
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPABSD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x1e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPABSD ymm, ymm
    if isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)
            m.emit(0x1e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPABSD m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x1e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPABSD m512/m32bcst, zmm{k}{z}
    if isM512M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; the final mrsd argument is the
            // compressed-displacement scale (64 bytes for a full zmm load).
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x1e)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VPABSD zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix: 0x62 escape byte followed by three
            // payload bytes built from the operand register codes.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x1e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPABSD m128/m32bcst, xmm{k}{z}
    if isM128M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x1e)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VPABSD m256/m32bcst, ymm{k}{z}
    if isM256M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x1e)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VPABSD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x1e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPABSD ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x1e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No encoder matched: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VPABSD")
    }
    return p
}
 61440  
// VPABSQ performs "Packed Absolute Value of Quadword Integers".
//
// Mnemonic        : VPABSQ
// Supported forms : (6 forms)
//
//    * VPABSQ m512/m64bcst, zmm{k}{z}    [AVX512F]
//    * VPABSQ zmm, zmm{k}{z}             [AVX512F]
//    * VPABSQ m128/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPABSQ m256/m64bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPABSQ xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPABSQ ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPABSQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPABSQ", 2, Operands { v0, v1 })
    // Each matched operand pattern below registers one candidate encoder
    // closure; 0x1f is the opcode byte in every form. All forms are
    // EVEX-only (AVX-512): there is no VEX encoding of this instruction here.
    // VPABSQ m512/m64bcst, zmm{k}{z}
    if isM512M64bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; the final mrsd argument is the
            // compressed-displacement scale (64 bytes for a full zmm load).
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x1f)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VPABSQ zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix: 0x62 escape byte followed by three
            // payload bytes built from the operand register codes.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x1f)
            // Register-direct ModRM byte (mod = 0b11).
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPABSQ m128/m64bcst, xmm{k}{z}
    if isM128M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x1f)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VPABSQ m256/m64bcst, ymm{k}{z}
    if isM256M64bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x1f)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VPABSQ xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x1f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPABSQ ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x1f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No encoder matched: the operand combination is not a supported form.
    if p.len == 0 {
        panic("invalid operands for VPABSQ")
    }
    return p
}
 61529  
// VPABSW performs "Packed Absolute Value of Word Integers".
//
// Mnemonic        : VPABSW
// Supported forms : (10 forms)
//
//    * VPABSW xmm, xmm           [AVX]
//    * VPABSW m128, xmm          [AVX]
//    * VPABSW ymm, ymm           [AVX2]
//    * VPABSW m256, ymm          [AVX2]
//    * VPABSW zmm, zmm{k}{z}     [AVX512BW]
//    * VPABSW m512, zmm{k}{z}    [AVX512BW]
//    * VPABSW xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPABSW ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPABSW m128, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPABSW m256, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Each operand form that matches (v0, v1) registers one candidate encoder
// closure on the returned instruction; opcode is 0x1D in the 0F38 map.
// Panics if no form matches the supplied operands.
func (self *Program) VPABSW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPABSW", 2, Operands { v0, v1 })
    // VPABSW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix emitted byte-by-byte (0xC4 escape, then the
            // RXB/map and W.vvvv.L.pp bytes), followed by opcode and ModRM.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0x1d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPABSW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x1d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPABSW ymm, ymm
    if isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)
            m.emit(0x1d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPABSW m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x1d)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPABSW zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix emitted byte-by-byte (0x62 escape, then the
            // three payload bytes); 0x48 selects 512-bit vector length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x1d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPABSW m512, zmm{k}{z}
    if isM512(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Final mrsd argument is the disp8 compression scale (64 bytes for
            // a full 512-bit memory operand) — see Intel SDM EVEX disp8*N.
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x1d)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VPABSW xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x1d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPABSW ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x1d)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPABSW m128, xmm{k}{z}
    if isM128(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x1d)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VPABSW m256, ymm{k}{z}
    if isM256(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x1d)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // No operand form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPABSW")
    }
    return p
}
 61666  
// VPACKSSDW performs "Pack Doublewords into Words with Signed Saturation".
//
// Mnemonic        : VPACKSSDW
// Supported forms : (10 forms)
//
//    * VPACKSSDW xmm, xmm, xmm                   [AVX]
//    * VPACKSSDW m128, xmm, xmm                  [AVX]
//    * VPACKSSDW ymm, ymm, ymm                   [AVX2]
//    * VPACKSSDW m256, ymm, ymm                  [AVX2]
//    * VPACKSSDW m512/m32bcst, zmm, zmm{k}{z}    [AVX512BW]
//    * VPACKSSDW zmm, zmm, zmm{k}{z}             [AVX512BW]
//    * VPACKSSDW m128/m32bcst, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPACKSSDW xmm, xmm, xmm{k}{z}             [AVX512BW,AVX512VL]
//    * VPACKSSDW m256/m32bcst, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//    * VPACKSSDW ymm, ymm, ymm{k}{z}             [AVX512BW,AVX512VL]
//
// Each operand form that matches (v0, v1, v2) registers one candidate encoder
// closure on the returned instruction; opcode is 0x6B in the legacy 0F map.
// Broadcast memory forms pass bcode(v[0]) through so the EVEX.b bit reflects
// an m32bcst operand. Panics if no form matches the supplied operands.
func (self *Program) VPACKSSDW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPACKSSDW", 3, Operands { v0, v1, v2 })
    // VPACKSSDW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x6b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPACKSSDW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x6b)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPACKSSDW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x6b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPACKSSDW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x6b)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPACKSSDW m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Final mrsd argument is the disp8 compression scale (Intel SDM
            // EVEX disp8*N); bcode(v[0]) sets EVEX.b for a broadcast operand.
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x6b)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPACKSSDW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix emitted byte-by-byte (0x62 escape + three
            // payload bytes); 0x40 selects 512-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x6b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPACKSSDW m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x6b)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPACKSSDW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x6b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPACKSSDW m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x6b)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPACKSSDW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x6b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPACKSSDW")
    }
    return p
}
 61799  
// VPACKSSWB performs "Pack Words into Bytes with Signed Saturation".
//
// Mnemonic        : VPACKSSWB
// Supported forms : (10 forms)
//
//    * VPACKSSWB xmm, xmm, xmm           [AVX]
//    * VPACKSSWB m128, xmm, xmm          [AVX]
//    * VPACKSSWB ymm, ymm, ymm           [AVX2]
//    * VPACKSSWB m256, ymm, ymm          [AVX2]
//    * VPACKSSWB zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPACKSSWB m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPACKSSWB xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPACKSSWB m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPACKSSWB ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPACKSSWB m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Each operand form that matches (v0, v1, v2) registers one candidate encoder
// closure on the returned instruction; opcode is 0x63 in the legacy 0F map.
// Unlike the dword variant, no broadcast forms exist, so the EVEX.b argument
// is always 0. Panics if no form matches the supplied operands.
func (self *Program) VPACKSSWB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPACKSSWB", 3, Operands { v0, v1, v2 })
    // VPACKSSWB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x63)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPACKSSWB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x63)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPACKSSWB ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x63)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPACKSSWB m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x63)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPACKSSWB zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix emitted byte-by-byte (0x62 escape + three
            // payload bytes); 0x40 selects 512-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x63)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPACKSSWB m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Final mrsd argument is the disp8 compression scale (64 bytes
            // for a full 512-bit memory operand) — Intel SDM EVEX disp8*N.
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x63)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPACKSSWB xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x63)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPACKSSWB m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x63)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPACKSSWB ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x63)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPACKSSWB m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x63)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No operand form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPACKSSWB")
    }
    return p
}
 61932  
// VPACKUSDW performs "Pack Doublewords into Words with Unsigned Saturation".
//
// Mnemonic        : VPACKUSDW
// Supported forms : (10 forms)
//
//    * VPACKUSDW xmm, xmm, xmm                   [AVX]
//    * VPACKUSDW m128, xmm, xmm                  [AVX]
//    * VPACKUSDW ymm, ymm, ymm                   [AVX2]
//    * VPACKUSDW m256, ymm, ymm                  [AVX2]
//    * VPACKUSDW m512/m32bcst, zmm, zmm{k}{z}    [AVX512BW]
//    * VPACKUSDW zmm, zmm, zmm{k}{z}             [AVX512BW]
//    * VPACKUSDW m128/m32bcst, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPACKUSDW xmm, xmm, xmm{k}{z}             [AVX512BW,AVX512VL]
//    * VPACKUSDW m256/m32bcst, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//    * VPACKUSDW ymm, ymm, ymm{k}{z}             [AVX512BW,AVX512VL]
//
// Each operand form that matches (v0, v1, v2) registers one candidate encoder
// closure on the returned instruction; opcode is 0x2B in the 0F38 map (hence
// the 3-byte VEX / map-select 0b10 here, unlike the 0F-map pack variants).
// Broadcast memory forms pass bcode(v[0]) through so the EVEX.b bit reflects
// an m32bcst operand. Panics if no form matches the supplied operands.
func (self *Program) VPACKUSDW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPACKUSDW", 3, Operands { v0, v1, v2 })
    // VPACKUSDW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix emitted byte-by-byte (0xC4 escape, then the
            // RXB/map and W.vvvv.L.pp bytes), followed by opcode and ModRM.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x2b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPACKUSDW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x2b)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPACKUSDW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x2b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPACKUSDW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x2b)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPACKUSDW m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Final mrsd argument is the disp8 compression scale (Intel SDM
            // EVEX disp8*N); bcode(v[0]) sets EVEX.b for a broadcast operand.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x2b)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPACKUSDW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix emitted byte-by-byte (0x62 escape + three
            // payload bytes); 0x40 selects 512-bit vector length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x2b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPACKUSDW m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x2b)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPACKUSDW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x2b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPACKUSDW m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x2b)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPACKUSDW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x2b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPACKUSDW")
    }
    return p
}
 62069  
// VPACKUSWB performs "Pack Words into Bytes with Unsigned Saturation".
//
// Mnemonic        : VPACKUSWB
// Supported forms : (10 forms)
//
//    * VPACKUSWB xmm, xmm, xmm           [AVX]
//    * VPACKUSWB m128, xmm, xmm          [AVX]
//    * VPACKUSWB ymm, ymm, ymm           [AVX2]
//    * VPACKUSWB m256, ymm, ymm          [AVX2]
//    * VPACKUSWB zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPACKUSWB m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPACKUSWB xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPACKUSWB m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPACKUSWB ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPACKUSWB m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Each operand form that matches (v0, v1, v2) registers one candidate encoder
// closure on the returned instruction; opcode is 0x67 in the legacy 0F map.
// Unlike the dword variant, no broadcast forms exist, so the EVEX.b argument
// is always 0. Panics if no form matches the supplied operands.
func (self *Program) VPACKUSWB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPACKUSWB", 3, Operands { v0, v1, v2 })
    // VPACKUSWB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x67)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPACKUSWB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x67)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPACKUSWB ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x67)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPACKUSWB m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x67)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPACKUSWB zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix emitted byte-by-byte (0x62 escape + three
            // payload bytes); 0x40 selects 512-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x67)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPACKUSWB m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Final mrsd argument is the disp8 compression scale (64 bytes
            // for a full 512-bit memory operand) — Intel SDM EVEX disp8*N.
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x67)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPACKUSWB xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x67)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPACKUSWB m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x67)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPACKUSWB ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x67)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPACKUSWB m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x67)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No operand form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPACKUSWB")
    }
    return p
}
 62202  
// VPADDB performs "Add Packed Byte Integers".
//
// Mnemonic        : VPADDB
// Supported forms : (10 forms)
//
//    * VPADDB xmm, xmm, xmm           [AVX]
//    * VPADDB m128, xmm, xmm          [AVX]
//    * VPADDB ymm, ymm, ymm           [AVX2]
//    * VPADDB m256, ymm, ymm          [AVX2]
//    * VPADDB zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPADDB m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPADDB xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPADDB m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPADDB ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPADDB m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// The last operand (v2) is the destination — it is the one that may carry
// the {k}{z} opmask/zeroing decorations.
func (self *Program) VPADDB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPADDB", 3, Operands { v0, v1, v2 })
    // Every operand form that matches registers one candidate encoder on p;
    // if no form matches at all, the check at the bottom panics.
    // VPADDB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix, opcode 0xFC, then a reg-reg ModRM byte
            // (0xC0 | reg<<3 | rm).
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xfc)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory source: mrsd() emits the ModRM/SIB bytes and displacement.
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xfc)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPADDB ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // vex2 flag 5 (vs 1 in the XMM form) presumably sets VEX.L for
            // 256-bit width — confirm against the vex2 helper.
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xfc)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDB m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xfc)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPADDB zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (0x62 + three payload bytes
            // carrying register-extension bits, opmask kcode and zeroing
            // zcode), then the opcode and a reg-reg ModRM byte. The 0x40
            // term presumably selects the 512-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xfc)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDB m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // evex() builds the prefix; the trailing mrsd() argument (64) is
            // the displacement scale — presumably EVEX disp8*N compression.
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xfc)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPADDB xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX byte layout as the ZMM form; the 0x00 term presumably
            // encodes 128-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xfc)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDB m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xfc)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPADDB ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0x20 presumably encodes 256-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xfc)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDB m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xfc)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No supported form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPADDB")
    }
    return p
}
 62335  
// VPADDD performs "Add Packed Doubleword Integers".
//
// Mnemonic        : VPADDD
// Supported forms : (10 forms)
//
//    * VPADDD xmm, xmm, xmm                   [AVX]
//    * VPADDD m128, xmm, xmm                  [AVX]
//    * VPADDD ymm, ymm, ymm                   [AVX2]
//    * VPADDD m256, ymm, ymm                  [AVX2]
//    * VPADDD m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPADDD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPADDD m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPADDD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPADDD m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPADDD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// The last operand (v2) is the destination — it is the one that may carry
// the {k}{z} opmask/zeroing decorations.
func (self *Program) VPADDD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPADDD", 3, Operands { v0, v1, v2 })
    // Every operand form that matches registers one candidate encoder on p;
    // if no form matches at all, the check at the bottom panics.
    // VPADDD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix, opcode 0xFE, then a reg-reg ModRM byte
            // (0xC0 | reg<<3 | rm).
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xfe)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory source: mrsd() emits the ModRM/SIB bytes and displacement.
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xfe)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPADDD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // vex2 flag 5 (vs 1 in the XMM form) presumably sets VEX.L for
            // 256-bit width — confirm against the vex2 helper.
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xfe)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xfe)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPADDD m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // bcode(v[0]) flags the m32bcst broadcast form — presumably the
            // EVEX.b bit. The trailing mrsd() argument (64) is the
            // displacement scale (presumably EVEX disp8*N compression).
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xfe)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPADDD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (0x62 + three payload bytes
            // carrying register-extension bits, opmask kcode and zeroing
            // zcode), then the opcode and a reg-reg ModRM byte. The 0x40
            // term presumably selects the 512-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xfe)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDD m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xfe)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPADDD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX byte layout as the ZMM form; the 0x00 term presumably
            // encodes 128-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xfe)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDD m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xfe)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPADDD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0x20 presumably encodes 256-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xfe)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No supported form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPADDD")
    }
    return p
}
 62468  
// VPADDQ performs "Add Packed Quadword Integers".
//
// Mnemonic        : VPADDQ
// Supported forms : (10 forms)
//
//    * VPADDQ xmm, xmm, xmm                   [AVX]
//    * VPADDQ m128, xmm, xmm                  [AVX]
//    * VPADDQ ymm, ymm, ymm                   [AVX2]
//    * VPADDQ m256, ymm, ymm                  [AVX2]
//    * VPADDQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPADDQ zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPADDQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPADDQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPADDQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPADDQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// The last operand (v2) is the destination — it is the one that may carry
// the {k}{z} opmask/zeroing decorations.
func (self *Program) VPADDQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPADDQ", 3, Operands { v0, v1, v2 })
    // Every operand form that matches registers one candidate encoder on p;
    // if no form matches at all, the check at the bottom panics.
    // VPADDQ xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix, opcode 0xD4, then a reg-reg ModRM byte
            // (0xC0 | reg<<3 | rm).
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xd4)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDQ m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory source: mrsd() emits the ModRM/SIB bytes and displacement.
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xd4)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPADDQ ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // vex2 flag 5 (vs 1 in the XMM form) presumably sets VEX.L for
            // 256-bit width — confirm against the vex2 helper.
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xd4)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDQ m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xd4)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPADDQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // evex flag 0x85 (vs 0x05 in the doubleword variant) presumably
            // sets EVEX.W for 64-bit element size; bcode(v[0]) flags the
            // m64bcst broadcast form. The trailing mrsd() argument (64) is
            // the displacement scale (presumably EVEX disp8*N compression).
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xd4)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPADDQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix. Base 0xfd (vs 0x7d in the
            // doubleword variant) presumably carries the W bit for 64-bit
            // elements; 0x40 presumably selects 512-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xd4)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xd4)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPADDQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX byte layout as the ZMM form; the 0x00 term presumably
            // encodes 128-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xd4)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xd4)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPADDQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0x20 presumably encodes 256-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xd4)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No supported form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPADDQ")
    }
    return p
}
 62601  
// VPADDSB performs "Add Packed Signed Byte Integers with Signed Saturation".
//
// Mnemonic        : VPADDSB
// Supported forms : (10 forms)
//
//    * VPADDSB xmm, xmm, xmm           [AVX]
//    * VPADDSB m128, xmm, xmm          [AVX]
//    * VPADDSB ymm, ymm, ymm           [AVX2]
//    * VPADDSB m256, ymm, ymm          [AVX2]
//    * VPADDSB zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPADDSB m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPADDSB xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPADDSB m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPADDSB ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPADDSB m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// The last operand (v2) is the destination — it is the one that may carry
// the {k}{z} opmask/zeroing decorations.
func (self *Program) VPADDSB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPADDSB", 3, Operands { v0, v1, v2 })
    // Every operand form that matches registers one candidate encoder on p;
    // if no form matches at all, the check at the bottom panics.
    // VPADDSB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix, opcode 0xEC, then a reg-reg ModRM byte
            // (0xC0 | reg<<3 | rm).
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xec)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDSB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory source: mrsd() emits the ModRM/SIB bytes and displacement.
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xec)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPADDSB ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // vex2 flag 5 (vs 1 in the XMM form) presumably sets VEX.L for
            // 256-bit width — confirm against the vex2 helper.
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xec)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDSB m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xec)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPADDSB zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (0x62 + three payload bytes
            // carrying register-extension bits, opmask kcode and zeroing
            // zcode), then the opcode and a reg-reg ModRM byte. The 0x40
            // term presumably selects the 512-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xec)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDSB m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // evex() builds the prefix; the trailing mrsd() argument (64) is
            // the displacement scale — presumably EVEX disp8*N compression.
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xec)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPADDSB xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX byte layout as the ZMM form; the 0x00 term presumably
            // encodes 128-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xec)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDSB m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xec)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPADDSB ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0x20 presumably encodes 256-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xec)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDSB m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xec)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No supported form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPADDSB")
    }
    return p
}
 62734  
// VPADDSW performs "Add Packed Signed Word Integers with Signed Saturation".
//
// Mnemonic        : VPADDSW
// Supported forms : (10 forms)
//
//    * VPADDSW xmm, xmm, xmm           [AVX]
//    * VPADDSW m128, xmm, xmm          [AVX]
//    * VPADDSW ymm, ymm, ymm           [AVX2]
//    * VPADDSW m256, ymm, ymm          [AVX2]
//    * VPADDSW zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPADDSW m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPADDSW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPADDSW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPADDSW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPADDSW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// The last operand (v2) is the destination — it is the one that may carry
// the {k}{z} opmask/zeroing decorations.
func (self *Program) VPADDSW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPADDSW", 3, Operands { v0, v1, v2 })
    // Every operand form that matches registers one candidate encoder on p;
    // if no form matches at all, the check at the bottom panics.
    // VPADDSW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix, opcode 0xED, then a reg-reg ModRM byte
            // (0xC0 | reg<<3 | rm).
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xed)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDSW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory source: mrsd() emits the ModRM/SIB bytes and displacement.
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xed)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPADDSW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // vex2 flag 5 (vs 1 in the XMM form) presumably sets VEX.L for
            // 256-bit width — confirm against the vex2 helper.
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xed)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDSW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xed)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPADDSW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (0x62 + three payload bytes
            // carrying register-extension bits, opmask kcode and zeroing
            // zcode), then the opcode and a reg-reg ModRM byte. The 0x40
            // term presumably selects the 512-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xed)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDSW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // evex() builds the prefix; the trailing mrsd() argument (64) is
            // the displacement scale — presumably EVEX disp8*N compression.
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xed)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPADDSW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX byte layout as the ZMM form; the 0x00 term presumably
            // encodes 128-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xed)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDSW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xed)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPADDSW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0x20 presumably encodes 256-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xed)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPADDSW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xed)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No supported form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPADDSW")
    }
    return p
}
 62867  
// VPADDUSB performs "Add Packed Unsigned Byte Integers with Unsigned Saturation".
//
// Mnemonic        : VPADDUSB
// Supported forms : (10 forms)
//
//    * VPADDUSB xmm, xmm, xmm           [AVX]
//    * VPADDUSB m128, xmm, xmm          [AVX]
//    * VPADDUSB ymm, ymm, ymm           [AVX2]
//    * VPADDUSB m256, ymm, ymm          [AVX2]
//    * VPADDUSB zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPADDUSB m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPADDUSB xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPADDUSB m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPADDUSB ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPADDUSB m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Operands follow AT&T ordering (destination last): v0 = second source or
// memory operand, v1 = first source register, v2 = destination register.
// Every form whose operand types match appends one candidate encoder to the
// instruction; if none matches, the function panics.
func (self *Program) VPADDUSB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPADDUSB", 3, Operands { v0, v1, v2 })
    // VPADDUSB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1])) // 2-byte VEX prefix (flag 1: 128-bit form; ymm forms pass 5)
            m.emit(0xdc)                               // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11 register-direct, reg=v2, rm=v0
        })
    }
    // VPADDUSB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 2-byte VEX prefix for the memory form
            m.emit(0xdc)                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM+SIB+disp; scale 1 (no disp compression under VEX)
        })
    }
    // VPADDUSB ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1])) // 2-byte VEX prefix (flag 5: 256-bit form)
            m.emit(0xdc)                               // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v2, rm=v0
        })
    }
    // VPADDUSB m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 2-byte VEX prefix for the memory form
            m.emit(0xdc)                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM+SIB+disp; scale 1
        })
    }
    // VPADDUSB zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-direct EVEX forms hand-roll the 4-byte prefix (memory forms use m.evex).
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: high register-extension bits of v2/v0 folded into 0xf1
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: vvvv encodes the first source (v1)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)  // P2: z-bit, opmask k, 0x40 selects 512-bit length
            m.emit(0xdc)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: mod=11, reg=v2, rm=v0
        })
    }
    // VPADDUSB m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // helper-built EVEX prefix; 0b10 = 512-bit length
            m.emit(0xdc)                                                                                 // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)                                                          // ModRM+SIB+disp; scale 64 matches operand size (EVEX disp8*N)
        })
    }
    // VPADDUSB xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: register-extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: vvvv = v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)  // P2: 0x00 selects 128-bit length
            m.emit(0xdc)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: mod=11, reg=v2, rm=v0
        })
    }
    // VPADDUSB m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // EVEX prefix; 0b00 = 128-bit length
            m.emit(0xdc)                                                                                 // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 16)                                                          // disp scale 16 for a 16-byte operand
        })
    }
    // VPADDUSB ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: register-extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: vvvv = v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)  // P2: 0x20 selects 256-bit length
            m.emit(0xdc)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: mod=11, reg=v2, rm=v0
        })
    }
    // VPADDUSB m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // EVEX prefix; 0b01 = 256-bit length
            m.emit(0xdc)                                                                                 // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 32)                                                          // disp scale 32 for a 32-byte operand
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPADDUSB")
    }
    return p
}
 63000  
// VPADDUSW performs "Add Packed Unsigned Word Integers with Unsigned Saturation".
//
// Mnemonic        : VPADDUSW
// Supported forms : (10 forms)
//
//    * VPADDUSW xmm, xmm, xmm           [AVX]
//    * VPADDUSW m128, xmm, xmm          [AVX]
//    * VPADDUSW ymm, ymm, ymm           [AVX2]
//    * VPADDUSW m256, ymm, ymm          [AVX2]
//    * VPADDUSW zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPADDUSW m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPADDUSW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPADDUSW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPADDUSW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPADDUSW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Operands follow AT&T ordering (destination last): v0 = second source or
// memory operand, v1 = first source register, v2 = destination. Each matching
// form appends one candidate encoder; opcode byte is 0xdd for all forms.
func (self *Program) VPADDUSW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPADDUSW", 3, Operands { v0, v1, v2 })
    // VPADDUSW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1])) // 2-byte VEX prefix (flag 1: 128-bit form)
            m.emit(0xdd)                               // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11 register-direct, reg=v2, rm=v0
        })
    }
    // VPADDUSW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 2-byte VEX prefix for the memory form
            m.emit(0xdd)                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM+SIB+disp; scale 1 under VEX
        })
    }
    // VPADDUSW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1])) // 2-byte VEX prefix (flag 5: 256-bit form)
            m.emit(0xdd)                               // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v2, rm=v0
        })
    }
    // VPADDUSW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 2-byte VEX prefix for the memory form
            m.emit(0xdd)                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM+SIB+disp; scale 1
        })
    }
    // VPADDUSW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-direct EVEX forms hand-roll the 4-byte prefix (memory forms use m.evex).
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: register-extension bits of v2/v0
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: vvvv encodes the first source (v1)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)  // P2: z-bit, opmask k, 0x40 = 512-bit length
            m.emit(0xdd)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: mod=11, reg=v2, rm=v0
        })
    }
    // VPADDUSW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // EVEX prefix; 0b10 = 512-bit length
            m.emit(0xdd)                                                                                 // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)                                                          // disp scale 64 (EVEX disp8*N)
        })
    }
    // VPADDUSW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: register-extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: vvvv = v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)  // P2: 0x00 = 128-bit length
            m.emit(0xdd)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: mod=11, reg=v2, rm=v0
        })
    }
    // VPADDUSW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // EVEX prefix; 0b00 = 128-bit length
            m.emit(0xdd)                                                                                 // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 16)                                                          // disp scale 16
        })
    }
    // VPADDUSW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: register-extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: vvvv = v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)  // P2: 0x20 = 256-bit length
            m.emit(0xdd)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: mod=11, reg=v2, rm=v0
        })
    }
    // VPADDUSW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // EVEX prefix; 0b01 = 256-bit length
            m.emit(0xdd)                                                                                 // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 32)                                                          // disp scale 32
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPADDUSW")
    }
    return p
}
 63133  
// VPADDW performs "Add Packed Word Integers".
//
// Mnemonic        : VPADDW
// Supported forms : (10 forms)
//
//    * VPADDW xmm, xmm, xmm           [AVX]
//    * VPADDW m128, xmm, xmm          [AVX]
//    * VPADDW ymm, ymm, ymm           [AVX2]
//    * VPADDW m256, ymm, ymm          [AVX2]
//    * VPADDW zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPADDW m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPADDW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPADDW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPADDW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPADDW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Operands follow AT&T ordering (destination last): v0 = second source or
// memory operand, v1 = first source register, v2 = destination. Each matching
// form appends one candidate encoder; opcode byte is 0xfd for all forms.
func (self *Program) VPADDW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPADDW", 3, Operands { v0, v1, v2 })
    // VPADDW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1])) // 2-byte VEX prefix (flag 1: 128-bit form)
            m.emit(0xfd)                               // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11 register-direct, reg=v2, rm=v0
        })
    }
    // VPADDW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 2-byte VEX prefix for the memory form
            m.emit(0xfd)                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM+SIB+disp; scale 1 under VEX
        })
    }
    // VPADDW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1])) // 2-byte VEX prefix (flag 5: 256-bit form)
            m.emit(0xfd)                               // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v2, rm=v0
        })
    }
    // VPADDW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 2-byte VEX prefix for the memory form
            m.emit(0xfd)                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM+SIB+disp; scale 1
        })
    }
    // VPADDW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-direct EVEX forms hand-roll the 4-byte prefix (memory forms use m.evex).
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: register-extension bits of v2/v0
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: vvvv encodes the first source (v1)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)  // P2: z-bit, opmask k, 0x40 = 512-bit length
            m.emit(0xfd)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: mod=11, reg=v2, rm=v0
        })
    }
    // VPADDW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // EVEX prefix; 0b10 = 512-bit length
            m.emit(0xfd)                                                                                 // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)                                                          // disp scale 64 (EVEX disp8*N)
        })
    }
    // VPADDW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: register-extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: vvvv = v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)  // P2: 0x00 = 128-bit length
            m.emit(0xfd)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: mod=11, reg=v2, rm=v0
        })
    }
    // VPADDW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // EVEX prefix; 0b00 = 128-bit length
            m.emit(0xfd)                                                                                 // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 16)                                                          // disp scale 16
        })
    }
    // VPADDW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: register-extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: vvvv = v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)  // P2: 0x20 = 256-bit length
            m.emit(0xfd)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: mod=11, reg=v2, rm=v0
        })
    }
    // VPADDW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // EVEX prefix; 0b01 = 256-bit length
            m.emit(0xfd)                                                                                 // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 32)                                                          // disp scale 32
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPADDW")
    }
    return p
}
 63266  
// VPALIGNR performs "Packed Align Right".
//
// Mnemonic        : VPALIGNR
// Supported forms : (10 forms)
//
//    * VPALIGNR imm8, xmm, xmm, xmm           [AVX]
//    * VPALIGNR imm8, m128, xmm, xmm          [AVX]
//    * VPALIGNR imm8, ymm, ymm, ymm           [AVX2]
//    * VPALIGNR imm8, m256, ymm, ymm          [AVX2]
//    * VPALIGNR imm8, zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPALIGNR imm8, m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPALIGNR imm8, xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPALIGNR imm8, m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPALIGNR imm8, ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPALIGNR imm8, m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Operands follow AT&T ordering (destination last): v0 = imm8, v1 = second
// source or memory operand, v2 = first source register, v3 = destination.
// Opcode byte is 0x0f for all forms; the imm8 is appended after the ModRM
// bytes. Each matching form appends one candidate encoder.
func (self *Program) VPALIGNR(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPALIGNR", 4, Operands { v0, v1, v2, v3 })
    // VPALIGNR imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-direct forms hand-roll the 3-byte VEX prefix (memory forms use m.vex3).
            m.emit(0xc4)                                          // 3-byte VEX escape byte
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5)) // VEX byte 1: high register bits of v3/v1 folded into 0xe3
            m.emit(0x79 ^ (hlcode(v[2]) << 3))                     // VEX byte 2: vvvv = v2 (128-bit; ymm form uses base 0x7d)
            m.emit(0x0f)                                           // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))          // ModRM: mod=11 register-direct, reg=v3, rm=v1
            m.imm1(toImmAny(v[0]))                                 // trailing 8-bit immediate
        })
    }
    // VPALIGNR imm8, m128, xmm, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2])) // helper-built 3-byte VEX prefix
            m.emit(0x0f)                                                    // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 1)                              // ModRM+SIB+disp; scale 1 under VEX
            m.imm1(toImmAny(v[0]))                                          // trailing 8-bit immediate
        })
    }
    // VPALIGNR imm8, ymm, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                          // 3-byte VEX escape byte
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5)) // VEX byte 1: high register bits of v3/v1
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                     // VEX byte 2: vvvv = v2 (base 0x7d: 256-bit)
            m.emit(0x0f)                                           // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))          // ModRM: mod=11, reg=v3, rm=v1
            m.imm1(toImmAny(v[0]))                                 // trailing 8-bit immediate
        })
    }
    // VPALIGNR imm8, m256, ymm, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2])) // helper-built 3-byte VEX prefix (0x05: 256-bit)
            m.emit(0x0f)                                                    // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 1)                              // ModRM+SIB+disp; scale 1
            m.imm1(toImmAny(v[0]))                                          // trailing 8-bit immediate
        })
    }
    // VPALIGNR imm8, zmm, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-direct EVEX forms hand-roll the 4-byte prefix (memory forms use m.evex).
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4))) // P0: register-extension bits of v3/v1 folded into 0xf3
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                                             // P1: vvvv encodes the first source (v2)
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)  // P2: z-bit, opmask k, 0x40 = 512-bit length
            m.emit(0x0f)                                                                   // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                  // ModRM: mod=11, reg=v3, rm=v1
            m.imm1(toImmAny(v[0]))                                                         // trailing 8-bit immediate
        })
    }
    // VPALIGNR imm8, m512, zmm, zmm{k}{z}
    if isImm8(v0) && isM512(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0) // EVEX prefix; 0b10 = 512-bit length
            m.emit(0x0f)                                                                                 // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 64)                                                          // disp scale 64 (EVEX disp8*N)
            m.imm1(toImmAny(v[0]))                                                                       // trailing 8-bit immediate
        })
    }
    // VPALIGNR imm8, xmm, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4))) // P0: register-extension bits
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                                             // P1: vvvv = v2
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)  // P2: 0x00 = 128-bit length
            m.emit(0x0f)                                                                   // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                  // ModRM: mod=11, reg=v3, rm=v1
            m.imm1(toImmAny(v[0]))                                                         // trailing 8-bit immediate
        })
    }
    // VPALIGNR imm8, m128, xmm, xmm{k}{z}
    if isImm8(v0) && isM128(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0) // EVEX prefix; 0b00 = 128-bit length
            m.emit(0x0f)                                                                                 // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 16)                                                          // disp scale 16
            m.imm1(toImmAny(v[0]))                                                                       // trailing 8-bit immediate
        })
    }
    // VPALIGNR imm8, ymm, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4))) // P0: register-extension bits
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                                             // P1: vvvv = v2
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)  // P2: 0x20 = 256-bit length
            m.emit(0x0f)                                                                   // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                  // ModRM: mod=11, reg=v3, rm=v1
            m.imm1(toImmAny(v[0]))                                                         // trailing 8-bit immediate
        })
    }
    // VPALIGNR imm8, m256, ymm, ymm{k}{z}
    if isImm8(v0) && isM256(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0) // EVEX prefix; 0b01 = 256-bit length
            m.emit(0x0f)                                                                                 // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 32)                                                          // disp scale 32
            m.imm1(toImmAny(v[0]))                                                                       // trailing 8-bit immediate
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPALIGNR")
    }
    return p
}
 63413  
// VPAND performs "Packed Bitwise Logical AND".
//
// Mnemonic        : VPAND
// Supported forms : (4 forms)
//
//    * VPAND xmm, xmm, xmm     [AVX]
//    * VPAND m128, xmm, xmm    [AVX]
//    * VPAND ymm, ymm, ymm     [AVX2]
//    * VPAND m256, ymm, ymm    [AVX2]
//
// Operands follow AT&T ordering (destination last): v0 = second source or
// memory operand, v1 = first source register, v2 = destination. VEX-encoded
// only; the EVEX variants are the separate VPANDD/VPANDQ mnemonics. Opcode
// byte is 0xdb for all forms.
func (self *Program) VPAND(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPAND", 3, Operands { v0, v1, v2 })
    // VPAND xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1])) // 2-byte VEX prefix (flag 1: 128-bit form)
            m.emit(0xdb)                               // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11 register-direct, reg=v2, rm=v0
        })
    }
    // VPAND m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 2-byte VEX prefix for the memory form
            m.emit(0xdb)                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM+SIB+disp; scale 1 under VEX
        })
    }
    // VPAND ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1])) // 2-byte VEX prefix (flag 5: 256-bit form)
            m.emit(0xdb)                               // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=v2, rm=v0
        })
    }
    // VPAND m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 2-byte VEX prefix for the memory form
            m.emit(0xdb)                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM+SIB+disp; scale 1
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPAND")
    }
    return p
}
 63471  
// VPANDD performs "Bitwise Logical AND of Packed Doubleword Integers".
//
// Mnemonic        : VPANDD
// Supported forms : (6 forms)
//
//    * VPANDD m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPANDD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPANDD m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPANDD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPANDD m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPANDD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Operands follow AT&T ordering (destination last): v0 = second source,
// memory, or broadcast operand, v1 = first source register, v2 = destination.
// EVEX-encoded only; memory forms pass bcode(v[0]) to flag an embedded
// 32-bit broadcast. Opcode byte is 0xdb for all forms.
func (self *Program) VPANDD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPANDD", 3, Operands { v0, v1, v2 })
    // VPANDD m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0])) // EVEX prefix; 0b10 = 512-bit, bcode = broadcast bit
            m.emit(0xdb)                                                                                           // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)                                                                    // disp scale 64 (EVEX disp8*N)
        })
    }
    // VPANDD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-direct EVEX forms hand-roll the 4-byte prefix (memory forms use m.evex).
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: register-extension bits of v2/v0
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: vvvv encodes the first source (v1)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)  // P2: z-bit, opmask k, 0x40 = 512-bit length
            m.emit(0xdb)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: mod=11, reg=v2, rm=v0
        })
    }
    // VPANDD m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0])) // EVEX prefix; 0b00 = 128-bit, bcode = broadcast bit
            m.emit(0xdb)                                                                                           // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 16)                                                                    // disp scale 16
        })
    }
    // VPANDD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: register-extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: vvvv = v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)  // P2: 0x00 = 128-bit length
            m.emit(0xdb)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: mod=11, reg=v2, rm=v0
        })
    }
    // VPANDD m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0])) // EVEX prefix; 0b01 = 256-bit, bcode = broadcast bit
            m.emit(0xdb)                                                                                           // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 32)                                                                    // disp scale 32
        })
    }
    // VPANDD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: register-extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: vvvv = v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)  // P2: 0x20 = 256-bit length
            m.emit(0xdb)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: mod=11, reg=v2, rm=v0
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPANDD")
    }
    return p
}
 63560  
 63561  // VPANDN performs "Packed Bitwise Logical AND NOT".
 63562  //
 63563  // Mnemonic        : VPANDN
 63564  // Supported forms : (4 forms)
 63565  //
 63566  //    * VPANDN xmm, xmm, xmm     [AVX]
 63567  //    * VPANDN m128, xmm, xmm    [AVX]
 63568  //    * VPANDN ymm, ymm, ymm     [AVX2]
 63569  //    * VPANDN m256, ymm, ymm    [AVX2]
 63570  //
func (self *Program) VPANDN(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPANDN", 3, Operands { v0, v1, v2 })
    // The operand-pattern checks below are disjoint: the matching form
    // registers its encoder closure on p for later serialization.
    // VPANDN xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix; first arg is 1 here vs 5 in the ymm forms —
            // NOTE(review): presumably the VEX.L (vector length) bit, confirm in vex2.
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xdf)                                  // opcode (PANDN)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VPANDN m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xdf)
            m.mrsd(lcode(v[2]), addr(v[0]), 1) // ModRM+SIB+disp for the memory operand
        })
    }
    // VPANDN ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xdf)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPANDN m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xdf)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No form matched: the operands are not a supported combination.
    if p.len == 0 {
        panic("invalid operands for VPANDN")
    }
    return p
}
 63618  
 63619  // VPANDND performs "Bitwise Logical AND NOT of Packed Doubleword Integers".
 63620  //
 63621  // Mnemonic        : VPANDND
 63622  // Supported forms : (6 forms)
 63623  //
 63624  //    * VPANDND m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
 63625  //    * VPANDND zmm, zmm, zmm{k}{z}             [AVX512F]
 63626  //    * VPANDND m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 63627  //    * VPANDND xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 63628  //    * VPANDND m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 63629  //    * VPANDND ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 63630  //
func (self *Program) VPANDND(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPANDND", 3, Operands { v0, v1, v2 })
    // Disjoint operand-pattern checks: the matching form registers its encoder.
    // VPANDND m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix via helper; bcode(v[0]) carries the broadcast bit of
            // the memory operand, kcode/zcode the {k}{z} masking of the dst.
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xdf)                        // opcode (PANDN)
            m.mrsd(lcode(v[2]), addr(v[0]), 64) // ModRM+SIB+disp, disp8 scaled by 64
        })
    }
    // VPANDND zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Inline 4-byte EVEX prefix: 0x62 escape plus three payload bytes
            // mixing register high bits, non-destructive src (hlcode), opmask
            // (kcode), zeroing (zcode) and vector length — 0x40=512b, 0x20=256b,
            // 0x00=128b, matching this function's three register forms.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xdf)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VPANDND m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xdf)
            m.mrsd(lcode(v[2]), addr(v[0]), 16) // disp8 scaled by 16 (128-bit form)
        })
    }
    // VPANDND xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same inline EVEX layout as the zmm form, 128-bit length (| 0x00).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xdf)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPANDND m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xdf)
            m.mrsd(lcode(v[2]), addr(v[0]), 32) // disp8 scaled by 32 (256-bit form)
        })
    }
    // VPANDND ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same inline EVEX layout, 256-bit length (| 0x20).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xdf)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operands are not a supported combination.
    if p.len == 0 {
        panic("invalid operands for VPANDND")
    }
    return p
}
 63707  
 63708  // VPANDNQ performs "Bitwise Logical AND NOT of Packed Quadword Integers".
 63709  //
 63710  // Mnemonic        : VPANDNQ
 63711  // Supported forms : (6 forms)
 63712  //
 63713  //    * VPANDNQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
 63714  //    * VPANDNQ zmm, zmm, zmm{k}{z}             [AVX512F]
 63715  //    * VPANDNQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 63716  //    * VPANDNQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 63717  //    * VPANDNQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 63718  //    * VPANDNQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 63719  //
func (self *Program) VPANDNQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPANDNQ", 3, Operands { v0, v1, v2 })
    // Quadword variant of VPANDND: the encodings differ only in the element
    // width selector — 0x85/0xfd here vs 0x05/0x7d in the D variant.
    // NOTE(review): presumably the extra high bit is EVEX.W=1 (64-bit
    // elements); confirm against the evex helper.
    // VPANDNQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // bcode(v[0]) carries the broadcast bit of the memory operand.
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xdf)                        // opcode (PANDN)
            m.mrsd(lcode(v[2]), addr(v[0]), 64) // ModRM+SIB+disp, disp8 scaled by 64
        })
    }
    // VPANDNQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Inline 4-byte EVEX prefix; last payload byte's 0x40/0x20/0x00
            // selects 512/256/128-bit vector length across the register forms.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xdf)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VPANDNQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xdf)
            m.mrsd(lcode(v[2]), addr(v[0]), 16) // disp8 scaled by 16 (128-bit form)
        })
    }
    // VPANDNQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xdf)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPANDNQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xdf)
            m.mrsd(lcode(v[2]), addr(v[0]), 32) // disp8 scaled by 32 (256-bit form)
        })
    }
    // VPANDNQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xdf)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operands are not a supported combination.
    if p.len == 0 {
        panic("invalid operands for VPANDNQ")
    }
    return p
}
 63796  
 63797  // VPANDQ performs "Bitwise Logical AND of Packed Quadword Integers".
 63798  //
 63799  // Mnemonic        : VPANDQ
 63800  // Supported forms : (6 forms)
 63801  //
 63802  //    * VPANDQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
 63803  //    * VPANDQ zmm, zmm, zmm{k}{z}             [AVX512F]
 63804  //    * VPANDQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 63805  //    * VPANDQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 63806  //    * VPANDQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 63807  //    * VPANDQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 63808  //
func (self *Program) VPANDQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPANDQ", 3, Operands { v0, v1, v2 })
    // Same dispatch/encoding structure as VPANDNQ, with opcode 0xdb (PAND)
    // instead of 0xdf (PANDN). The disjoint checks below register at most
    // one encoder closure per call.
    // VPANDQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX via helper; bcode(v[0]) = broadcast bit of the memory operand.
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xdb)                        // opcode (PAND)
            m.mrsd(lcode(v[2]), addr(v[0]), 64) // ModRM+SIB+disp, disp8 scaled by 64
        })
    }
    // VPANDQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Inline 4-byte EVEX prefix; 0x40/0x20/0x00 in the last payload
            // byte selects 512/256/128-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xdb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VPANDQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xdb)
            m.mrsd(lcode(v[2]), addr(v[0]), 16) // disp8 scaled by 16 (128-bit form)
        })
    }
    // VPANDQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xdb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPANDQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xdb)
            m.mrsd(lcode(v[2]), addr(v[0]), 32) // disp8 scaled by 32 (256-bit form)
        })
    }
    // VPANDQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xdb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operands are not a supported combination.
    if p.len == 0 {
        panic("invalid operands for VPANDQ")
    }
    return p
}
 63885  
 63886  // VPAVGB performs "Average Packed Byte Integers".
 63887  //
 63888  // Mnemonic        : VPAVGB
 63889  // Supported forms : (10 forms)
 63890  //
 63891  //    * VPAVGB xmm, xmm, xmm           [AVX]
 63892  //    * VPAVGB m128, xmm, xmm          [AVX]
 63893  //    * VPAVGB ymm, ymm, ymm           [AVX2]
 63894  //    * VPAVGB m256, ymm, ymm          [AVX2]
 63895  //    * VPAVGB zmm, zmm, zmm{k}{z}     [AVX512BW]
 63896  //    * VPAVGB m512, zmm, zmm{k}{z}    [AVX512BW]
 63897  //    * VPAVGB xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
 63898  //    * VPAVGB m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
 63899  //    * VPAVGB ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
 63900  //    * VPAVGB m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
 63901  //
func (self *Program) VPAVGB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPAVGB", 3, Operands { v0, v1, v2 })
    // Ten forms: four VEX-encoded (AVX/AVX2) and six EVEX-encoded (AVX-512BW,
    // with AVX512VL for the 128/256-bit widths). The operand checks are
    // disjoint; the matching form registers its encoder closure. Opcode is
    // 0xe0 (PAVGB) in every form.
    // VPAVGB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix; first arg 1 (xmm forms) vs 5 (ymm forms) —
            // NOTE(review): presumably the VEX.L bit, confirm in vex2.
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xe0)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VPAVGB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xe0)
            m.mrsd(lcode(v[2]), addr(v[0]), 1) // ModRM+SIB+disp for the memory operand
        })
    }
    // VPAVGB ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xe0)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPAVGB m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xe0)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPAVGB zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Inline 4-byte EVEX prefix; 0x40/0x20/0x00 in the last payload
            // byte selects 512/256/128-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xe0)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPAVGB m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Final evex arg is 0: byte-element instruction, no broadcast form.
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe0)
            m.mrsd(lcode(v[2]), addr(v[0]), 64) // disp8 scaled by 64
        })
    }
    // VPAVGB xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xe0)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPAVGB m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe0)
            m.mrsd(lcode(v[2]), addr(v[0]), 16) // disp8 scaled by 16
        })
    }
    // VPAVGB ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xe0)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPAVGB m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe0)
            m.mrsd(lcode(v[2]), addr(v[0]), 32) // disp8 scaled by 32
        })
    }
    // No form matched: the operands are not a supported combination.
    if p.len == 0 {
        panic("invalid operands for VPAVGB")
    }
    return p
}
 64018  
 64019  // VPAVGW performs "Average Packed Word Integers".
 64020  //
 64021  // Mnemonic        : VPAVGW
 64022  // Supported forms : (10 forms)
 64023  //
 64024  //    * VPAVGW xmm, xmm, xmm           [AVX]
 64025  //    * VPAVGW m128, xmm, xmm          [AVX]
 64026  //    * VPAVGW ymm, ymm, ymm           [AVX2]
 64027  //    * VPAVGW m256, ymm, ymm          [AVX2]
 64028  //    * VPAVGW zmm, zmm, zmm{k}{z}     [AVX512BW]
 64029  //    * VPAVGW m512, zmm, zmm{k}{z}    [AVX512BW]
 64030  //    * VPAVGW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
 64031  //    * VPAVGW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
 64032  //    * VPAVGW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
 64033  //    * VPAVGW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
 64034  //
func (self *Program) VPAVGW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPAVGW", 3, Operands { v0, v1, v2 })
    // Word-element sibling of VPAVGB: identical form dispatch and prefix
    // construction, with opcode 0xe3 (PAVGW) instead of 0xe0.
    // VPAVGW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix (arg 1 = xmm forms, 5 = ymm forms).
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xe3)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VPAVGW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xe3)
            m.mrsd(lcode(v[2]), addr(v[0]), 1) // ModRM+SIB+disp for the memory operand
        })
    }
    // VPAVGW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xe3)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPAVGW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xe3)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPAVGW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Inline 4-byte EVEX prefix; 0x40/0x20/0x00 selects 512/256/128-bit
            // vector length across the register forms.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xe3)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPAVGW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Final evex arg is 0: word-element instruction, no broadcast form.
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe3)
            m.mrsd(lcode(v[2]), addr(v[0]), 64) // disp8 scaled by 64
        })
    }
    // VPAVGW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xe3)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPAVGW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe3)
            m.mrsd(lcode(v[2]), addr(v[0]), 16) // disp8 scaled by 16
        })
    }
    // VPAVGW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xe3)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPAVGW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe3)
            m.mrsd(lcode(v[2]), addr(v[0]), 32) // disp8 scaled by 32
        })
    }
    // No form matched: the operands are not a supported combination.
    if p.len == 0 {
        panic("invalid operands for VPAVGW")
    }
    return p
}
 64151  
 64152  // VPBLENDD performs "Blend Packed Doublewords".
 64153  //
 64154  // Mnemonic        : VPBLENDD
 64155  // Supported forms : (4 forms)
 64156  //
 64157  //    * VPBLENDD imm8, xmm, xmm, xmm     [AVX2]
 64158  //    * VPBLENDD imm8, m128, xmm, xmm    [AVX2]
 64159  //    * VPBLENDD imm8, ymm, ymm, ymm     [AVX2]
 64160  //    * VPBLENDD imm8, m256, ymm, ymm    [AVX2]
 64161  //
func (self *Program) VPBLENDD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPBLENDD", 4, Operands { v0, v1, v2, v3 })
    // Four AVX2-only forms; all take a leading imm8 selector (v0) that is
    // emitted as the trailing immediate byte. Operand checks are disjoint.
    // VPBLENDD imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Inline 3-byte VEX prefix: 0xc4 escape + two payload bytes folding
            // in the register high bits and the non-destructive source (hlcode).
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x02)                                  // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1])) // ModRM: mod=11, reg=dst, rm=src
            m.imm1(toImmAny(v[0]))                        // trailing imm8 blend selector
        })
    }
    // VPBLENDD imm8, m128, xmm, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX via helper for the memory form.
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x02)
            m.mrsd(lcode(v[3]), addr(v[1]), 1) // ModRM+SIB+disp for the memory operand
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPBLENDD imm8, ymm, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same inline VEX layout; 0x7d vs 0x79 — NOTE(review): presumably
            // the VEX.L bit for the 256-bit width, confirm against the xmm form.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit(0x02)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPBLENDD imm8, m256, ymm, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x02)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched: the operands are not a supported combination.
    if p.len == 0 {
        panic("invalid operands for VPBLENDD")
    }
    return p
}
 64217  
// VPBLENDMB performs "Blend Byte Vectors Using an OpMask Control".
//
// Mnemonic        : VPBLENDMB
// Supported forms : (6 forms)
//
//    * VPBLENDMB zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPBLENDMB m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPBLENDMB xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPBLENDMB m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPBLENDMB ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPBLENDMB m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Operand order follows the form list above (sources first, destination
// last). Every form whose operand types match appends one candidate
// encoder via p.add; if no form matches, the function panics.
func (self *Program) VPBLENDMB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPBLENDMB", 3, Operands { v0, v1, v2 })
    // VPBLENDMB zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-register form: hand-assembled EVEX prefix (escape byte
            // 0x62 plus three payload bytes mixing in the register-extension,
            // vvvv, opmask and zeroing bits), then the opcode (0x66) and a
            // register-register ModRM byte (0xc0 | reg<<3 | rm). The 0x40 in
            // the fourth prefix byte is the 512-bit length selector — compare
            // 0x00 (xmm) and 0x20 (ymm) in the forms below.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x66)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPBLENDMB m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: m.evex builds the prefix and m.mrsd emits the
            // ModRM/SIB/displacement; the trailing 64 matches the 64-byte
            // memory operand (presumably the disp8 compression scale).
            // Final 0 to m.evex: no embedded broadcast for byte elements.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x66)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPBLENDMB xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same as the zmm form, with length bits 0x00 (128-bit).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x66)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPBLENDMB m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x66)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPBLENDMB ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same as the zmm form, with length bits 0x20 (256-bit).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x66)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPBLENDMB m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x66)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPBLENDMB")
    }
    return p
}
 64306  
// VPBLENDMD performs "Blend Doubleword Vectors Using an OpMask Control".
//
// Mnemonic        : VPBLENDMD
// Supported forms : (6 forms)
//
//    * VPBLENDMD m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPBLENDMD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPBLENDMD m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPBLENDMD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPBLENDMD m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPBLENDMD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Operand order follows the form list above (sources first, destination
// last). Unlike the byte/word variants, the memory forms here support
// embedded broadcast (m32bcst), threaded through bcode(v[0]) into m.evex.
// If no form matches the operand types, the function panics.
func (self *Program) VPBLENDMD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPBLENDMD", 3, Operands { v0, v1, v2 })
    // VPBLENDMD m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: m.evex builds the prefix (bcode(v[0]) carries the
            // embedded-broadcast bit); m.mrsd emits ModRM/SIB/displacement,
            // with 64 matching the 64-byte operand width.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x64)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPBLENDMD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-register form: hand-assembled EVEX prefix (0x62 + 3
            // payload bytes), opcode 0x64, then a register-register ModRM
            // byte. 0x40 in the fourth prefix byte selects the 512-bit
            // length (0x00 = xmm, 0x20 = ymm below).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x64)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPBLENDMD m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x64)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPBLENDMD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x64)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPBLENDMD m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x64)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPBLENDMD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x64)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPBLENDMD")
    }
    return p
}
 64395  
// VPBLENDMQ performs "Blend Quadword Vectors Using an OpMask Control".
//
// Mnemonic        : VPBLENDMQ
// Supported forms : (6 forms)
//
//    * VPBLENDMQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPBLENDMQ zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPBLENDMQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPBLENDMQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPBLENDMQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPBLENDMQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Operand order follows the form list above (sources first, destination
// last). Shares opcode 0x64 with VPBLENDMD; the prefix constants differ
// (0x85/0xfd here vs 0x05/0x7d there), presumably setting the EVEX W bit
// for 64-bit elements. Memory forms support m64bcst embedded broadcast
// via bcode(v[0]). Panics if no form matches.
func (self *Program) VPBLENDMQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPBLENDMQ", 3, Operands { v0, v1, v2 })
    // VPBLENDMQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: prefix via m.evex (broadcast bit from bcode),
            // ModRM/SIB/displacement via m.mrsd with the 64-byte width.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x64)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPBLENDMQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-register form: hand-assembled EVEX prefix, opcode
            // 0x64, register-register ModRM. Length bits: 0x40 = 512-bit.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x64)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPBLENDMQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x64)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPBLENDMQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same as the zmm form, with length bits 0x00 (128-bit).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x64)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPBLENDMQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x64)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPBLENDMQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same as the zmm form, with length bits 0x20 (256-bit).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x64)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPBLENDMQ")
    }
    return p
}
 64484  
// VPBLENDMW performs "Blend Word Vectors Using an OpMask Control".
//
// Mnemonic        : VPBLENDMW
// Supported forms : (6 forms)
//
//    * VPBLENDMW zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPBLENDMW m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPBLENDMW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPBLENDMW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPBLENDMW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPBLENDMW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Operand order follows the form list above (sources first, destination
// last). Shares opcode 0x66 with VPBLENDMB but uses the 0xfd/0x85 prefix
// constants (vs 0x7d/0x05 there), presumably the EVEX W bit selecting word
// elements. Like the byte variant, no embedded broadcast (m.evex's last
// argument is a literal 0). Panics if no form matches.
func (self *Program) VPBLENDMW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPBLENDMW", 3, Operands { v0, v1, v2 })
    // VPBLENDMW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-register form: hand-assembled EVEX prefix, opcode
            // 0x66, register-register ModRM. Length bits: 0x40 = 512-bit
            // (0x00 = xmm, 0x20 = ymm in the forms below).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x66)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPBLENDMW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: prefix via m.evex, ModRM/SIB/displacement via
            // m.mrsd; 64 matches the 64-byte memory operand width.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x66)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPBLENDMW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x66)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPBLENDMW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x66)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPBLENDMW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x66)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPBLENDMW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x66)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPBLENDMW")
    }
    return p
}
 64573  
// VPBLENDVB performs "Variable Blend Packed Bytes".
//
// Mnemonic        : VPBLENDVB
// Supported forms : (4 forms)
//
//    * VPBLENDVB xmm, xmm, xmm, xmm     [AVX]
//    * VPBLENDVB xmm, m128, xmm, xmm    [AVX]
//    * VPBLENDVB ymm, ymm, ymm, ymm     [AVX2]
//    * VPBLENDVB ymm, m256, ymm, ymm    [AVX2]
//
// VEX-encoded (not EVEX): v0 is the blend-mask register, v1/v2 the data
// sources, v3 the destination, per the form list above. The mask register
// v0 is carried in the high nibble of a trailing immediate byte
// (the emit of hlcode(v[0]) << 4 — the VEX /is4 convention). Panics if no
// form matches.
func (self *Program) VPBLENDVB(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPBLENDVB", 4, Operands { v0, v1, v2, v3 })
    // VPBLENDVB xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix (0xc4), opcode 0x4c,
            // register-register ModRM, then the /is4 byte selecting v0.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x4c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VPBLENDVB xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: m.vex3 builds the prefix, m.mrsd the
            // ModRM/SIB/displacement for the m128 source.
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x4c)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VPBLENDVB ymm, ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same as the xmm form; 0x7d (vs 0x79) carries the 256-bit
            // vector-length bit.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit(0x4c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VPBLENDVB ymm, m256, ymm, ymm
    if isYMM(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x4c)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPBLENDVB")
    }
    return p
}
 64639  
// VPBLENDW performs "Blend Packed Words".
//
// Mnemonic        : VPBLENDW
// Supported forms : (4 forms)
//
//    * VPBLENDW imm8, xmm, xmm, xmm     [AVX]
//    * VPBLENDW imm8, m128, xmm, xmm    [AVX]
//    * VPBLENDW imm8, ymm, ymm, ymm     [AVX2]
//    * VPBLENDW imm8, m256, ymm, ymm    [AVX2]
//
// VEX-encoded: v0 is the imm8 blend-control mask (emitted last via
// m.imm1), v1/v2 the sources, v3 the destination, per the form list
// above. Panics if no form matches.
func (self *Program) VPBLENDW(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPBLENDW", 4, Operands { v0, v1, v2, v3 })
    // VPBLENDW imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix (0xc4), opcode 0x0e,
            // register-register ModRM, then the imm8 control byte.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x0e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPBLENDW imm8, m128, xmm, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: m.vex3 builds the prefix, m.mrsd the
            // ModRM/SIB/displacement for the m128 source.
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x0e)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPBLENDW imm8, ymm, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same as the xmm form; 0x7d (vs 0x79) carries the 256-bit
            // vector-length bit.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit(0x0e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPBLENDW imm8, m256, ymm, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x0e)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPBLENDW")
    }
    return p
}
 64705  
// VPBROADCASTB performs "Broadcast Byte Integer".
//
// Mnemonic        : VPBROADCASTB
// Supported forms : (13 forms)
//
//    * VPBROADCASTB xmm, xmm          [AVX2]
//    * VPBROADCASTB m8, xmm           [AVX2]
//    * VPBROADCASTB xmm, ymm          [AVX2]
//    * VPBROADCASTB m8, ymm           [AVX2]
//    * VPBROADCASTB r32, zmm{k}{z}    [AVX512BW]
//    * VPBROADCASTB xmm, zmm{k}{z}    [AVX512BW]
//    * VPBROADCASTB m8, zmm{k}{z}     [AVX512BW]
//    * VPBROADCASTB r32, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPBROADCASTB r32, ymm{k}{z}    [AVX512BW,AVX512VL]
//    * VPBROADCASTB xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPBROADCASTB xmm, ymm{k}{z}    [AVX512BW,AVX512VL]
//    * VPBROADCASTB m8, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPBROADCASTB m8, ymm{k}{z}     [AVX512BW,AVX512VL]
//
// v0 is the source (xmm/m8 for the VEX forms; r32/xmm/m8 for the EVEX
// forms), v1 the destination. The xmm/m8-source forms emit opcode 0x78;
// the r32-source EVEX forms emit 0x7a instead. Plain AVX2 destinations
// take the VEX encodings; masked ({k}{z}) destinations take the
// hand-assembled EVEX encodings, whose third prefix byte varies only in
// the length bits (0x48 = zmm, 0x08 = xmm, 0x28 = ymm). Panics if no
// form matches.
func (self *Program) VPBROADCASTB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPBROADCASTB", 2, Operands { v0, v1 })
    // VPBROADCASTB xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix (0xc4), opcode 0x78,
            // register-register ModRM.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTB m8, xmm
    if isM8(v0) && isXMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: m.vex3 builds the prefix, m.mrsd the
            // ModRM/SIB/displacement for the m8 source.
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x78)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPBROADCASTB xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same as xmm->xmm; 0x7d (vs 0x79) carries the 256-bit
            // vector-length bit.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTB m8, ymm
    if isM8(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x78)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPBROADCASTB r32, zmm{k}{z}
    if isReg32(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX form broadcasting from a general-purpose register:
            // note opcode 0x7a rather than 0x78.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTB xmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTB m8, zmm{k}{z}
    if isM8(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; disp scale stays 1 since the source is a
            // single byte regardless of destination width.
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x78)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPBROADCASTB r32, xmm{k}{z}
    if isReg32(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTB r32, ymm{k}{z}
    if isReg32(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x7a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTB xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTB xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x78)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTB m8, xmm{k}{z}
    if isM8(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x78)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPBROADCASTB m8, ymm{k}{z}
    if isM8(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x78)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPBROADCASTB")
    }
    return p
}
 64884  
// VPBROADCASTD performs "Broadcast Doubleword Integer".
//
// Mnemonic        : VPBROADCASTD
// Supported forms : (13 forms)
//
//    * VPBROADCASTD xmm, xmm          [AVX2]
//    * VPBROADCASTD m32, xmm          [AVX2]
//    * VPBROADCASTD xmm, ymm          [AVX2]
//    * VPBROADCASTD m32, ymm          [AVX2]
//    * VPBROADCASTD r32, zmm{k}{z}    [AVX512F]
//    * VPBROADCASTD xmm, zmm{k}{z}    [AVX512F]
//    * VPBROADCASTD m32, zmm{k}{z}    [AVX512F]
//    * VPBROADCASTD r32, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPBROADCASTD r32, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPBROADCASTD xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPBROADCASTD xmm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPBROADCASTD m32, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPBROADCASTD m32, ymm{k}{z}    [AVX512F,AVX512VL]
//
func (self *Program) VPBROADCASTD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPBROADCASTD", 2, Operands { v0, v1 })
    // Each operand-form match below registers one candidate encoder on p;
    // if no form matches, p.len stays 0 and we panic at the bottom.
    // VPBROADCASTD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix (0xc4), with the R/B register
            // extension bits folded into the second byte via hcode.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0x58)
            // ModRM byte: mod=11 (register direct), reg=dest, rm=source.
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTD m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x58)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPBROADCASTD xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTD m32, ymm
    if isM32(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x58)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPBROADCASTD r32, zmm{k}{z}
    if isReg32(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX encoding: 0x62 prefix, then bytes packing the
            // R/X/B/R' register-extension bits, opcode map + pp, and the
            // z/L'L/aaa (mask) fields — see Intel SDM Vol. 2, EVEX prefix.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            // 0x48 selects L'L=10 (512-bit); zcode/kcode merge in z and aaa.
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x7c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTD xmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTD m32, zmm{k}{z}
    if isM32(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x58)
            // Final argument 4 is presumably the EVEX disp8*N compression
            // scale, matching the 4-byte element size — see SDM.
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VPBROADCASTD r32, xmm{k}{z}
    if isReg32(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            // 0x08 selects L'L=00 (128-bit vector length).
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x7c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTD r32, ymm{k}{z}
    if isReg32(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            // 0x28 selects L'L=01 (256-bit vector length).
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x7c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTD xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x58)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTD m32, xmm{k}{z}
    if isM32(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x58)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VPBROADCASTD m32, ymm{k}{z}
    if isM32(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x58)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPBROADCASTD")
    }
    return p
}
 65063  
// VPBROADCASTMB2Q performs "Broadcast Low Byte of Mask Register to Packed Quadword Values".
//
// Mnemonic        : VPBROADCASTMB2Q
// Supported forms : (3 forms)
//
//    * VPBROADCASTMB2Q k, xmm    [AVX512CD,AVX512VL]
//    * VPBROADCASTMB2Q k, ymm    [AVX512CD,AVX512VL]
//    * VPBROADCASTMB2Q k, zmm    [AVX512CD]
//
func (self *Program) VPBROADCASTMB2Q(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPBROADCASTMB2Q", 2, Operands { v0, v1 })
    // All three forms are register-only (mask reg -> vector reg) and share
    // opcode 0x2a; only the vector-length byte (0x08/0x28/0x48) differs.
    // VPBROADCASTMB2Q k, xmm
    if isK(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX encoding (0x62 prefix) — no masking fields
            // here since this instruction takes no {k}{z} decoration.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit(0x08)
            m.emit(0x2a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTMB2Q k, ymm
    if isK(v0) && isEVEXYMM(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit(0x28)
            m.emit(0x2a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTMB2Q k, zmm
    if isK(v0) && isZMM(v1) {
        self.require(ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit(0x48)
            m.emit(0x2a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPBROADCASTMB2Q")
    }
    return p
}
 65119  
// VPBROADCASTMW2D performs "Broadcast Low Word of Mask Register to Packed Doubleword Values".
//
// Mnemonic        : VPBROADCASTMW2D
// Supported forms : (3 forms)
//
//    * VPBROADCASTMW2D k, xmm    [AVX512CD,AVX512VL]
//    * VPBROADCASTMW2D k, ymm    [AVX512CD,AVX512VL]
//    * VPBROADCASTMW2D k, zmm    [AVX512CD]
//
func (self *Program) VPBROADCASTMW2D(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPBROADCASTMW2D", 2, Operands { v0, v1 })
    // Mirrors VPBROADCASTMB2Q above, but with opcode 0x3a and a different
    // P1 byte (0x7e); only the vector-length byte varies between forms.
    // VPBROADCASTMW2D k, xmm
    if isK(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX encoding (0x62 prefix); no {k}{z} masking.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit(0x08)
            m.emit(0x3a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTMW2D k, ymm
    if isK(v0) && isEVEXYMM(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit(0x28)
            m.emit(0x3a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTMW2D k, zmm
    if isK(v0) && isZMM(v1) {
        self.require(ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit(0x48)
            m.emit(0x3a)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPBROADCASTMW2D")
    }
    return p
}
 65175  
// VPBROADCASTQ performs "Broadcast Quadword Integer".
//
// Mnemonic        : VPBROADCASTQ
// Supported forms : (13 forms)
//
//    * VPBROADCASTQ xmm, xmm          [AVX2]
//    * VPBROADCASTQ m64, xmm          [AVX2]
//    * VPBROADCASTQ xmm, ymm          [AVX2]
//    * VPBROADCASTQ m64, ymm          [AVX2]
//    * VPBROADCASTQ r64, zmm{k}{z}    [AVX512F]
//    * VPBROADCASTQ xmm, zmm{k}{z}    [AVX512F]
//    * VPBROADCASTQ m64, zmm{k}{z}    [AVX512F]
//    * VPBROADCASTQ r64, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPBROADCASTQ r64, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPBROADCASTQ xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPBROADCASTQ xmm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPBROADCASTQ m64, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPBROADCASTQ m64, ymm{k}{z}    [AVX512F,AVX512VL]
//
func (self *Program) VPBROADCASTQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPBROADCASTQ", 2, Operands { v0, v1 })
    // Structured like VPBROADCASTD, but opcode 0x59 instead of 0x58 and the
    // EVEX P1 byte is 0xfd / flags arg 0x85 (vs 0x7d / 0x05) — presumably
    // selecting W=1 for 64-bit elements; confirm against Intel SDM.
    // VPBROADCASTQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix with R/B bits from hcode.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTQ m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x59)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPBROADCASTQ xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTQ m64, ymm
    if isM64(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x59)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPBROADCASTQ r64, zmm{k}{z}
    if isReg64(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX encoding: 0x62 prefix, register-extension
            // bits, then z/L'L/aaa masking byte (0x48 = 512-bit length).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x7c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTQ xmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTQ m64, zmm{k}{z}
    if isM64(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x59)
            // Final argument 8 is presumably the EVEX disp8*N compression
            // scale, matching the 8-byte element size — see SDM.
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VPBROADCASTQ r64, xmm{k}{z}
    if isReg64(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            // 0x08 selects the 128-bit vector length.
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x7c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTQ r64, ymm{k}{z}
    if isReg64(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            // 0x28 selects the 256-bit vector length.
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x7c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTQ xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTQ xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x59)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTQ m64, xmm{k}{z}
    if isM64(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x59)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VPBROADCASTQ m64, ymm{k}{z}
    if isM64(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x59)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPBROADCASTQ")
    }
    return p
}
 65354  
// VPBROADCASTW performs "Broadcast Word Integer".
//
// Mnemonic        : VPBROADCASTW
// Supported forms : (13 forms)
//
//    * VPBROADCASTW xmm, xmm          [AVX2]
//    * VPBROADCASTW m16, xmm          [AVX2]
//    * VPBROADCASTW xmm, ymm          [AVX2]
//    * VPBROADCASTW m16, ymm          [AVX2]
//    * VPBROADCASTW r32, zmm{k}{z}    [AVX512BW]
//    * VPBROADCASTW xmm, zmm{k}{z}    [AVX512BW]
//    * VPBROADCASTW m16, zmm{k}{z}    [AVX512BW]
//    * VPBROADCASTW r32, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPBROADCASTW r32, ymm{k}{z}    [AVX512BW,AVX512VL]
//    * VPBROADCASTW xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPBROADCASTW xmm, ymm{k}{z}    [AVX512BW,AVX512VL]
//    * VPBROADCASTW m16, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPBROADCASTW m16, ymm{k}{z}    [AVX512BW,AVX512VL]
//
func (self *Program) VPBROADCASTW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPBROADCASTW", 2, Operands { v0, v1 })
    // Same layout as VPBROADCASTD/Q: vector-source forms use opcode 0x79,
    // GPR-source EVEX forms use 0x7b, and the AVX-512 forms require
    // AVX512BW rather than AVX512F.
    // VPBROADCASTW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix with R/B bits from hcode.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTW m16, xmm
    if isM16(v0) && isXMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPBROADCASTW xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTW m16, ymm
    if isM16(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPBROADCASTW r32, zmm{k}{z}
    if isReg32(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX encoding: 0x62 prefix, register-extension
            // bits, then z/L'L/aaa masking byte (0x48 = 512-bit length).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTW xmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTW m16, zmm{k}{z}
    if isM16(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x79)
            // Final argument 2 is presumably the EVEX disp8*N compression
            // scale, matching the 2-byte element size — see SDM.
            m.mrsd(lcode(v[1]), addr(v[0]), 2)
        })
    }
    // VPBROADCASTW r32, xmm{k}{z}
    if isReg32(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            // 0x08 selects the 128-bit vector length.
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTW r32, ymm{k}{z}
    if isReg32(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            // 0x28 selects the 256-bit vector length.
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x7b)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTW xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTW xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x79)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPBROADCASTW m16, xmm{k}{z}
    if isM16(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 2)
        })
    }
    // VPBROADCASTW m16, ymm{k}{z}
    if isM16(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x79)
            m.mrsd(lcode(v[1]), addr(v[0]), 2)
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPBROADCASTW")
    }
    return p
}
 65533  
// VPCLMULQDQ performs "Carry-Less Quadword Multiplication".
//
// Mnemonic        : VPCLMULQDQ
// Supported forms : (2 forms)
//
//    * VPCLMULQDQ imm8, xmm, xmm, xmm     [AVX,PCLMULQDQ]
//    * VPCLMULQDQ imm8, m128, xmm, xmm    [AVX,PCLMULQDQ]
//
func (self *Program) VPCLMULQDQ(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPCLMULQDQ", 4, Operands { v0, v1, v2, v3 })
    // Both forms are VEX-encoded with opcode 0x44 and a trailing imm8
    // selector; note this is classified under DomainCrypto, not DomainAVX.
    // VPCLMULQDQ imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX | ISA_PCLMULQDQ)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix (0xc4); the vvvv field carries
            // the second source register via hlcode(v[2]).
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x44)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            // Immediate selects which quadword halves are multiplied.
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCLMULQDQ imm8, m128, xmm, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX | ISA_PCLMULQDQ)
        p.domain = DomainCrypto
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x44)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPCLMULQDQ")
    }
    return p
}
 65573  
 65574  // VPCMOV performs "Packed Conditional Move".
 65575  //
 65576  // Mnemonic        : VPCMOV
 65577  // Supported forms : (6 forms)
 65578  //
 65579  //    * VPCMOV xmm, xmm, xmm, xmm     [XOP]
 65580  //    * VPCMOV m128, xmm, xmm, xmm    [XOP]
 65581  //    * VPCMOV xmm, m128, xmm, xmm    [XOP]
 65582  //    * VPCMOV ymm, ymm, ymm, ymm     [XOP]
 65583  //    * VPCMOV m256, ymm, ymm, ymm    [XOP]
 65584  //    * VPCMOV ymm, m256, ymm, ymm    [XOP]
 65585  //
func (self *Program) VPCMOV(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Each `if` below matches one operand pattern from the form table in the
    // doc comment; a match appends one or more candidate encoder closures via
    // p.add (register-only forms add two equivalent XOP encodings with the
    // register/"is4" operands swapped). If nothing matched, p.len stays 0 and
    // we panic at the bottom.
    p := self.alloc("VPCMOV", 4, Operands { v0, v1, v2, v3 })
    // VPCMOV xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte XOP prefix (escape 0x8f) with v[1] in ModRM.rm;
            // the trailing byte carries a register selector in its high nibble
            // (hlcode(v[0]) << 4) — presumably the XOP "is4" operand.
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[2]) << 3))
            m.emit(0xa2)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Alternate encoding of the same form: v[0] and v[1] trade places
            // between ModRM.rm and the trailing selector byte (note the 0xf8
            // vs 0x78 prefix byte, which flips a mode bit — likely XOP.W).
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[2]) << 3))
            m.emit(0xa2)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VPCMOV m128, xmm, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: vex3() builds the XOP prefix, mrsd() emits
            // ModRM/SIB for the address with scale 1.
            m.vex3(0x8f, 0b1000, 0x80, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0xa2)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VPCMOV xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same as above but the memory operand is v[1] (vex3 opcode byte
            // 0x00 vs 0x80 distinguishes which side is memory).
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xa2)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VPCMOV ymm, ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit variant: 0x7c/0xfc instead of 0x78/0xf8 — the extra bit
            // is presumably XOP.L selecting ymm width.
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7c ^ (hlcode(v[2]) << 3))
            m.emit(0xa2)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfc ^ (hlcode(v[2]) << 3))
            m.emit(0xa2)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VPCMOV m256, ymm, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) && isYMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1000, 0x84, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0xa2)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VPCMOV ymm, m256, ymm, ymm
    if isYMM(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1000, 0x04, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xa2)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No operand pattern matched: the arguments are not a supported VPCMOV form.
    if p.len == 0 {
        panic("invalid operands for VPCMOV")
    }
    return p
}
 65679  
 65680  // VPCMPB performs "Compare Packed Signed Byte Values".
 65681  //
 65682  // Mnemonic        : VPCMPB
 65683  // Supported forms : (6 forms)
 65684  //
 65685  //    * VPCMPB imm8, zmm, zmm, k{k}     [AVX512BW]
 65686  //    * VPCMPB imm8, m512, zmm, k{k}    [AVX512BW]
 65687  //    * VPCMPB imm8, xmm, xmm, k{k}     [AVX512BW,AVX512VL]
 65688  //    * VPCMPB imm8, m128, xmm, k{k}    [AVX512BW,AVX512VL]
 65689  //    * VPCMPB imm8, ymm, ymm, k{k}     [AVX512BW,AVX512VL]
 65690  //    * VPCMPB imm8, m256, ymm, k{k}    [AVX512BW,AVX512VL]
 65691  //
func (self *Program) VPCMPB(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Result is written to an opmask register (v3, matched by isKk), with an
    // optional merging mask folded in via kcode(v[3]). Each `if` matches one
    // operand pattern from the form table in the doc comment and appends a
    // candidate encoder via p.add; if nothing matched, p.len stays 0 and we
    // panic at the bottom.
    p := self.alloc("VPCMPB", 4, Operands { v0, v1, v2, v3 })
    // VPCMPB imm8, zmm, zmm, k{k}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isKk(v3) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (escape 0x62). The last prefix
            // byte ORs in kcode(v[3]) (opmask selector) and 0x40; the sibling
            // xmm/ymm forms below use 0x00/0x20 here, mirroring the 0b00/0b01/
            // 0b10 length argument of evex() in the memory forms — presumably
            // the EVEX.L'L vector-length field.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x3f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPB imm8, m512, zmm, k{k}
    if isImm8(v0) && isM512(v1) && isZMM(v2) && isKk(v3) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: evex() builds the prefix; mrsd() emits ModRM/SIB
            // with scale 64 — the operand width in bytes (presumably the
            // EVEX compressed-disp8 factor).
            m.evex(0b11, 0x05, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, 0)
            m.emit(0x3f)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPB imm8, xmm, xmm, k{k}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0x3f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPB imm8, m128, xmm, k{k}
    if isImm8(v0) && isM128(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, 0)
            m.emit(0x3f)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPB imm8, ymm, ymm, k{k}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x3f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPB imm8, m256, ymm, k{k}
    if isImm8(v0) && isM256(v1) && isEVEXYMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, 0)
            m.emit(0x3f)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No operand pattern matched: the arguments are not a supported VPCMPB form.
    if p.len == 0 {
        panic("invalid operands for VPCMPB")
    }
    return p
}
 65774  
 65775  // VPCMPD performs "Compare Packed Signed Doubleword Values".
 65776  //
 65777  // Mnemonic        : VPCMPD
 65778  // Supported forms : (6 forms)
 65779  //
 65780  //    * VPCMPD imm8, m512/m32bcst, zmm, k{k}    [AVX512F]
 65781  //    * VPCMPD imm8, zmm, zmm, k{k}             [AVX512F]
 65782  //    * VPCMPD imm8, m128/m32bcst, xmm, k{k}    [AVX512F,AVX512VL]
 65783  //    * VPCMPD imm8, xmm, xmm, k{k}             [AVX512F,AVX512VL]
 65784  //    * VPCMPD imm8, m256/m32bcst, ymm, k{k}    [AVX512F,AVX512VL]
 65785  //    * VPCMPD imm8, ymm, ymm, k{k}             [AVX512F,AVX512VL]
 65786  //
func (self *Program) VPCMPD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Doubleword variant of VPCMPB: identical dispatch structure, but the
    // opcode is 0x1f (vs 0x3f) and the memory forms additionally accept a
    // 32-bit broadcast (isM*M32bcst + bcode(v[1]) in the evex() call).
    // Result goes to an opmask register (v3); if no operand pattern matches,
    // p.len stays 0 and we panic at the bottom.
    p := self.alloc("VPCMPD", 4, Operands { v0, v1, v2, v3 })
    // VPCMPD imm8, m512/m32bcst, zmm, k{k}
    if isImm8(v0) && isM512M32bcst(v1) && isZMM(v2) && isKk(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // evex() builds the prefix (bcode(v[1]) flags broadcast); mrsd()
            // emits ModRM/SIB with scale 64 — the full operand width in bytes.
            m.evex(0b11, 0x05, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, bcode(v[1]))
            m.emit(0x1f)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPD imm8, zmm, zmm, k{k}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isKk(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (escape 0x62); the final | 0x40
            // vs 0x00/0x20 in the xmm/ymm forms below selects the vector
            // length (presumably EVEX.L'L).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x1f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPD imm8, m128/m32bcst, xmm, k{k}
    if isImm8(v0) && isM128M32bcst(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, bcode(v[1]))
            m.emit(0x1f)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPD imm8, xmm, xmm, k{k}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0x1f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPD imm8, m256/m32bcst, ymm, k{k}
    if isImm8(v0) && isM256M32bcst(v1) && isEVEXYMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, bcode(v[1]))
            m.emit(0x1f)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPD imm8, ymm, ymm, k{k}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x1f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No operand pattern matched: the arguments are not a supported VPCMPD form.
    if p.len == 0 {
        panic("invalid operands for VPCMPD")
    }
    return p
}
 65869  
 65870  // VPCMPEQB performs "Compare Packed Byte Data for Equality".
 65871  //
 65872  // Mnemonic        : VPCMPEQB
 65873  // Supported forms : (10 forms)
 65874  //
 65875  //    * VPCMPEQB xmm, xmm, xmm      [AVX]
 65876  //    * VPCMPEQB m128, xmm, xmm     [AVX]
 65877  //    * VPCMPEQB ymm, ymm, ymm      [AVX2]
 65878  //    * VPCMPEQB m256, ymm, ymm     [AVX2]
 65879  //    * VPCMPEQB zmm, zmm, k{k}     [AVX512BW]
 65880  //    * VPCMPEQB m512, zmm, k{k}    [AVX512BW]
 65881  //    * VPCMPEQB xmm, xmm, k{k}     [AVX512BW,AVX512VL]
 65882  //    * VPCMPEQB m128, xmm, k{k}    [AVX512BW,AVX512VL]
 65883  //    * VPCMPEQB ymm, ymm, k{k}     [AVX512BW,AVX512VL]
 65884  //    * VPCMPEQB m256, ymm, k{k}    [AVX512BW,AVX512VL]
 65885  //
func (self *Program) VPCMPEQB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Two families of forms share opcode 0x74: legacy VEX forms (AVX/AVX2,
    // destination is a vector register) and EVEX forms (AVX-512BW, destination
    // is an opmask register matched by isKk). Each `if` matches one pattern
    // from the form table in the doc comment and appends a candidate encoder;
    // if nothing matched, p.len stays 0 and we panic at the bottom.
    p := self.alloc("VPCMPEQB", 3, Operands { v0, v1, v2 })
    // VPCMPEQB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix; first argument 1 vs 5 in the ymm forms below
            // presumably toggles VEX.L (vector length).
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x74)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPCMPEQB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x74)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPCMPEQB ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x74)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPCMPEQB m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x74)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPCMPEQB zmm, zmm, k{k}
    if isZMM(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (escape 0x62); the last prefix
            // byte ORs in kcode(v[2]) (opmask selector) and 0x40 — the zmm
            // length; the xmm/ymm EVEX forms below use 0x00/0x20 instead.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x74)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPCMPEQB m512, zmm, k{k}
    if isM512(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: mrsd() scale 64 matches the operand width in
            // bytes (presumably the EVEX compressed-disp8 factor).
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
            m.emit(0x74)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPCMPEQB xmm, xmm, k{k}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x74)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPCMPEQB m128, xmm, k{k}
    if isM128(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
            m.emit(0x74)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPCMPEQB ymm, ymm, k{k}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x74)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPCMPEQB m256, ymm, k{k}
    if isM256(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
            m.emit(0x74)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No operand pattern matched: the arguments are not a supported VPCMPEQB form.
    if p.len == 0 {
        panic("invalid operands for VPCMPEQB")
    }
    return p
}
 66002  
 66003  // VPCMPEQD performs "Compare Packed Doubleword Data for Equality".
 66004  //
 66005  // Mnemonic        : VPCMPEQD
 66006  // Supported forms : (10 forms)
 66007  //
 66008  //    * VPCMPEQD xmm, xmm, xmm              [AVX]
 66009  //    * VPCMPEQD m128, xmm, xmm             [AVX]
 66010  //    * VPCMPEQD ymm, ymm, ymm              [AVX2]
 66011  //    * VPCMPEQD m256, ymm, ymm             [AVX2]
 66012  //    * VPCMPEQD m512/m32bcst, zmm, k{k}    [AVX512F]
 66013  //    * VPCMPEQD zmm, zmm, k{k}             [AVX512F]
 66014  //    * VPCMPEQD m128/m32bcst, xmm, k{k}    [AVX512F,AVX512VL]
 66015  //    * VPCMPEQD xmm, xmm, k{k}             [AVX512F,AVX512VL]
 66016  //    * VPCMPEQD m256/m32bcst, ymm, k{k}    [AVX512F,AVX512VL]
 66017  //    * VPCMPEQD ymm, ymm, k{k}             [AVX512F,AVX512VL]
 66018  //
func (self *Program) VPCMPEQD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Doubleword sibling of VPCMPEQB: same dispatch structure with opcode
    // 0x76 instead of 0x74, and the EVEX memory forms accept a 32-bit
    // broadcast (isM*M32bcst + bcode(v[0]) in the evex() call). VEX forms
    // write a vector register; EVEX forms write an opmask register (isKk).
    // If no operand pattern matches, p.len stays 0 and we panic at the bottom.
    p := self.alloc("VPCMPEQD", 3, Operands { v0, v1, v2 })
    // VPCMPEQD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix; first argument 1 (xmm) vs 5 (ymm) presumably
            // toggles VEX.L.
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x76)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPCMPEQD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x76)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPCMPEQD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x76)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPCMPEQD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x76)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPCMPEQD m512/m32bcst, zmm, k{k}
    if isM512M32bcst(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // bcode(v[0]) flags the broadcast variant; mrsd() scale 64 is the
            // full operand width in bytes.
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
            m.emit(0x76)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPCMPEQD zmm, zmm, k{k}
    if isZMM(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix; | 0x40 selects the zmm length
            // (0x00/0x20 in the xmm/ymm forms below), kcode(v[2]) the opmask.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x76)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPCMPEQD m128/m32bcst, xmm, k{k}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
            m.emit(0x76)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPCMPEQD xmm, xmm, k{k}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x76)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPCMPEQD m256/m32bcst, ymm, k{k}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
            m.emit(0x76)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPCMPEQD ymm, ymm, k{k}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x76)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand pattern matched: the arguments are not a supported VPCMPEQD form.
    if p.len == 0 {
        panic("invalid operands for VPCMPEQD")
    }
    return p
}
 66135  
 66136  // VPCMPEQQ performs "Compare Packed Quadword Data for Equality".
 66137  //
 66138  // Mnemonic        : VPCMPEQQ
 66139  // Supported forms : (10 forms)
 66140  //
 66141  //    * VPCMPEQQ xmm, xmm, xmm              [AVX]
 66142  //    * VPCMPEQQ m128, xmm, xmm             [AVX]
 66143  //    * VPCMPEQQ ymm, ymm, ymm              [AVX2]
 66144  //    * VPCMPEQQ m256, ymm, ymm             [AVX2]
 66145  //    * VPCMPEQQ m512/m64bcst, zmm, k{k}    [AVX512F]
 66146  //    * VPCMPEQQ zmm, zmm, k{k}             [AVX512F]
 66147  //    * VPCMPEQQ m128/m64bcst, xmm, k{k}    [AVX512F,AVX512VL]
 66148  //    * VPCMPEQQ xmm, xmm, k{k}             [AVX512F,AVX512VL]
 66149  //    * VPCMPEQQ m256/m64bcst, ymm, k{k}    [AVX512F,AVX512VL]
 66150  //    * VPCMPEQQ ymm, ymm, k{k}             [AVX512F,AVX512VL]
 66151  //
func (self *Program) VPCMPEQQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Quadword variant. Unlike VPCMPEQB/D (2-byte VEX), the AVX forms here
    // use a 3-byte VEX prefix (escape 0xc4, map 0b10 — the opcode lives in a
    // different opcode map) with opcode 0x29, and the EVEX forms pass 0x85
    // (vs 0x05 elsewhere), presumably setting EVEX.W for 64-bit elements.
    // Memory EVEX forms accept a 64-bit broadcast (isM*M64bcst + bcode(v[0])).
    // If no operand pattern matches, p.len stays 0 and we panic at the bottom.
    p := self.alloc("VPCMPEQQ", 3, Operands { v0, v1, v2 })
    // VPCMPEQQ xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix (escape 0xc4).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x29)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPCMPEQQ m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x29)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPCMPEQQ ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same as the xmm form but with 0x7d (vs 0x79), presumably
            // flipping VEX.L for 256-bit width.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x29)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPCMPEQQ m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x29)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPCMPEQQ m512/m64bcst, zmm, k{k}
    if isM512M64bcst(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // bcode(v[0]) flags the broadcast variant; mrsd() scale 64 is the
            // full operand width in bytes.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
            m.emit(0x29)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPCMPEQQ zmm, zmm, k{k}
    if isZMM(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix; note 0xfd (vs 0x7d in the
            // byte/dword variants — the W bit for 64-bit elements), | 0x40
            // for the zmm length, kcode(v[2]) for the opmask.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x29)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPCMPEQQ m128/m64bcst, xmm, k{k}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
            m.emit(0x29)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPCMPEQQ xmm, xmm, k{k}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x29)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPCMPEQQ m256/m64bcst, ymm, k{k}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
            m.emit(0x29)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPCMPEQQ ymm, ymm, k{k}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x29)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand pattern matched: the arguments are not a supported VPCMPEQQ form.
    if p.len == 0 {
        panic("invalid operands for VPCMPEQQ")
    }
    return p
}
 66272  
// VPCMPEQW performs "Compare Packed Word Data for Equality".
//
// Mnemonic        : VPCMPEQW
// Supported forms : (10 forms)
//
//    * VPCMPEQW xmm, xmm, xmm      [AVX]
//    * VPCMPEQW m128, xmm, xmm     [AVX]
//    * VPCMPEQW ymm, ymm, ymm      [AVX2]
//    * VPCMPEQW m256, ymm, ymm     [AVX2]
//    * VPCMPEQW zmm, zmm, k{k}     [AVX512BW]
//    * VPCMPEQW m512, zmm, k{k}    [AVX512BW]
//    * VPCMPEQW xmm, xmm, k{k}     [AVX512BW,AVX512VL]
//    * VPCMPEQW m128, xmm, k{k}    [AVX512BW,AVX512VL]
//    * VPCMPEQW ymm, ymm, k{k}     [AVX512BW,AVX512VL]
//    * VPCMPEQW m256, ymm, k{k}    [AVX512BW,AVX512VL]
//
func (self *Program) VPCMPEQW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Allocate the instruction record; each operand form that matches below
    // registers one candidate encoder closure via p.add().
    p := self.alloc("VPCMPEQW", 3, Operands { v0, v1, v2 })
    // VPCMPEQW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix, then opcode 0x75 and a register-register ModRM byte.
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x75)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPCMPEQW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x75)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPCMPEQW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x75)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPCMPEQW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x75)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPCMPEQW zmm, zmm, k{k}
    if isZMM(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 4-byte EVEX prefix (0x62 escape) for the
            // register-register form; the 0x40 bit in byte 4 selects
            // 512-bit vector length (presumably EVEX.L'L — confirm against SDM).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x75)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPCMPEQW m512, zmm, k{k}
    if isM512(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: evex() builds the prefix; the final mrsd argument (64)
            // is the disp8 scaling factor matching the 64-byte operand.
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
            m.emit(0x75)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPCMPEQW xmm, xmm, k{k}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x75)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPCMPEQW m128, xmm, k{k}
    if isM128(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
            m.emit(0x75)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPCMPEQW ymm, ymm, k{k}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x75)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPCMPEQW m256, ymm, k{k}
    if isM256(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
            m.emit(0x75)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No operand pattern matched: the call site passed an unsupported combination.
    if p.len == 0 {
        panic("invalid operands for VPCMPEQW")
    }
    return p
}
 66405  
// VPCMPESTRI performs "Packed Compare Explicit Length Strings, Return Index".
//
// Mnemonic        : VPCMPESTRI
// Supported forms : (2 forms)
//
//    * VPCMPESTRI imm8, xmm, xmm     [AVX]
//    * VPCMPESTRI imm8, m128, xmm    [AVX]
//
func (self *Program) VPCMPESTRI(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Allocate the instruction record; each matching operand form below
    // registers one candidate encoder closure via p.add().
    p := self.alloc("VPCMPESTRI", 3, Operands { v0, v1, v2 })
    // VPCMPESTRI imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix (0xc4 escape), opcode 0x61,
            // register-register ModRM, then the trailing imm8 control byte.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79)
            m.emit(0x61)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPESTRI imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x61)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No operand pattern matched: unsupported operand combination.
    if p.len == 0 {
        panic("invalid operands for VPCMPESTRI")
    }
    return p
}
 66445  
// VPCMPESTRM performs "Packed Compare Explicit Length Strings, Return Mask".
//
// Mnemonic        : VPCMPESTRM
// Supported forms : (2 forms)
//
//    * VPCMPESTRM imm8, xmm, xmm     [AVX]
//    * VPCMPESTRM imm8, m128, xmm    [AVX]
//
func (self *Program) VPCMPESTRM(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Allocate the instruction record; each matching operand form below
    // registers one candidate encoder closure via p.add().
    p := self.alloc("VPCMPESTRM", 3, Operands { v0, v1, v2 })
    // VPCMPESTRM imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix (0xc4 escape), opcode 0x60,
            // register-register ModRM, then the trailing imm8 control byte.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79)
            m.emit(0x60)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPESTRM imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x60)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No operand pattern matched: unsupported operand combination.
    if p.len == 0 {
        panic("invalid operands for VPCMPESTRM")
    }
    return p
}
 66485  
// VPCMPGTB performs "Compare Packed Signed Byte Integers for Greater Than".
//
// Mnemonic        : VPCMPGTB
// Supported forms : (10 forms)
//
//    * VPCMPGTB xmm, xmm, xmm      [AVX]
//    * VPCMPGTB m128, xmm, xmm     [AVX]
//    * VPCMPGTB ymm, ymm, ymm      [AVX2]
//    * VPCMPGTB m256, ymm, ymm     [AVX2]
//    * VPCMPGTB zmm, zmm, k{k}     [AVX512BW]
//    * VPCMPGTB m512, zmm, k{k}    [AVX512BW]
//    * VPCMPGTB xmm, xmm, k{k}     [AVX512BW,AVX512VL]
//    * VPCMPGTB m128, xmm, k{k}    [AVX512BW,AVX512VL]
//    * VPCMPGTB ymm, ymm, k{k}     [AVX512BW,AVX512VL]
//    * VPCMPGTB m256, ymm, k{k}    [AVX512BW,AVX512VL]
//
func (self *Program) VPCMPGTB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Allocate the instruction record; each operand form that matches below
    // registers one candidate encoder closure via p.add().
    p := self.alloc("VPCMPGTB", 3, Operands { v0, v1, v2 })
    // VPCMPGTB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix, opcode 0x64, register-register ModRM.
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x64)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPCMPGTB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x64)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPCMPGTB ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x64)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPCMPGTB m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x64)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPCMPGTB zmm, zmm, k{k}
    if isZMM(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 4-byte EVEX prefix (0x62 escape) for the
            // register-register form; 0x40 in byte 4 selects 512-bit length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x64)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPCMPGTB m512, zmm, k{k}
    if isM512(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: the final mrsd argument (64) is the disp8 scaling
            // factor matching the 64-byte operand.
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
            m.emit(0x64)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPCMPGTB xmm, xmm, k{k}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x64)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPCMPGTB m128, xmm, k{k}
    if isM128(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
            m.emit(0x64)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPCMPGTB ymm, ymm, k{k}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x64)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPCMPGTB m256, ymm, k{k}
    if isM256(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
            m.emit(0x64)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No operand pattern matched: unsupported operand combination.
    if p.len == 0 {
        panic("invalid operands for VPCMPGTB")
    }
    return p
}
 66618  
// VPCMPGTD performs "Compare Packed Signed Doubleword Integers for Greater Than".
//
// Mnemonic        : VPCMPGTD
// Supported forms : (10 forms)
//
//    * VPCMPGTD xmm, xmm, xmm              [AVX]
//    * VPCMPGTD m128, xmm, xmm             [AVX]
//    * VPCMPGTD ymm, ymm, ymm              [AVX2]
//    * VPCMPGTD m256, ymm, ymm             [AVX2]
//    * VPCMPGTD m512/m32bcst, zmm, k{k}    [AVX512F]
//    * VPCMPGTD zmm, zmm, k{k}             [AVX512F]
//    * VPCMPGTD m128/m32bcst, xmm, k{k}    [AVX512F,AVX512VL]
//    * VPCMPGTD xmm, xmm, k{k}             [AVX512F,AVX512VL]
//    * VPCMPGTD m256/m32bcst, ymm, k{k}    [AVX512F,AVX512VL]
//    * VPCMPGTD ymm, ymm, k{k}             [AVX512F,AVX512VL]
//
func (self *Program) VPCMPGTD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Allocate the instruction record; each operand form that matches below
    // registers one candidate encoder closure via p.add().
    p := self.alloc("VPCMPGTD", 3, Operands { v0, v1, v2 })
    // VPCMPGTD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix, opcode 0x66, register-register ModRM.
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x66)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPCMPGTD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x66)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPCMPGTD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x66)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPCMPGTD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x66)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPCMPGTD m512/m32bcst, zmm, k{k}
    if isM512M32bcst(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Broadcast-capable memory form: bcode(v[0]) carries the embedded
            // broadcast flag into the EVEX prefix.
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
            m.emit(0x66)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPCMPGTD zmm, zmm, k{k}
    if isZMM(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 4-byte EVEX prefix (0x62 escape) for the
            // register-register form; 0x40 in byte 4 selects 512-bit length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x66)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPCMPGTD m128/m32bcst, xmm, k{k}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
            m.emit(0x66)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPCMPGTD xmm, xmm, k{k}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x66)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPCMPGTD m256/m32bcst, ymm, k{k}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
            m.emit(0x66)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPCMPGTD ymm, ymm, k{k}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x66)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand pattern matched: unsupported operand combination.
    if p.len == 0 {
        panic("invalid operands for VPCMPGTD")
    }
    return p
}
 66751  
// VPCMPGTQ performs "Compare Packed Data for Greater Than".
//
// Mnemonic        : VPCMPGTQ
// Supported forms : (10 forms)
//
//    * VPCMPGTQ xmm, xmm, xmm              [AVX]
//    * VPCMPGTQ m128, xmm, xmm             [AVX]
//    * VPCMPGTQ ymm, ymm, ymm              [AVX2]
//    * VPCMPGTQ m256, ymm, ymm             [AVX2]
//    * VPCMPGTQ m512/m64bcst, zmm, k{k}    [AVX512F]
//    * VPCMPGTQ zmm, zmm, k{k}             [AVX512F]
//    * VPCMPGTQ m128/m64bcst, xmm, k{k}    [AVX512F,AVX512VL]
//    * VPCMPGTQ xmm, xmm, k{k}             [AVX512F,AVX512VL]
//    * VPCMPGTQ m256/m64bcst, ymm, k{k}    [AVX512F,AVX512VL]
//    * VPCMPGTQ ymm, ymm, k{k}             [AVX512F,AVX512VL]
//
func (self *Program) VPCMPGTQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Allocate the instruction record; each operand form that matches below
    // registers one candidate encoder closure via p.add().
    p := self.alloc("VPCMPGTQ", 3, Operands { v0, v1, v2 })
    // VPCMPGTQ xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix (0xc4 escape; this opcode lives
            // in the 0F38 map, which the 2-byte VEX form cannot express),
            // opcode 0x37, register-register ModRM.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x37)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPCMPGTQ m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x37)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPCMPGTQ ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x37)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPCMPGTQ m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x37)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPCMPGTQ m512/m64bcst, zmm, k{k}
    if isM512M64bcst(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Broadcast-capable memory form: bcode(v[0]) carries the embedded
            // broadcast flag; 0x85 (vs 0x05 in the dword variant) sets EVEX.W
            // for the 64-bit element width.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
            m.emit(0x37)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPCMPGTQ zmm, zmm, k{k}
    if isZMM(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 4-byte EVEX prefix (0x62 escape) for the
            // register-register form; 0x40 in byte 4 selects 512-bit length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x37)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPCMPGTQ m128/m64bcst, xmm, k{k}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
            m.emit(0x37)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPCMPGTQ xmm, xmm, k{k}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x37)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPCMPGTQ m256/m64bcst, ymm, k{k}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
            m.emit(0x37)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPCMPGTQ ymm, ymm, k{k}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x37)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand pattern matched: unsupported operand combination.
    if p.len == 0 {
        panic("invalid operands for VPCMPGTQ")
    }
    return p
}
 66888  
// VPCMPGTW performs "Compare Packed Signed Word Integers for Greater Than".
//
// Mnemonic        : VPCMPGTW
// Supported forms : (10 forms)
//
//    * VPCMPGTW xmm, xmm, xmm      [AVX]
//    * VPCMPGTW m128, xmm, xmm     [AVX]
//    * VPCMPGTW ymm, ymm, ymm      [AVX2]
//    * VPCMPGTW m256, ymm, ymm     [AVX2]
//    * VPCMPGTW zmm, zmm, k{k}     [AVX512BW]
//    * VPCMPGTW m512, zmm, k{k}    [AVX512BW]
//    * VPCMPGTW xmm, xmm, k{k}     [AVX512BW,AVX512VL]
//    * VPCMPGTW m128, xmm, k{k}    [AVX512BW,AVX512VL]
//    * VPCMPGTW ymm, ymm, k{k}     [AVX512BW,AVX512VL]
//    * VPCMPGTW m256, ymm, k{k}    [AVX512BW,AVX512VL]
//
func (self *Program) VPCMPGTW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Allocate the instruction record; each operand form that matches below
    // registers one candidate encoder closure via p.add().
    p := self.alloc("VPCMPGTW", 3, Operands { v0, v1, v2 })
    // VPCMPGTW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix, opcode 0x65, register-register ModRM.
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x65)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPCMPGTW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x65)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPCMPGTW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x65)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPCMPGTW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x65)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPCMPGTW zmm, zmm, k{k}
    if isZMM(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 4-byte EVEX prefix (0x62 escape) for the
            // register-register form; 0x40 in byte 4 selects 512-bit length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x65)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPCMPGTW m512, zmm, k{k}
    if isM512(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: the final mrsd argument (64) is the disp8 scaling
            // factor matching the 64-byte operand.
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
            m.emit(0x65)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPCMPGTW xmm, xmm, k{k}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x65)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPCMPGTW m128, xmm, k{k}
    if isM128(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
            m.emit(0x65)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPCMPGTW ymm, ymm, k{k}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x65)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPCMPGTW m256, ymm, k{k}
    if isM256(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
            m.emit(0x65)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No operand pattern matched: unsupported operand combination.
    if p.len == 0 {
        panic("invalid operands for VPCMPGTW")
    }
    return p
}
 67021  
// VPCMPISTRI performs "Packed Compare Implicit Length Strings, Return Index".
//
// Mnemonic        : VPCMPISTRI
// Supported forms : (2 forms)
//
//    * VPCMPISTRI imm8, xmm, xmm     [AVX]
//    * VPCMPISTRI imm8, m128, xmm    [AVX]
//
// Each operand pattern that matches appends one candidate encoder via p.add;
// if no pattern matched, the function panics with "invalid operands".
// The imm8 comparison-control byte (v0) is always the last byte emitted.
func (self *Program) VPCMPISTRI(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPCMPISTRI", 3, Operands { v0, v1, v2 })
    // VPCMPISTRI imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix: the XORs fold the high register
            // bits of v[2] and v[1] into the second byte (VEX stores them
            // inverted); 0x79 is the fixed map/prefix byte for this form.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79)
            // Opcode 0x63, then ModRM with reg = v[2], rm = v[1].
            m.emit(0x63)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPISTRI imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: vex3 builds the prefix from the address operand;
            // mrsd emits ModRM/SIB/displacement (scale 1: plain displacement).
            m.vex3(0xc4, 0b11, 0x01, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x63)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPCMPISTRI")
    }
    return p
}
 67061  
// VPCMPISTRM performs "Packed Compare Implicit Length Strings, Return Mask".
//
// Mnemonic        : VPCMPISTRM
// Supported forms : (2 forms)
//
//    * VPCMPISTRM imm8, xmm, xmm     [AVX]
//    * VPCMPISTRM imm8, m128, xmm    [AVX]
//
// Identical encoding shape to VPCMPISTRI except for the opcode byte (0x62
// instead of 0x63). The imm8 control byte (v0) is emitted last; if no
// operand pattern matched, the function panics.
func (self *Program) VPCMPISTRM(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPCMPISTRM", 3, Operands { v0, v1, v2 })
    // VPCMPISTRM imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix; high register bits of v[2]/v[1]
            // are XORed in (stored inverted), 0x79 is the fixed map/prefix byte.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79)
            // Opcode 0x62, ModRM with reg = v[2], rm = v[1].
            m.emit(0x62)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPISTRM imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: vex3 builds the prefix from the address operand;
            // mrsd emits ModRM/SIB/displacement (scale 1: plain displacement).
            m.vex3(0xc4, 0b11, 0x01, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x62)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPCMPISTRM")
    }
    return p
}
 67101  
// VPCMPQ performs "Compare Packed Signed Quadword Values".
//
// Mnemonic        : VPCMPQ
// Supported forms : (6 forms)
//
//    * VPCMPQ imm8, m512/m64bcst, zmm, k{k}    [AVX512F]
//    * VPCMPQ imm8, zmm, zmm, k{k}             [AVX512F]
//    * VPCMPQ imm8, m128/m64bcst, xmm, k{k}    [AVX512F,AVX512VL]
//    * VPCMPQ imm8, xmm, xmm, k{k}             [AVX512F,AVX512VL]
//    * VPCMPQ imm8, m256/m64bcst, ymm, k{k}    [AVX512F,AVX512VL]
//    * VPCMPQ imm8, ymm, ymm, k{k}             [AVX512F,AVX512VL]
//
// All forms emit opcode 0x1f with the imm8 comparison predicate (v0) as the
// final byte, and target the mask register v3 (whose own write-mask comes
// from kcode). Register forms build the 4-byte EVEX prefix by hand; memory
// forms delegate it to m.evex. W1 is selected via 0xfd / 0x85 (qword data).
func (self *Program) VPCMPQ(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPCMPQ", 4, Operands { v0, v1, v2, v3 })
    // VPCMPQ imm8, m512/m64bcst, zmm, k{k}
    if isImm8(v0) && isM512M64bcst(v1) && isZMM(v2) && isKk(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0b10 = 512-bit vector length; bcode(v[1]) sets the broadcast
            // bit; mrsd uses a 64-byte compressed-displacement unit.
            m.evex(0b11, 0x85, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, bcode(v[1]))
            m.emit(0x1f)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPQ imm8, zmm, zmm, k{k}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isKk(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built EVEX prefix; 0x40 in the fourth byte selects the
            // 512-bit length, kcode(v[3]) fills the write-mask field.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x1f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPQ imm8, m128/m64bcst, xmm, k{k}
    if isImm8(v0) && isM128M64bcst(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0b00 = 128-bit length; 16-byte displacement unit.
            m.evex(0b11, 0x85, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, bcode(v[1]))
            m.emit(0x1f)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPQ imm8, xmm, xmm, k{k}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit length: 0x00 in the fourth prefix byte.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0x1f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPQ imm8, m256/m64bcst, ymm, k{k}
    if isImm8(v0) && isM256M64bcst(v1) && isEVEXYMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0b01 = 256-bit length; 32-byte displacement unit.
            m.evex(0b11, 0x85, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, bcode(v[1]))
            m.emit(0x1f)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPQ imm8, ymm, ymm, k{k}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit length: 0x20 in the fourth prefix byte.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x1f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPCMPQ")
    }
    return p
}
 67196  
// VPCMPUB performs "Compare Packed Unsigned Byte Values".
//
// Mnemonic        : VPCMPUB
// Supported forms : (6 forms)
//
//    * VPCMPUB imm8, zmm, zmm, k{k}     [AVX512BW]
//    * VPCMPUB imm8, m512, zmm, k{k}    [AVX512BW]
//    * VPCMPUB imm8, xmm, xmm, k{k}     [AVX512BW,AVX512VL]
//    * VPCMPUB imm8, m128, xmm, k{k}    [AVX512BW,AVX512VL]
//    * VPCMPUB imm8, ymm, ymm, k{k}     [AVX512BW,AVX512VL]
//    * VPCMPUB imm8, m256, ymm, k{k}    [AVX512BW,AVX512VL]
//
// All forms emit opcode 0x3e with the imm8 comparison predicate (v0) as the
// final byte and target the mask register v3. W0 is selected via 0x7d / 0x05
// (byte data); byte forms have no broadcast, hence the trailing 0 to m.evex.
func (self *Program) VPCMPUB(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPCMPUB", 4, Operands { v0, v1, v2, v3 })
    // VPCMPUB imm8, zmm, zmm, k{k}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isKk(v3) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built EVEX prefix; 0x40 in the fourth byte selects the
            // 512-bit length, kcode(v[3]) fills the write-mask field.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x3e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUB imm8, m512, zmm, k{k}
    if isImm8(v0) && isM512(v1) && isZMM(v2) && isKk(v3) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0b10 = 512-bit length; 64-byte compressed-displacement unit.
            m.evex(0b11, 0x05, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, 0)
            m.emit(0x3e)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUB imm8, xmm, xmm, k{k}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit length: 0x00 in the fourth prefix byte.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0x3e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUB imm8, m128, xmm, k{k}
    if isImm8(v0) && isM128(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0b00 = 128-bit length; 16-byte displacement unit.
            m.evex(0b11, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, 0)
            m.emit(0x3e)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUB imm8, ymm, ymm, k{k}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit length: 0x20 in the fourth prefix byte.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x3e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUB imm8, m256, ymm, k{k}
    if isImm8(v0) && isM256(v1) && isEVEXYMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0b01 = 256-bit length; 32-byte displacement unit.
            m.evex(0b11, 0x05, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, 0)
            m.emit(0x3e)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPCMPUB")
    }
    return p
}
 67291  
// VPCMPUD performs "Compare Packed Unsigned Doubleword Values".
//
// Mnemonic        : VPCMPUD
// Supported forms : (6 forms)
//
//    * VPCMPUD imm8, m512/m32bcst, zmm, k{k}    [AVX512F]
//    * VPCMPUD imm8, zmm, zmm, k{k}             [AVX512F]
//    * VPCMPUD imm8, m128/m32bcst, xmm, k{k}    [AVX512F,AVX512VL]
//    * VPCMPUD imm8, xmm, xmm, k{k}             [AVX512F,AVX512VL]
//    * VPCMPUD imm8, m256/m32bcst, ymm, k{k}    [AVX512F,AVX512VL]
//    * VPCMPUD imm8, ymm, ymm, k{k}             [AVX512F,AVX512VL]
//
// All forms emit opcode 0x1e with the imm8 comparison predicate (v0) as the
// final byte and target the mask register v3. W0 is selected via 0x7d / 0x05
// (dword data); memory forms support 32-bit broadcast via bcode(v[1]).
func (self *Program) VPCMPUD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPCMPUD", 4, Operands { v0, v1, v2, v3 })
    // VPCMPUD imm8, m512/m32bcst, zmm, k{k}
    if isImm8(v0) && isM512M32bcst(v1) && isZMM(v2) && isKk(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0b10 = 512-bit length; 64-byte compressed-displacement unit.
            m.evex(0b11, 0x05, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, bcode(v[1]))
            m.emit(0x1e)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUD imm8, zmm, zmm, k{k}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isKk(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built EVEX prefix; 0x40 in the fourth byte selects the
            // 512-bit length, kcode(v[3]) fills the write-mask field.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x1e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUD imm8, m128/m32bcst, xmm, k{k}
    if isImm8(v0) && isM128M32bcst(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0b00 = 128-bit length; 16-byte displacement unit.
            m.evex(0b11, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, bcode(v[1]))
            m.emit(0x1e)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUD imm8, xmm, xmm, k{k}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit length: 0x00 in the fourth prefix byte.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0x1e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUD imm8, m256/m32bcst, ymm, k{k}
    if isImm8(v0) && isM256M32bcst(v1) && isEVEXYMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0b01 = 256-bit length; 32-byte displacement unit.
            m.evex(0b11, 0x05, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, bcode(v[1]))
            m.emit(0x1e)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUD imm8, ymm, ymm, k{k}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit length: 0x20 in the fourth prefix byte.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x1e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPCMPUD")
    }
    return p
}
 67386  
// VPCMPUQ performs "Compare Packed Unsigned Quadword Values".
//
// Mnemonic        : VPCMPUQ
// Supported forms : (6 forms)
//
//    * VPCMPUQ imm8, m512/m64bcst, zmm, k{k}    [AVX512F]
//    * VPCMPUQ imm8, zmm, zmm, k{k}             [AVX512F]
//    * VPCMPUQ imm8, m128/m64bcst, xmm, k{k}    [AVX512F,AVX512VL]
//    * VPCMPUQ imm8, xmm, xmm, k{k}             [AVX512F,AVX512VL]
//    * VPCMPUQ imm8, m256/m64bcst, ymm, k{k}    [AVX512F,AVX512VL]
//    * VPCMPUQ imm8, ymm, ymm, k{k}             [AVX512F,AVX512VL]
//
// Unsigned counterpart of VPCMPQ: same encoding shape and W1 selection
// (0xfd / 0x85) but opcode 0x1e instead of 0x1f. The imm8 predicate (v0) is
// emitted last; the result targets the mask register v3.
func (self *Program) VPCMPUQ(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPCMPUQ", 4, Operands { v0, v1, v2, v3 })
    // VPCMPUQ imm8, m512/m64bcst, zmm, k{k}
    if isImm8(v0) && isM512M64bcst(v1) && isZMM(v2) && isKk(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0b10 = 512-bit length; 64-byte compressed-displacement unit.
            m.evex(0b11, 0x85, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, bcode(v[1]))
            m.emit(0x1e)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUQ imm8, zmm, zmm, k{k}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isKk(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built EVEX prefix; 0x40 in the fourth byte selects the
            // 512-bit length, kcode(v[3]) fills the write-mask field.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x1e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUQ imm8, m128/m64bcst, xmm, k{k}
    if isImm8(v0) && isM128M64bcst(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0b00 = 128-bit length; 16-byte displacement unit.
            m.evex(0b11, 0x85, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, bcode(v[1]))
            m.emit(0x1e)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUQ imm8, xmm, xmm, k{k}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit length: 0x00 in the fourth prefix byte.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0x1e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUQ imm8, m256/m64bcst, ymm, k{k}
    if isImm8(v0) && isM256M64bcst(v1) && isEVEXYMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0b01 = 256-bit length; 32-byte displacement unit.
            m.evex(0b11, 0x85, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, bcode(v[1]))
            m.emit(0x1e)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUQ imm8, ymm, ymm, k{k}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit length: 0x20 in the fourth prefix byte.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x1e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPCMPUQ")
    }
    return p
}
 67481  
// VPCMPUW performs "Compare Packed Unsigned Word Values".
//
// Mnemonic        : VPCMPUW
// Supported forms : (6 forms)
//
//    * VPCMPUW imm8, zmm, zmm, k{k}     [AVX512BW]
//    * VPCMPUW imm8, m512, zmm, k{k}    [AVX512BW]
//    * VPCMPUW imm8, xmm, xmm, k{k}     [AVX512BW,AVX512VL]
//    * VPCMPUW imm8, m128, xmm, k{k}    [AVX512BW,AVX512VL]
//    * VPCMPUW imm8, ymm, ymm, k{k}     [AVX512BW,AVX512VL]
//    * VPCMPUW imm8, m256, ymm, k{k}    [AVX512BW,AVX512VL]
//
// All forms emit opcode 0x3e with the imm8 comparison predicate (v0) as the
// final byte and target the mask register v3. W1 is selected via 0xfd / 0x85
// (word data); word forms have no broadcast, hence the trailing 0 to m.evex.
func (self *Program) VPCMPUW(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPCMPUW", 4, Operands { v0, v1, v2, v3 })
    // VPCMPUW imm8, zmm, zmm, k{k}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isKk(v3) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built EVEX prefix; 0x40 in the fourth byte selects the
            // 512-bit length, kcode(v[3]) fills the write-mask field.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x3e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUW imm8, m512, zmm, k{k}
    if isImm8(v0) && isM512(v1) && isZMM(v2) && isKk(v3) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0b10 = 512-bit length; 64-byte compressed-displacement unit.
            m.evex(0b11, 0x85, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, 0)
            m.emit(0x3e)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUW imm8, xmm, xmm, k{k}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit length: 0x00 in the fourth prefix byte.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0x3e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUW imm8, m128, xmm, k{k}
    if isImm8(v0) && isM128(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0b00 = 128-bit length; 16-byte displacement unit.
            m.evex(0b11, 0x85, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, 0)
            m.emit(0x3e)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUW imm8, ymm, ymm, k{k}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit length: 0x20 in the fourth prefix byte.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x3e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPUW imm8, m256, ymm, k{k}
    if isImm8(v0) && isM256(v1) && isEVEXYMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0b01 = 256-bit length; 32-byte displacement unit.
            m.evex(0b11, 0x85, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, 0)
            m.emit(0x3e)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPCMPUW")
    }
    return p
}
 67576  
// VPCMPW performs "Compare Packed Signed Word Values".
//
// Mnemonic        : VPCMPW
// Supported forms : (6 forms)
//
//    * VPCMPW imm8, zmm, zmm, k{k}     [AVX512BW]
//    * VPCMPW imm8, m512, zmm, k{k}    [AVX512BW]
//    * VPCMPW imm8, xmm, xmm, k{k}     [AVX512BW,AVX512VL]
//    * VPCMPW imm8, m128, xmm, k{k}    [AVX512BW,AVX512VL]
//    * VPCMPW imm8, ymm, ymm, k{k}     [AVX512BW,AVX512VL]
//    * VPCMPW imm8, m256, ymm, k{k}    [AVX512BW,AVX512VL]
//
// Signed counterpart of VPCMPUW: same encoding shape and W1 selection
// (0xfd / 0x85) but opcode 0x3f instead of 0x3e. The imm8 predicate (v0) is
// emitted last; the result targets the mask register v3.
func (self *Program) VPCMPW(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPCMPW", 4, Operands { v0, v1, v2, v3 })
    // VPCMPW imm8, zmm, zmm, k{k}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isKk(v3) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built EVEX prefix; 0x40 in the fourth byte selects the
            // 512-bit length, kcode(v[3]) fills the write-mask field.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x3f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPW imm8, m512, zmm, k{k}
    if isImm8(v0) && isM512(v1) && isZMM(v2) && isKk(v3) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0b10 = 512-bit length; 64-byte compressed-displacement unit.
            m.evex(0b11, 0x85, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, 0)
            m.emit(0x3f)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPW imm8, xmm, xmm, k{k}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit length: 0x00 in the fourth prefix byte.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0x3f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPW imm8, m128, xmm, k{k}
    if isImm8(v0) && isM128(v1) && isEVEXXMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0b00 = 128-bit length; 16-byte displacement unit.
            m.evex(0b11, 0x85, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, 0)
            m.emit(0x3f)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPW imm8, ymm, ymm, k{k}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit length: 0x20 in the fourth prefix byte.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x3f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCMPW imm8, m256, ymm, k{k}
    if isImm8(v0) && isM256(v1) && isEVEXYMM(v2) && isKk(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0b01 = 256-bit length; 32-byte displacement unit.
            m.evex(0b11, 0x85, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), 0, 0)
            m.emit(0x3f)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPCMPW")
    }
    return p
}
 67671  
// VPCOMB performs "Compare Packed Signed Byte Integers".
//
// Mnemonic        : VPCOMB
// Supported forms : (2 forms)
//
//    * VPCOMB imm8, xmm, xmm, xmm     [XOP]
//    * VPCOMB imm8, m128, xmm, xmm    [XOP]
//
func (self *Program) VPCOMB(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPCOMB", 4, Operands { v0, v1, v2, v3 })
    // VPCOMB imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-direct form: hand-rolled 3-byte XOP prefix (escape
            // 0x8f, opcode map 0b1000 — cf. the vex3 call in the memory
            // form) with the operands' high-register bits XOR-folded in,
            // then opcode 0xcc, ModRM (reg=v3, rm=v1) and the trailing
            // imm8 comparison predicate.
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[2]) << 3))
            m.emit(0xcc)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCOMB imm8, m128, xmm, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: vex3 builds the XOP prefix for the address
            // operand; mrsd emits ModRM/SIB/displacement (disp8 scale 1).
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xcc)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPCOMB")
    }
    return p
}
 67711  
// VPCOMD performs "Compare Packed Signed Doubleword Integers".
//
// Mnemonic        : VPCOMD
// Supported forms : (2 forms)
//
//    * VPCOMD imm8, xmm, xmm, xmm     [XOP]
//    * VPCOMD imm8, m128, xmm, xmm    [XOP]
//
func (self *Program) VPCOMD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPCOMD", 4, Operands { v0, v1, v2, v3 })
    // VPCOMD imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-direct form: hand-rolled 3-byte XOP prefix (escape
            // 0x8f, opcode map 0b1000 — cf. the vex3 call in the memory
            // form) with the operands' high-register bits XOR-folded in,
            // then opcode 0xce, ModRM (reg=v3, rm=v1) and the trailing
            // imm8 comparison predicate.
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[2]) << 3))
            m.emit(0xce)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCOMD imm8, m128, xmm, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: vex3 builds the XOP prefix for the address
            // operand; mrsd emits ModRM/SIB/displacement (disp8 scale 1).
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xce)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPCOMD")
    }
    return p
}
 67751  
// VPCOMPRESSD performs "Store Sparse Packed Doubleword Integer Values into Dense Memory/Register".
//
// Mnemonic        : VPCOMPRESSD
// Supported forms : (6 forms)
//
//    * VPCOMPRESSD zmm, zmm{k}{z}     [AVX512F]
//    * VPCOMPRESSD zmm, m512{k}{z}    [AVX512F]
//    * VPCOMPRESSD xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPCOMPRESSD xmm, m128{k}{z}    [AVX512F,AVX512VL]
//    * VPCOMPRESSD ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VPCOMPRESSD ymm, m256{k}{z}    [AVX512F,AVX512VL]
//
func (self *Program) VPCOMPRESSD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPCOMPRESSD", 2, Operands { v0, v1 })
    // VPCOMPRESSD zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-direct form: hand-rolled 4-byte EVEX prefix (0x62)
            // folding in the operands' high/extended register bits, the
            // opmask (kcode) and zeroing (zcode) flags, and the vector
            // length bits — 0x48 here vs 0x08/0x28 in the 128/256-bit
            // forms below — then opcode 0x8b and ModRM (reg=v0 source,
            // rm=v1 destination).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x8b)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPCOMPRESSD zmm, m512{k}{z}
    if isZMM(v0) && isM512kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: m.evex builds the EVEX prefix for the address
            // operand; mrsd emits ModRM/SIB with disp8 compressed by the
            // 4-byte doubleword element size.
            m.evex(0b10, 0x05, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x8b)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // VPCOMPRESSD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same encoding as the zmm form, with 128-bit length bits (0x08).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x8b)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPCOMPRESSD xmm, m128{k}{z}
    if isEVEXXMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit memory form (EVEX length bits 0b00).
            m.evex(0b10, 0x05, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x8b)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // VPCOMPRESSD ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same encoding as the zmm form, with 256-bit length bits (0x28).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x8b)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPCOMPRESSD ymm, m256{k}{z}
    if isEVEXYMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form (EVEX length bits 0b01).
            m.evex(0b10, 0x05, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x8b)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPCOMPRESSD")
    }
    return p
}
 67840  
// VPCOMPRESSQ performs "Store Sparse Packed Quadword Integer Values into Dense Memory/Register".
//
// Mnemonic        : VPCOMPRESSQ
// Supported forms : (6 forms)
//
//    * VPCOMPRESSQ zmm, zmm{k}{z}     [AVX512F]
//    * VPCOMPRESSQ zmm, m512{k}{z}    [AVX512F]
//    * VPCOMPRESSQ xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPCOMPRESSQ xmm, m128{k}{z}    [AVX512F,AVX512VL]
//    * VPCOMPRESSQ ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VPCOMPRESSQ ymm, m256{k}{z}    [AVX512F,AVX512VL]
//
func (self *Program) VPCOMPRESSQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPCOMPRESSQ", 2, Operands { v0, v1 })
    // VPCOMPRESSQ zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-direct form: hand-rolled 4-byte EVEX prefix (0x62)
            // folding in the operands' high/extended register bits, the
            // opmask (kcode) and zeroing (zcode) flags, and the vector
            // length bits (0x48 = 512-bit; 0x08/0x28 below). The 0xfd
            // byte differs from VPCOMPRESSD's 0x7d, matching the quadword
            // (W=1) variant. Then opcode 0x8b and ModRM (reg=v0 source,
            // rm=v1 destination).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x8b)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPCOMPRESSQ zmm, m512{k}{z}
    if isZMM(v0) && isM512kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: m.evex builds the EVEX prefix for the address
            // operand; mrsd emits ModRM/SIB with disp8 compressed by the
            // 8-byte quadword element size.
            m.evex(0b10, 0x85, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x8b)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VPCOMPRESSQ xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same encoding as the zmm form, with 128-bit length bits (0x08).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x8b)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPCOMPRESSQ xmm, m128{k}{z}
    if isEVEXXMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit memory form (EVEX length bits 0b00).
            m.evex(0b10, 0x85, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x8b)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VPCOMPRESSQ ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same encoding as the zmm form, with 256-bit length bits (0x28).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x8b)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPCOMPRESSQ ymm, m256{k}{z}
    if isEVEXYMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form (EVEX length bits 0b01).
            m.evex(0b10, 0x85, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x8b)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPCOMPRESSQ")
    }
    return p
}
 67929  
// VPCOMQ performs "Compare Packed Signed Quadword Integers".
//
// Mnemonic        : VPCOMQ
// Supported forms : (2 forms)
//
//    * VPCOMQ imm8, xmm, xmm, xmm     [XOP]
//    * VPCOMQ imm8, m128, xmm, xmm    [XOP]
//
func (self *Program) VPCOMQ(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPCOMQ", 4, Operands { v0, v1, v2, v3 })
    // VPCOMQ imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-direct form: hand-rolled 3-byte XOP prefix (escape
            // 0x8f, opcode map 0b1000 — cf. the vex3 call in the memory
            // form) with the operands' high-register bits XOR-folded in,
            // then opcode 0xcf, ModRM (reg=v3, rm=v1) and the trailing
            // imm8 comparison predicate.
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[2]) << 3))
            m.emit(0xcf)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCOMQ imm8, m128, xmm, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: vex3 builds the XOP prefix for the address
            // operand; mrsd emits ModRM/SIB/displacement (disp8 scale 1).
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xcf)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPCOMQ")
    }
    return p
}
 67969  
// VPCOMUB performs "Compare Packed Unsigned Byte Integers".
//
// Mnemonic        : VPCOMUB
// Supported forms : (2 forms)
//
//    * VPCOMUB imm8, xmm, xmm, xmm     [XOP]
//    * VPCOMUB imm8, m128, xmm, xmm    [XOP]
//
func (self *Program) VPCOMUB(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPCOMUB", 4, Operands { v0, v1, v2, v3 })
    // VPCOMUB imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-direct form: hand-rolled 3-byte XOP prefix (escape
            // 0x8f, opcode map 0b1000 — cf. the vex3 call in the memory
            // form) with the operands' high-register bits XOR-folded in,
            // then opcode 0xec, ModRM (reg=v3, rm=v1) and the trailing
            // imm8 comparison predicate.
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[2]) << 3))
            m.emit(0xec)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCOMUB imm8, m128, xmm, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: vex3 builds the XOP prefix for the address
            // operand; mrsd emits ModRM/SIB/displacement (disp8 scale 1).
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xec)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPCOMUB")
    }
    return p
}
 68009  
// VPCOMUD performs "Compare Packed Unsigned Doubleword Integers".
//
// Mnemonic        : VPCOMUD
// Supported forms : (2 forms)
//
//    * VPCOMUD imm8, xmm, xmm, xmm     [XOP]
//    * VPCOMUD imm8, m128, xmm, xmm    [XOP]
//
func (self *Program) VPCOMUD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPCOMUD", 4, Operands { v0, v1, v2, v3 })
    // VPCOMUD imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-direct form: hand-rolled 3-byte XOP prefix (escape
            // 0x8f, opcode map 0b1000 — cf. the vex3 call in the memory
            // form) with the operands' high-register bits XOR-folded in,
            // then opcode 0xee, ModRM (reg=v3, rm=v1) and the trailing
            // imm8 comparison predicate.
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[2]) << 3))
            m.emit(0xee)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCOMUD imm8, m128, xmm, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: vex3 builds the XOP prefix for the address
            // operand; mrsd emits ModRM/SIB/displacement (disp8 scale 1).
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xee)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPCOMUD")
    }
    return p
}
 68049  
// VPCOMUQ performs "Compare Packed Unsigned Quadword Integers".
//
// Mnemonic        : VPCOMUQ
// Supported forms : (2 forms)
//
//    * VPCOMUQ imm8, xmm, xmm, xmm     [XOP]
//    * VPCOMUQ imm8, m128, xmm, xmm    [XOP]
//
func (self *Program) VPCOMUQ(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPCOMUQ", 4, Operands { v0, v1, v2, v3 })
    // VPCOMUQ imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-direct form: hand-rolled 3-byte XOP prefix (escape
            // 0x8f, opcode map 0b1000 — cf. the vex3 call in the memory
            // form) with the operands' high-register bits XOR-folded in,
            // then opcode 0xef, ModRM (reg=v3, rm=v1) and the trailing
            // imm8 comparison predicate.
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[2]) << 3))
            m.emit(0xef)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCOMUQ imm8, m128, xmm, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: vex3 builds the XOP prefix for the address
            // operand; mrsd emits ModRM/SIB/displacement (disp8 scale 1).
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xef)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPCOMUQ")
    }
    return p
}
 68089  
// VPCOMUW performs "Compare Packed Unsigned Word Integers".
//
// Mnemonic        : VPCOMUW
// Supported forms : (2 forms)
//
//    * VPCOMUW imm8, xmm, xmm, xmm     [XOP]
//    * VPCOMUW imm8, m128, xmm, xmm    [XOP]
//
func (self *Program) VPCOMUW(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPCOMUW", 4, Operands { v0, v1, v2, v3 })
    // VPCOMUW imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-direct form: hand-rolled 3-byte XOP prefix (escape
            // 0x8f, opcode map 0b1000 — cf. the vex3 call in the memory
            // form) with the operands' high-register bits XOR-folded in,
            // then opcode 0xed, ModRM (reg=v3, rm=v1) and the trailing
            // imm8 comparison predicate.
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[2]) << 3))
            m.emit(0xed)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCOMUW imm8, m128, xmm, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: vex3 builds the XOP prefix for the address
            // operand; mrsd emits ModRM/SIB/displacement (disp8 scale 1).
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xed)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPCOMUW")
    }
    return p
}
 68129  
// VPCOMW performs "Compare Packed Signed Word Integers".
//
// Mnemonic        : VPCOMW
// Supported forms : (2 forms)
//
//    * VPCOMW imm8, xmm, xmm, xmm     [XOP]
//    * VPCOMW imm8, m128, xmm, xmm    [XOP]
//
func (self *Program) VPCOMW(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPCOMW", 4, Operands { v0, v1, v2, v3 })
    // VPCOMW imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-direct form: hand-rolled 3-byte XOP prefix (escape
            // 0x8f, opcode map 0b1000 — cf. the vex3 call in the memory
            // form) with the operands' high-register bits XOR-folded in,
            // then opcode 0xcd, ModRM (reg=v3, rm=v1) and the trailing
            // imm8 comparison predicate.
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[2]) << 3))
            m.emit(0xcd)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPCOMW imm8, m128, xmm, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: vex3 builds the XOP prefix for the address
            // operand; mrsd emits ModRM/SIB/displacement (disp8 scale 1).
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xcd)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPCOMW")
    }
    return p
}
 68169  
// VPCONFLICTD performs "Detect Conflicts Within a Vector of Packed Doubleword Values into Dense Memory/Register".
//
// Mnemonic        : VPCONFLICTD
// Supported forms : (6 forms)
//
//    * VPCONFLICTD m128/m32bcst, xmm{k}{z}    [AVX512CD,AVX512VL]
//    * VPCONFLICTD m256/m32bcst, ymm{k}{z}    [AVX512CD,AVX512VL]
//    * VPCONFLICTD m512/m32bcst, zmm{k}{z}    [AVX512CD]
//    * VPCONFLICTD xmm, xmm{k}{z}             [AVX512CD,AVX512VL]
//    * VPCONFLICTD ymm, ymm{k}{z}             [AVX512CD,AVX512VL]
//    * VPCONFLICTD zmm, zmm{k}{z}             [AVX512CD]
//
func (self *Program) VPCONFLICTD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPCONFLICTD", 2, Operands { v0, v1 })
    // VPCONFLICTD m128/m32bcst, xmm{k}{z}
    if isM128M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit memory form: m.evex builds the EVEX prefix (length
            // bits 0b00); bcode(v[0]) selects the m32bcst embedded
            // broadcast; mrsd uses disp8 scale 16 (full vector width).
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xc4)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VPCONFLICTD m256/m32bcst, ymm{k}{z}
    if isM256M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form (length bits 0b01, disp8 scale 32).
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xc4)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VPCONFLICTD m512/m32bcst, zmm{k}{z}
    if isM512M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 512-bit memory form (length bits 0b10, disp8 scale 64).
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xc4)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VPCONFLICTD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-direct form: hand-rolled 4-byte EVEX prefix (0x62)
            // folding in the operands' high/extended register bits, the
            // opmask (kcode) and zeroing (zcode) flags, and 128-bit length
            // bits (0x08; 0x28/0x48 below), then opcode 0xc4 and ModRM
            // (reg=v1 destination, rm=v0 source).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0xc4)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPCONFLICTD ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same encoding with 256-bit length bits (0x28).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0xc4)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPCONFLICTD zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same encoding with 512-bit length bits (0x48).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0xc4)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPCONFLICTD")
    }
    return p
}
 68258  
// VPCONFLICTQ performs "Detect Conflicts Within a Vector of Packed Quadword Values into Dense Memory/Register".
//
// Mnemonic        : VPCONFLICTQ
// Supported forms : (6 forms)
//
//    * VPCONFLICTQ m128/m64bcst, xmm{k}{z}    [AVX512CD,AVX512VL]
//    * VPCONFLICTQ m256/m64bcst, ymm{k}{z}    [AVX512CD,AVX512VL]
//    * VPCONFLICTQ m512/m64bcst, zmm{k}{z}    [AVX512CD]
//    * VPCONFLICTQ xmm, xmm{k}{z}             [AVX512CD,AVX512VL]
//    * VPCONFLICTQ ymm, ymm{k}{z}             [AVX512CD,AVX512VL]
//    * VPCONFLICTQ zmm, zmm{k}{z}             [AVX512CD]
//
func (self *Program) VPCONFLICTQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPCONFLICTQ", 2, Operands { v0, v1 })
    // VPCONFLICTQ m128/m64bcst, xmm{k}{z}
    if isM128M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit memory form: m.evex builds the EVEX prefix (length
            // bits 0b00); bcode(v[0]) selects the m64bcst embedded
            // broadcast; mrsd uses disp8 scale 16 (full vector width).
            m.evex(0b10, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xc4)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VPCONFLICTQ m256/m64bcst, ymm{k}{z}
    if isM256M64bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form (length bits 0b01, disp8 scale 32).
            m.evex(0b10, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xc4)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VPCONFLICTQ m512/m64bcst, zmm{k}{z}
    if isM512M64bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 512-bit memory form (length bits 0b10, disp8 scale 64).
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xc4)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VPCONFLICTQ xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-direct form: hand-rolled 4-byte EVEX prefix (0x62)
            // folding in the operands' high/extended register bits, the
            // opmask (kcode) and zeroing (zcode) flags, and 128-bit length
            // bits (0x08; 0x28/0x48 below). The 0xfd byte differs from
            // VPCONFLICTD's 0x7d, matching the quadword (W=1) variant.
            // Then opcode 0xc4 and ModRM (reg=v1 destination, rm=v0 source).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0xc4)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPCONFLICTQ ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same encoding with 256-bit length bits (0x28).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0xc4)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPCONFLICTQ zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same encoding with 512-bit length bits (0x48).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0xc4)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPCONFLICTQ")
    }
    return p
}
 68347  
// VPERM2F128 performs "Permute Floating-Point Values".
//
// Mnemonic        : VPERM2F128
// Supported forms : (2 forms)
//
//    * VPERM2F128 imm8, ymm, ymm, ymm     [AVX]
//    * VPERM2F128 imm8, m256, ymm, ymm    [AVX]
//
func (self *Program) VPERM2F128(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPERM2F128", 4, Operands { v0, v1, v2, v3 })
    // VPERM2F128 imm8, ymm, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-direct form: hand-rolled 3-byte VEX prefix (escape
            // 0xc4, map 0b11 — cf. the vex3 call in the memory form) with
            // the operands' high-register bits XOR-folded in, then opcode
            // 0x06, ModRM (reg=v3, rm=v1) and the trailing imm8 selector.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit(0x06)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERM2F128 imm8, m256, ymm, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: vex3 builds the VEX prefix for the address
            // operand; mrsd emits ModRM/SIB/displacement (disp8 scale 1).
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x06)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoding matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPERM2F128")
    }
    return p
}
 68387  
// VPERM2I128 performs "Permute 128-Bit Integer Values".
//
// Mnemonic        : VPERM2I128
// Supported forms : (2 forms)
//
//    * VPERM2I128 imm8, ymm, ymm, ymm     [AVX2]
//    * VPERM2I128 imm8, m256, ymm, ymm    [AVX2]
//
func (self *Program) VPERM2I128(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPERM2I128", 4, Operands { v0, v1, v2, v3 })
    // Each operand form that matches appends one candidate encoding to p;
    // if no form matched, p.len stays 0 and we panic below.
    // VPERM2I128 imm8, ymm, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX prefix
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))  // VEX byte 1: inverted R/B extension bits + map select
            m.emit(0x7d ^ (hlcode(v[2]) << 3))                      // VEX byte 2: inverted vvvv (v[2]) + L/pp
            m.emit(0x46)                                            // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))           // ModRM: register-direct, reg = dst, r/m = src
            m.imm1(toImmAny(v[0]))                                  // trailing imm8 selector
        })
    }
    // VPERM2I128 imm8, m256, ymm, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[3]), addr(v[1]), hlcode(v[2]))  // VEX prefix for the memory form
            m.emit(0x46)                                                     // opcode
            m.mrsd(lcode(v[3]), addr(v[1]), 1)                               // ModRM/SIB/displacement for the memory operand
            m.imm1(toImmAny(v[0]))                                           // trailing imm8 selector
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPERM2I128")
    }
    return p
}
 68427  
// VPERMB performs "Permute Byte Integers".
//
// Mnemonic        : VPERMB
// Supported forms : (6 forms)
//
//    * VPERMB xmm, xmm, xmm{k}{z}     [AVX512VBMI,AVX512VL]
//    * VPERMB m128, xmm, xmm{k}{z}    [AVX512VBMI,AVX512VL]
//    * VPERMB ymm, ymm, ymm{k}{z}     [AVX512VBMI,AVX512VL]
//    * VPERMB m256, ymm, ymm{k}{z}    [AVX512VBMI,AVX512VL]
//    * VPERMB zmm, zmm, zmm{k}{z}     [AVX512VBMI]
//    * VPERMB m512, zmm, zmm{k}{z}    [AVX512VBMI]
//
func (self *Program) VPERMB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMB", 3, Operands { v0, v1, v2 })
    // Each operand form that matches appends one candidate encoding to p;
    // if no form matched, p.len stays 0 and we panic below.
    //
    // Register forms emit the 4-byte EVEX prefix by hand (0x62 + three
    // payload bytes), then the opcode and a register-direct ModRM; memory
    // forms delegate prefix packing to m.evex and ModRM/SIB/disp to m.mrsd
    // (last argument is the disp8 scaling factor, matching operand size).
    // VPERMB xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: map select + inverted R/X/B/R' bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // P1: pp prefix + inverted vvvv (v[1])
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // P2: z-bit, V', mask reg; 0x00 = 128-bit length
            m.emit(0x8d)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: reg = dst, r/m = src
        })
    }
    // VPERMB m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x8d)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPERMB ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // 0x20 = 256-bit vector length
            m.emit(0x8d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMB m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x8d)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPERMB zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // 0x40 = 512-bit vector length
            m.emit(0x8d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMB m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x8d)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPERMB")
    }
    return p
}
 68516  
// VPERMD performs "Permute Doubleword Integers".
//
// Mnemonic        : VPERMD
// Supported forms : (6 forms)
//
//    * VPERMD ymm, ymm, ymm                   [AVX2]
//    * VPERMD m256, ymm, ymm                  [AVX2]
//    * VPERMD m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPERMD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPERMD m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPERMD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMD", 3, Operands { v0, v1, v2 })
    // Each operand form that matches appends one candidate encoding to p;
    // if no form matched, p.len stays 0 and we panic below. The first two
    // forms are VEX-encoded (AVX2); the rest are EVEX-encoded (AVX-512),
    // where m.mrsd's last argument is the disp8 scaling factor and bcode
    // carries the embedded-broadcast bit for /m32bcst memory operands.
    // VPERMD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // VEX byte 1: inverted R/B extension bits + map select
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                      // VEX byte 2: inverted vvvv (v[1]) + L/pp
            m.emit(0x36)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: register-direct, reg = dst, r/m = src
        })
    }
    // VPERMD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x36)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPERMD m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x36)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPERMD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: map select + inverted R/X/B/R' bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // P1: pp prefix + inverted vvvv (v[1])
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // P2: z-bit, V', mask reg; 0x40 = 512-bit length
            m.emit(0x36)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: reg = dst, r/m = src
        })
    }
    // VPERMD m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x36)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPERMD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // 0x20 = 256-bit vector length
            m.emit(0x36)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPERMD")
    }
    return p
}
 68604  
// VPERMI2B performs "Full Permute of Bytes From Two Tables Overwriting the Index".
//
// Mnemonic        : VPERMI2B
// Supported forms : (6 forms)
//
//    * VPERMI2B xmm, xmm, xmm{k}{z}     [AVX512VBMI,AVX512VL]
//    * VPERMI2B m128, xmm, xmm{k}{z}    [AVX512VBMI,AVX512VL]
//    * VPERMI2B ymm, ymm, ymm{k}{z}     [AVX512VBMI,AVX512VL]
//    * VPERMI2B m256, ymm, ymm{k}{z}    [AVX512VBMI,AVX512VL]
//    * VPERMI2B zmm, zmm, zmm{k}{z}     [AVX512VBMI]
//    * VPERMI2B m512, zmm, zmm{k}{z}    [AVX512VBMI]
//
func (self *Program) VPERMI2B(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMI2B", 3, Operands { v0, v1, v2 })
    // Each operand form that matches appends one candidate encoding to p;
    // if no form matched, p.len stays 0 and we panic below.
    //
    // Register forms emit the 4-byte EVEX prefix by hand (0x62 + three
    // payload bytes), then the opcode and a register-direct ModRM; memory
    // forms delegate prefix packing to m.evex and ModRM/SIB/disp to m.mrsd
    // (last argument is the disp8 scaling factor, matching operand size).
    // VPERMI2B xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: map select + inverted R/X/B/R' bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // P1: pp prefix + inverted vvvv (v[1])
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // P2: z-bit, V', mask reg; 0x00 = 128-bit length
            m.emit(0x75)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: reg = dst, r/m = src
        })
    }
    // VPERMI2B m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x75)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPERMI2B ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // 0x20 = 256-bit vector length
            m.emit(0x75)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMI2B m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x75)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPERMI2B zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // 0x40 = 512-bit vector length
            m.emit(0x75)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMI2B m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x75)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPERMI2B")
    }
    return p
}
 68693  
// VPERMI2D performs "Full Permute of Doublewords From Two Tables Overwriting the Index".
//
// Mnemonic        : VPERMI2D
// Supported forms : (6 forms)
//
//    * VPERMI2D m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPERMI2D zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPERMI2D m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMI2D xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPERMI2D m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMI2D ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPERMI2D(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMI2D", 3, Operands { v0, v1, v2 })
    // Each operand form that matches appends one candidate encoding to p;
    // if no form matched, p.len stays 0 and we panic below. All forms are
    // EVEX-encoded: register forms emit the prefix bytes by hand, memory
    // forms use m.evex/m.mrsd (last mrsd argument is the disp8 scaling
    // factor; bcode carries the embedded-broadcast bit for /m32bcst).
    // VPERMI2D m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x76)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPERMI2D zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: map select + inverted R/X/B/R' bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // P1: pp prefix + inverted vvvv (v[1])
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // P2: z-bit, V', mask reg; 0x40 = 512-bit length
            m.emit(0x76)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: reg = dst, r/m = src
        })
    }
    // VPERMI2D m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x76)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPERMI2D xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // 0x00 = 128-bit vector length
            m.emit(0x76)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMI2D m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x76)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPERMI2D ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // 0x20 = 256-bit vector length
            m.emit(0x76)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPERMI2D")
    }
    return p
}
 68782  
// VPERMI2PD performs "Full Permute of Double-Precision Floating-Point Values From Two Tables Overwriting the Index".
//
// Mnemonic        : VPERMI2PD
// Supported forms : (6 forms)
//
//    * VPERMI2PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPERMI2PD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPERMI2PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMI2PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPERMI2PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMI2PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPERMI2PD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMI2PD", 3, Operands { v0, v1, v2 })
    // Each operand form that matches appends one candidate encoding to p;
    // if no form matched, p.len stays 0 and we panic below. All forms are
    // EVEX-encoded; note the 0x85/0xfd constants (vs 0x05/0x7d in the PS
    // variant), which set the EVEX.W bit for the 64-bit element width.
    // The last m.mrsd argument is the disp8 scaling factor and bcode
    // carries the embedded-broadcast bit for /m64bcst memory operands.
    // VPERMI2PD m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x77)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPERMI2PD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: map select + inverted R/X/B/R' bits
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                              // P1: W=1, pp prefix + inverted vvvv (v[1])
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // P2: z-bit, V', mask reg; 0x40 = 512-bit length
            m.emit(0x77)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: reg = dst, r/m = src
        })
    }
    // VPERMI2PD m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x77)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPERMI2PD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // 0x00 = 128-bit vector length
            m.emit(0x77)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMI2PD m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x77)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPERMI2PD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // 0x20 = 256-bit vector length
            m.emit(0x77)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPERMI2PD")
    }
    return p
}
 68871  
// VPERMI2PS performs "Full Permute of Single-Precision Floating-Point Values From Two Tables Overwriting the Index".
//
// Mnemonic        : VPERMI2PS
// Supported forms : (6 forms)
//
//    * VPERMI2PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPERMI2PS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPERMI2PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMI2PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPERMI2PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMI2PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPERMI2PS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMI2PS", 3, Operands { v0, v1, v2 })
    // Each operand form that matches appends one candidate encoding to p;
    // if no form matched, p.len stays 0 and we panic below. All forms are
    // EVEX-encoded: register forms emit the prefix bytes by hand, memory
    // forms use m.evex/m.mrsd (last mrsd argument is the disp8 scaling
    // factor; bcode carries the embedded-broadcast bit for /m32bcst).
    // VPERMI2PS m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x77)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPERMI2PS zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: map select + inverted R/X/B/R' bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // P1: pp prefix + inverted vvvv (v[1])
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // P2: z-bit, V', mask reg; 0x40 = 512-bit length
            m.emit(0x77)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: reg = dst, r/m = src
        })
    }
    // VPERMI2PS m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x77)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPERMI2PS xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // 0x00 = 128-bit vector length
            m.emit(0x77)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMI2PS m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x77)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPERMI2PS ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // 0x20 = 256-bit vector length
            m.emit(0x77)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPERMI2PS")
    }
    return p
}
 68960  
// VPERMI2Q performs "Full Permute of Quadwords From Two Tables Overwriting the Index".
//
// Mnemonic        : VPERMI2Q
// Supported forms : (6 forms)
//
//    * VPERMI2Q m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPERMI2Q zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPERMI2Q m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMI2Q xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPERMI2Q m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMI2Q ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPERMI2Q(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMI2Q", 3, Operands { v0, v1, v2 })
    // Each operand form that matches appends one candidate encoding to p;
    // if no form matched, p.len stays 0 and we panic below. All forms are
    // EVEX-encoded; note the 0x85/0xfd constants (vs 0x05/0x7d in the
    // doubleword variant), which set the EVEX.W bit for the 64-bit element
    // width. The last m.mrsd argument is the disp8 scaling factor and
    // bcode carries the embedded-broadcast bit for /m64bcst operands.
    // VPERMI2Q m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x76)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPERMI2Q zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // P0: map select + inverted R/X/B/R' bits
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                              // P1: W=1, pp prefix + inverted vvvv (v[1])
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // P2: z-bit, V', mask reg; 0x40 = 512-bit length
            m.emit(0x76)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: reg = dst, r/m = src
        })
    }
    // VPERMI2Q m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x76)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPERMI2Q xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // 0x00 = 128-bit vector length
            m.emit(0x76)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMI2Q m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x76)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPERMI2Q ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // 0x20 = 256-bit vector length
            m.emit(0x76)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPERMI2Q")
    }
    return p
}
 69049  
 69050  // VPERMI2W performs "Full Permute of Words From Two Tables Overwriting the Index".
 69051  //
 69052  // Mnemonic        : VPERMI2W
 69053  // Supported forms : (6 forms)
 69054  //
 69055  //    * VPERMI2W zmm, zmm, zmm{k}{z}     [AVX512BW]
 69056  //    * VPERMI2W m512, zmm, zmm{k}{z}    [AVX512BW]
 69057  //    * VPERMI2W xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
 69058  //    * VPERMI2W m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
 69059  //    * VPERMI2W ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
 69060  //    * VPERMI2W m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
 69061  //
func (self *Program) VPERMI2W(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMI2W", 3, Operands { v0, v1, v2 })
    // Dispatch on operand types: each form below that matches registers its
    // encoder closure via p.add. If no form matched (p.len == 0), the operand
    // combination is invalid and we panic at the end.
    // VPERMI2W zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (escape byte 0x62), folding in the
            // destination/source register high bits plus the opmask (kcode) and
            // zeroing (zcode) flags, followed by opcode 0x75 and ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x75)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMI2W m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x75)
            // NOTE(review): the trailing 64/32/16 passed to mrsd across these
            // forms tracks the operand width (ZMM/YMM/XMM) — presumably the
            // EVEX disp8*N compression scale; confirm against m.mrsd.
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPERMI2W xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x75)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMI2W m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x75)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPERMI2W ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x75)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMI2W m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x75)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No form matched: the caller passed an unsupported operand combination.
    if p.len == 0 {
        panic("invalid operands for VPERMI2W")
    }
    return p
}
 69138  
 69139  // VPERMIL2PD performs "Permute Two-Source Double-Precision Floating-Point Vectors".
 69140  //
 69141  // Mnemonic        : VPERMIL2PD
 69142  // Supported forms : (6 forms)
 69143  //
 69144  //    * VPERMIL2PD imm4, xmm, xmm, xmm, xmm     [XOP]
 69145  //    * VPERMIL2PD imm4, m128, xmm, xmm, xmm    [XOP]
 69146  //    * VPERMIL2PD imm4, xmm, m128, xmm, xmm    [XOP]
 69147  //    * VPERMIL2PD imm4, ymm, ymm, ymm, ymm     [XOP]
 69148  //    * VPERMIL2PD imm4, m256, ymm, ymm, ymm    [XOP]
 69149  //    * VPERMIL2PD imm4, ymm, m256, ymm, ymm    [XOP]
 69150  //
func (self *Program) VPERMIL2PD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}, v4 interface{}) *Instruction {
    p := self.alloc("VPERMIL2PD", 5, Operands { v0, v1, v2, v3, v4 })
    // AMD XOP instruction (5 operands: imm4 selector, two sources, one
    // selector register, one destination). Each matching form registers an
    // encoder closure; if none matched (p.len == 0) we panic at the end.
    // VPERMIL2PD imm4, xmm, xmm, xmm, xmm
    if isImm4(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) && isXMM(v4) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // Two alternative register-only encodings are registered: they differ
        // in which source register is placed in the ModRM r/m field (v[2]
        // vs v[1]) and which goes into the high nibble of the trailing
        // immediate byte alongside imm4.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[4]) << 7) ^ (hcode(v[2]) << 5))
            m.emit(0x79 ^ (hlcode(v[3]) << 3))
            m.emit(0x49)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
            m.emit((hlcode(v[1]) << 4) | imml(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[4]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0xf9 ^ (hlcode(v[3]) << 3))
            m.emit(0x49)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[1]))
            m.emit((hlcode(v[2]) << 4) | imml(v[0]))
        })
    }
    // VPERMIL2PD imm4, m128, xmm, xmm, xmm
    if isImm4(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) && isXMM(v4) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x81, hcode(v[4]), addr(v[1]), hlcode(v[3]))
            m.emit(0x49)
            m.mrsd(lcode(v[4]), addr(v[1]), 1)
            m.emit((hlcode(v[2]) << 4) | imml(v[0]))
        })
    }
    // VPERMIL2PD imm4, xmm, m128, xmm, xmm
    if isImm4(v0) && isXMM(v1) && isM128(v2) && isXMM(v3) && isXMM(v4) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // NOTE(review): 0x01 here vs 0x81 in the m128-first form — the
            // high bit appears to select which operand slot the memory source
            // occupies; confirm against m.vex3.
            m.vex3(0xc4, 0b11, 0x01, hcode(v[4]), addr(v[2]), hlcode(v[3]))
            m.emit(0x49)
            m.mrsd(lcode(v[4]), addr(v[2]), 1)
            m.emit((hlcode(v[1]) << 4) | imml(v[0]))
        })
    }
    // VPERMIL2PD imm4, ymm, ymm, ymm, ymm
    if isImm4(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) && isYMM(v4) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[4]) << 7) ^ (hcode(v[2]) << 5))
            m.emit(0x7d ^ (hlcode(v[3]) << 3))
            m.emit(0x49)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
            m.emit((hlcode(v[1]) << 4) | imml(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[4]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0xfd ^ (hlcode(v[3]) << 3))
            m.emit(0x49)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[1]))
            m.emit((hlcode(v[2]) << 4) | imml(v[0]))
        })
    }
    // VPERMIL2PD imm4, m256, ymm, ymm, ymm
    if isImm4(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) && isYMM(v4) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x85, hcode(v[4]), addr(v[1]), hlcode(v[3]))
            m.emit(0x49)
            m.mrsd(lcode(v[4]), addr(v[1]), 1)
            m.emit((hlcode(v[2]) << 4) | imml(v[0]))
        })
    }
    // VPERMIL2PD imm4, ymm, m256, ymm, ymm
    if isImm4(v0) && isYMM(v1) && isM256(v2) && isYMM(v3) && isYMM(v4) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[4]), addr(v[2]), hlcode(v[3]))
            m.emit(0x49)
            m.mrsd(lcode(v[4]), addr(v[2]), 1)
            m.emit((hlcode(v[1]) << 4) | imml(v[0]))
        })
    }
    // No form matched: the caller passed an unsupported operand combination.
    if p.len == 0 {
        panic("invalid operands for VPERMIL2PD")
    }
    return p
}
 69244  
 69245  // VPERMIL2PS performs "Permute Two-Source Single-Precision Floating-Point Vectors".
 69246  //
 69247  // Mnemonic        : VPERMIL2PS
 69248  // Supported forms : (6 forms)
 69249  //
 69250  //    * VPERMIL2PS imm4, xmm, xmm, xmm, xmm     [XOP]
 69251  //    * VPERMIL2PS imm4, m128, xmm, xmm, xmm    [XOP]
 69252  //    * VPERMIL2PS imm4, xmm, m128, xmm, xmm    [XOP]
 69253  //    * VPERMIL2PS imm4, ymm, ymm, ymm, ymm     [XOP]
 69254  //    * VPERMIL2PS imm4, m256, ymm, ymm, ymm    [XOP]
 69255  //    * VPERMIL2PS imm4, ymm, m256, ymm, ymm    [XOP]
 69256  //
func (self *Program) VPERMIL2PS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}, v4 interface{}) *Instruction {
    p := self.alloc("VPERMIL2PS", 5, Operands { v0, v1, v2, v3, v4 })
    // AMD XOP instruction; identical structure to VPERMIL2PD but with
    // opcode 0x48 instead of 0x49. Each matching form registers an encoder
    // closure; if none matched (p.len == 0) we panic at the end.
    // VPERMIL2PS imm4, xmm, xmm, xmm, xmm
    if isImm4(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) && isXMM(v4) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // Two alternative register-only encodings: they differ in which
        // source register lands in the ModRM r/m field (v[2] vs v[1]) and
        // which is packed into the high nibble of the trailing immediate.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[4]) << 7) ^ (hcode(v[2]) << 5))
            m.emit(0x79 ^ (hlcode(v[3]) << 3))
            m.emit(0x48)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
            m.emit((hlcode(v[1]) << 4) | imml(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[4]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0xf9 ^ (hlcode(v[3]) << 3))
            m.emit(0x48)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[1]))
            m.emit((hlcode(v[2]) << 4) | imml(v[0]))
        })
    }
    // VPERMIL2PS imm4, m128, xmm, xmm, xmm
    if isImm4(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) && isXMM(v4) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x81, hcode(v[4]), addr(v[1]), hlcode(v[3]))
            m.emit(0x48)
            m.mrsd(lcode(v[4]), addr(v[1]), 1)
            m.emit((hlcode(v[2]) << 4) | imml(v[0]))
        })
    }
    // VPERMIL2PS imm4, xmm, m128, xmm, xmm
    if isImm4(v0) && isXMM(v1) && isM128(v2) && isXMM(v3) && isXMM(v4) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[4]), addr(v[2]), hlcode(v[3]))
            m.emit(0x48)
            m.mrsd(lcode(v[4]), addr(v[2]), 1)
            m.emit((hlcode(v[1]) << 4) | imml(v[0]))
        })
    }
    // VPERMIL2PS imm4, ymm, ymm, ymm, ymm
    if isImm4(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) && isYMM(v4) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[4]) << 7) ^ (hcode(v[2]) << 5))
            m.emit(0x7d ^ (hlcode(v[3]) << 3))
            m.emit(0x48)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
            m.emit((hlcode(v[1]) << 4) | imml(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[4]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0xfd ^ (hlcode(v[3]) << 3))
            m.emit(0x48)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[1]))
            m.emit((hlcode(v[2]) << 4) | imml(v[0]))
        })
    }
    // VPERMIL2PS imm4, m256, ymm, ymm, ymm
    if isImm4(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) && isYMM(v4) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x85, hcode(v[4]), addr(v[1]), hlcode(v[3]))
            m.emit(0x48)
            m.mrsd(lcode(v[4]), addr(v[1]), 1)
            m.emit((hlcode(v[2]) << 4) | imml(v[0]))
        })
    }
    // VPERMIL2PS imm4, ymm, m256, ymm, ymm
    if isImm4(v0) && isYMM(v1) && isM256(v2) && isYMM(v3) && isYMM(v4) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[4]), addr(v[2]), hlcode(v[3]))
            m.emit(0x48)
            m.mrsd(lcode(v[4]), addr(v[2]), 1)
            m.emit((hlcode(v[1]) << 4) | imml(v[0]))
        })
    }
    // No form matched: the caller passed an unsupported operand combination.
    if p.len == 0 {
        panic("invalid operands for VPERMIL2PS")
    }
    return p
}
 69350  
 69351  // VPERMILPD performs "Permute Double-Precision Floating-Point Values".
 69352  //
 69353  // Mnemonic        : VPERMILPD
 69354  // Supported forms : (20 forms)
 69355  //
 69356  //    * VPERMILPD imm8, xmm, xmm                   [AVX]
 69357  //    * VPERMILPD xmm, xmm, xmm                    [AVX]
 69358  //    * VPERMILPD m128, xmm, xmm                   [AVX]
 69359  //    * VPERMILPD imm8, m128, xmm                  [AVX]
 69360  //    * VPERMILPD imm8, ymm, ymm                   [AVX]
 69361  //    * VPERMILPD ymm, ymm, ymm                    [AVX]
 69362  //    * VPERMILPD m256, ymm, ymm                   [AVX]
 69363  //    * VPERMILPD imm8, m256, ymm                  [AVX]
 69364  //    * VPERMILPD imm8, m512/m64bcst, zmm{k}{z}    [AVX512F]
 69365  //    * VPERMILPD m512/m64bcst, zmm, zmm{k}{z}     [AVX512F]
 69366  //    * VPERMILPD imm8, zmm, zmm{k}{z}             [AVX512F]
 69367  //    * VPERMILPD zmm, zmm, zmm{k}{z}              [AVX512F]
 69368  //    * VPERMILPD imm8, m128/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
 69369  //    * VPERMILPD imm8, m256/m64bcst, ymm{k}{z}    [AVX512F,AVX512VL]
 69370  //    * VPERMILPD m128/m64bcst, xmm, xmm{k}{z}     [AVX512F,AVX512VL]
 69371  //    * VPERMILPD imm8, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 69372  //    * VPERMILPD xmm, xmm, xmm{k}{z}              [AVX512F,AVX512VL]
 69373  //    * VPERMILPD m256/m64bcst, ymm, ymm{k}{z}     [AVX512F,AVX512VL]
 69374  //    * VPERMILPD imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 69375  //    * VPERMILPD ymm, ymm, ymm{k}{z}              [AVX512F,AVX512VL]
 69376  //
func (self *Program) VPERMILPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMILPD", 3, Operands { v0, v1, v2 })
    // 20 forms: plain AVX (VEX-encoded) variants first, then the AVX-512
    // (EVEX-encoded) variants with optional masking {k}{z} and broadcast.
    // Two opcodes are used throughout: 0x05 for the imm8-selector forms and
    // 0x0d for the register/memory-selector forms. Each matching form
    // registers an encoder closure; if none matched we panic at the end.
    // VPERMILPD imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix (escape 0xc4), then opcode,
            // ModRM, and the trailing imm8 selector.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79)
            m.emit(0x05)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x0d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMILPD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x0d)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPERMILPD imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x05)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPD imm8, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d)
            m.emit(0x05)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x0d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMILPD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x0d)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPERMILPD imm8, m256, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x05)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPD imm8, m512/m64bcst, zmm{k}{z}
    if isImm8(v0) && isM512M64bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // NOTE(review): the trailing 64/32/16 passed to mrsd across the
            // EVEX forms tracks the operand width (ZMM/YMM/XMM) — presumably
            // the EVEX disp8*N compression scale; confirm against m.mrsd.
            m.evex(0b11, 0x85, 0b10, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x05)
            m.mrsd(lcode(v[2]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPD m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x0d)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPERMILPD imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (escape 0x62), folding in the
            // register high bits plus the opmask (kcode) and zeroing (zcode)
            // flags, then opcode, ModRM, and the imm8 selector.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)
            m.emit(0x05)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x0d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMILPD imm8, m128/m64bcst, xmm{k}{z}
    if isImm8(v0) && isM128M64bcst(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b00, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x05)
            m.mrsd(lcode(v[2]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPD imm8, m256/m64bcst, ymm{k}{z}
    if isImm8(v0) && isM256M64bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b01, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x05)
            m.mrsd(lcode(v[2]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPD m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x0d)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPERMILPD imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x08)
            m.emit(0x05)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x0d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMILPD m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x0d)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPERMILPD imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x28)
            m.emit(0x05)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x0d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the caller passed an unsupported operand combination.
    if p.len == 0 {
        panic("invalid operands for VPERMILPD")
    }
    return p
}
 69620  
 69621  // VPERMILPS performs "Permute Single-Precision Floating-Point Values".
 69622  //
 69623  // Mnemonic        : VPERMILPS
 69624  // Supported forms : (20 forms)
 69625  //
 69626  //    * VPERMILPS imm8, xmm, xmm                   [AVX]
 69627  //    * VPERMILPS xmm, xmm, xmm                    [AVX]
 69628  //    * VPERMILPS m128, xmm, xmm                   [AVX]
 69629  //    * VPERMILPS imm8, m128, xmm                  [AVX]
 69630  //    * VPERMILPS imm8, ymm, ymm                   [AVX]
 69631  //    * VPERMILPS ymm, ymm, ymm                    [AVX]
 69632  //    * VPERMILPS m256, ymm, ymm                   [AVX]
 69633  //    * VPERMILPS imm8, m256, ymm                  [AVX]
 69634  //    * VPERMILPS imm8, m512/m32bcst, zmm{k}{z}    [AVX512F]
 69635  //    * VPERMILPS m512/m32bcst, zmm, zmm{k}{z}     [AVX512F]
 69636  //    * VPERMILPS imm8, zmm, zmm{k}{z}             [AVX512F]
 69637  //    * VPERMILPS zmm, zmm, zmm{k}{z}              [AVX512F]
 69638  //    * VPERMILPS imm8, m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
 69639  //    * VPERMILPS imm8, m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
 69640  //    * VPERMILPS m128/m32bcst, xmm, xmm{k}{z}     [AVX512F,AVX512VL]
 69641  //    * VPERMILPS imm8, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 69642  //    * VPERMILPS xmm, xmm, xmm{k}{z}              [AVX512F,AVX512VL]
 69643  //    * VPERMILPS m256/m32bcst, ymm, ymm{k}{z}     [AVX512F,AVX512VL]
 69644  //    * VPERMILPS imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 69645  //    * VPERMILPS ymm, ymm, ymm{k}{z}              [AVX512F,AVX512VL]
 69646  //
func (self *Program) VPERMILPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMILPS", 3, Operands { v0, v1, v2 })
    // Each operand-form check below registers a candidate encoder via p.add;
    // if no form matches, p.len stays 0 and we panic at the bottom.
    // VPERMILPS imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix: leading 0xC4, then a byte with the
            // inverted high register bits (hcode) XOR-folded in, then the fixed
            // W/vvvv/L/pp byte, opcode, ModRM, and the trailing imm8 selector.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79)
            m.emit(0x04)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPS xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-source variant: hlcode(v[1]) folds the second source
            // register into the VEX.vvvv field of the third prefix byte.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x0c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMILPS m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory source: m.vex3 builds the prefix from the address operand,
            // and m.mrsd emits ModRM/SIB with displacement scale 1.
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x0c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPERMILPS imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x04)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPS imm8, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d)
            m.emit(0x04)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPS ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x0c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMILPS m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x0c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPERMILPS imm8, m256, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x05, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x04)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPS imm8, m512/m32bcst, zmm{k}{z}
    if isImm8(v0) && isM512M32bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form: kcode/zcode/bcode carry the {k} mask, {z}
            // zeroing, and /m32bcst broadcast bits. The mrsd scale tracks the
            // operand width (64 for zmm) — NOTE(review): presumably the EVEX
            // disp8*N compression factor; confirm against _Encoding.mrsd.
            m.evex(0b11, 0x05, 0b10, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x04)
            m.mrsd(lcode(v[2]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPS m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x0c)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPERMILPS imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (leading 0x62) for the register
            // form; the fourth byte folds in zeroing (zcode<<7), the mask
            // register (kcode), and the vector-length bits (0x48 here).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)
            m.emit(0x04)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPS zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x0c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMILPS imm8, m128/m32bcst, xmm{k}{z}
    if isImm8(v0) && isM128M32bcst(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b00, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x04)
            m.mrsd(lcode(v[2]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPS imm8, m256/m32bcst, ymm{k}{z}
    if isImm8(v0) && isM256M32bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b01, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x04)
            m.mrsd(lcode(v[2]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPS m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x0c)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPERMILPS imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x08)
            m.emit(0x04)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPS xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x0c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMILPS m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x0c)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPERMILPS imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x28)
            m.emit(0x04)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMILPS ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x0c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand kinds — this is a caller error.
    if p.len == 0 {
        panic("invalid operands for VPERMILPS")
    }
    return p
}
 69890  
// VPERMPD performs "Permute Double-Precision Floating-Point Elements".
//
// Mnemonic        : VPERMPD
// Supported forms : (10 forms)
//
//    * VPERMPD imm8, ymm, ymm                   [AVX2]
//    * VPERMPD imm8, m256, ymm                  [AVX2]
//    * VPERMPD imm8, m512/m64bcst, zmm{k}{z}    [AVX512F]
//    * VPERMPD m512/m64bcst, zmm, zmm{k}{z}     [AVX512F]
//    * VPERMPD imm8, zmm, zmm{k}{z}             [AVX512F]
//    * VPERMPD zmm, zmm, zmm{k}{z}              [AVX512F]
//    * VPERMPD imm8, m256/m64bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMPD m256/m64bcst, ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VPERMPD imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//    * VPERMPD ymm, ymm, ymm{k}{z}              [AVX512F,AVX512VL]
//
func (self *Program) VPERMPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMPD", 3, Operands { v0, v1, v2 })
    // Each operand-form check below registers a candidate encoder via p.add;
    // if no form matches, p.len stays 0 and we panic at the bottom. Note the
    // imm8 forms use a different opcode byte (0x01) than the register-source
    // forms (0x16).
    // VPERMPD imm8, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix: 0xC4, then the high register bits
            // (hcode) XOR-folded in, then the fixed W/vvvv/L/pp byte.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0xfd)
            m.emit(0x01)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMPD imm8, m256, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory source: m.vex3 builds the prefix from the address operand,
            // and m.mrsd emits ModRM/SIB with displacement scale 1.
            m.vex3(0xc4, 0b11, 0x85, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x01)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMPD imm8, m512/m64bcst, zmm{k}{z}
    if isImm8(v0) && isM512M64bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form: kcode/zcode/bcode carry the {k} mask, {z}
            // zeroing, and /m64bcst broadcast bits; the mrsd scale tracks the
            // operand width (64 for zmm).
            m.evex(0b11, 0x85, 0b10, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x01)
            m.mrsd(lcode(v[2]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMPD m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x16)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPERMPD imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (leading 0x62) for the register
            // form; the fourth byte folds in zeroing (zcode<<7), the mask
            // register (kcode), and the vector-length bits (0x48 here).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)
            m.emit(0x01)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMPD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x16)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMPD imm8, m256/m64bcst, ymm{k}{z}
    if isImm8(v0) && isM256M64bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b01, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x01)
            m.mrsd(lcode(v[2]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMPD m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x16)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPERMPD imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x28)
            m.emit(0x01)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMPD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x16)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand kinds — this is a caller error.
    if p.len == 0 {
        panic("invalid operands for VPERMPD")
    }
    return p
}
 70034  
// VPERMPS performs "Permute Single-Precision Floating-Point Elements".
//
// Mnemonic        : VPERMPS
// Supported forms : (6 forms)
//
//    * VPERMPS ymm, ymm, ymm                   [AVX2]
//    * VPERMPS m256, ymm, ymm                  [AVX2]
//    * VPERMPS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPERMPS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPERMPS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMPS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPERMPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMPS", 3, Operands { v0, v1, v2 })
    // Each operand-form check below registers a candidate encoder via p.add;
    // if no form matches, p.len stays 0 and we panic at the bottom. Unlike
    // VPERMPD/VPERMQ, VPERMPS has no imm8 forms — the permute indices always
    // come from the first source operand.
    // VPERMPS ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix: 0xC4, high register bits (hcode)
            // XOR-folded into byte 1, second source folded into vvvv (hlcode).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x16)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMPS m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory source: m.vex3 builds the prefix from the address operand,
            // and m.mrsd emits ModRM/SIB with displacement scale 1.
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x16)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPERMPS m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form: kcode/zcode/bcode carry the {k} mask, {z}
            // zeroing, and /m32bcst broadcast bits; the mrsd scale tracks the
            // operand width (64 for zmm).
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x16)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPERMPS zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (leading 0x62) for the register
            // form; the fourth byte folds in zeroing, masking, and length bits.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x16)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMPS m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x16)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPERMPS ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x16)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand kinds — this is a caller error.
    if p.len == 0 {
        panic("invalid operands for VPERMPS")
    }
    return p
}
 70122  
// VPERMQ performs "Permute Quadword Integers".
//
// Mnemonic        : VPERMQ
// Supported forms : (10 forms)
//
//    * VPERMQ imm8, ymm, ymm                   [AVX2]
//    * VPERMQ imm8, m256, ymm                  [AVX2]
//    * VPERMQ imm8, m512/m64bcst, zmm{k}{z}    [AVX512F]
//    * VPERMQ m512/m64bcst, zmm, zmm{k}{z}     [AVX512F]
//    * VPERMQ imm8, zmm, zmm{k}{z}             [AVX512F]
//    * VPERMQ zmm, zmm, zmm{k}{z}              [AVX512F]
//    * VPERMQ imm8, m256/m64bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMQ m256/m64bcst, ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VPERMQ imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//    * VPERMQ ymm, ymm, ymm{k}{z}              [AVX512F,AVX512VL]
//
func (self *Program) VPERMQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMQ", 3, Operands { v0, v1, v2 })
    // Each operand-form check below registers a candidate encoder via p.add;
    // if no form matches, p.len stays 0 and we panic at the bottom. Structure
    // mirrors VPERMPD exactly; only the opcode bytes differ (imm8 forms use
    // 0x00, register-source forms use 0x36).
    // VPERMQ imm8, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix: 0xC4, then the high register bits
            // (hcode) XOR-folded in, then the fixed W/vvvv/L/pp byte.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0xfd)
            m.emit(0x00)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMQ imm8, m256, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory source: m.vex3 builds the prefix from the address operand,
            // and m.mrsd emits ModRM/SIB with displacement scale 1.
            m.vex3(0xc4, 0b11, 0x85, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x00)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMQ imm8, m512/m64bcst, zmm{k}{z}
    if isImm8(v0) && isM512M64bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form: kcode/zcode/bcode carry the {k} mask, {z}
            // zeroing, and /m64bcst broadcast bits; the mrsd scale tracks the
            // operand width (64 for zmm).
            m.evex(0b11, 0x85, 0b10, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x00)
            m.mrsd(lcode(v[2]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x36)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPERMQ imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (leading 0x62) for the register
            // form; the fourth byte folds in zeroing (zcode<<7), the mask
            // register (kcode), and the vector-length bits (0x48 here).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)
            m.emit(0x00)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x36)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMQ imm8, m256/m64bcst, ymm{k}{z}
    if isImm8(v0) && isM256M64bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b01, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x00)
            m.mrsd(lcode(v[2]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x36)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPERMQ imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x28)
            m.emit(0x00)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPERMQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x36)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand kinds — this is a caller error.
    if p.len == 0 {
        panic("invalid operands for VPERMQ")
    }
    return p
}
 70266  
// VPERMT2B performs "Full Permute of Bytes From Two Tables Overwriting a Table".
//
// Mnemonic        : VPERMT2B
// Supported forms : (6 forms)
//
//    * VPERMT2B xmm, xmm, xmm{k}{z}     [AVX512VBMI,AVX512VL]
//    * VPERMT2B m128, xmm, xmm{k}{z}    [AVX512VBMI,AVX512VL]
//    * VPERMT2B ymm, ymm, ymm{k}{z}     [AVX512VBMI,AVX512VL]
//    * VPERMT2B m256, ymm, ymm{k}{z}    [AVX512VBMI,AVX512VL]
//    * VPERMT2B zmm, zmm, zmm{k}{z}     [AVX512VBMI]
//    * VPERMT2B m512, zmm, zmm{k}{z}    [AVX512VBMI]
//
func (self *Program) VPERMT2B(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMT2B", 3, Operands { v0, v1, v2 })
    // Each operand-form check below registers a candidate encoder via p.add;
    // if no form matches, p.len stays 0 and we panic at the bottom. All forms
    // are EVEX-only (AVX512VBMI) and share opcode byte 0x7D; note the memory
    // forms pass a literal 0 broadcast bit since VPERMT2B has no /bcst form.
    // VPERMT2B xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (leading 0x62) for the register
            // form; the fourth byte folds in zeroing (zcode<<7), the mask
            // register (kcode), and the vector-length bits (0x00 = 128-bit).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x7d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMT2B m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory source: m.evex builds the prefix from the address operand;
            // the mrsd scale tracks the memory operand width (16 bytes here).
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x7d)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPERMT2B ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x7d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMT2B m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x7d)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPERMT2B zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x7d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMT2B m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x7d)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // No form matched the supplied operand kinds — this is a caller error.
    if p.len == 0 {
        panic("invalid operands for VPERMT2B")
    }
    return p
}
 70355  
// VPERMT2D performs "Full Permute of Doublewords From Two Tables Overwriting a Table".
//
// Mnemonic        : VPERMT2D
// Supported forms : (6 forms)
//
//    * VPERMT2D m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPERMT2D zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPERMT2D m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMT2D xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPERMT2D m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMT2D ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPERMT2D(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMT2D", 3, Operands { v0, v1, v2 })
    // Each matching operand form below registers one encoder closure on p.
    // Memory forms build the EVEX prefix via m.evex() and emit ModRM/SIB with
    // disp8*N compression via m.mrsd(); register forms hand-assemble the
    // 4-byte EVEX prefix (0x62 escape + 3 payload bytes P0/P1/P2).
    // VPERMT2D m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x7e)                          // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)   // disp8 scaled by the 64-byte operand size
        })
    }
    // VPERMT2D zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                      // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))    // P0: map select, inverted R/X/B/R' register-extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                                // P1: W=0, pp=66, vvvv <- v1 (inverted encoding)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)     // P2: z, V', mask k; 0x40 = L'L=10 (512-bit)
            m.emit(0x7e)                                                                      // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                     // ModRM: mod=11, reg=v2 (dest), rm=v0
        })
    }
    // VPERMT2D m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x7e)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)   // disp8 scaled by 16 bytes
        })
    }
    // VPERMT2D xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)     // 0x00 = L'L=00 (128-bit)
            m.emit(0x7e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMT2D m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x7e)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)   // disp8 scaled by 32 bytes
        })
    }
    // VPERMT2D ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)     // 0x20 = L'L=01 (256-bit)
            m.emit(0x7e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand form matched: reject at construction time.
    if p.len == 0 {
        panic("invalid operands for VPERMT2D")
    }
    return p
}
 70444  
// VPERMT2PD performs "Full Permute of Double-Precision Floating-Point Values From Two Tables Overwriting a Table".
//
// Mnemonic        : VPERMT2PD
// Supported forms : (6 forms)
//
//    * VPERMT2PD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPERMT2PD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPERMT2PD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMT2PD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPERMT2PD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMT2PD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPERMT2PD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMT2PD", 3, Operands { v0, v1, v2 })
    // Quadword-element variant: memory forms pass 0x85 to m.evex() and the
    // register forms use P1 base 0xfd, both of which set EVEX.W=1 (64-bit
    // element size). Layout of the encoders mirrors VPERMT2D otherwise.
    // VPERMT2PD m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x7f)                          // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)   // disp8 scaled by the 64-byte operand size
        })
    }
    // VPERMT2PD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                      // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))    // P0: map select, inverted register-extension bits
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                                // P1: W=1, pp=66, vvvv <- v1 (inverted encoding)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)     // P2: z, V', mask k; 0x40 = L'L=10 (512-bit)
            m.emit(0x7f)                                                                      // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                     // ModRM: mod=11, reg=v2 (dest), rm=v0
        })
    }
    // VPERMT2PD m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x7f)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)   // disp8 scaled by 16 bytes
        })
    }
    // VPERMT2PD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)     // 0x00 = L'L=00 (128-bit)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMT2PD m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x7f)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)   // disp8 scaled by 32 bytes
        })
    }
    // VPERMT2PD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)     // 0x20 = L'L=01 (256-bit)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand form matched: reject at construction time.
    if p.len == 0 {
        panic("invalid operands for VPERMT2PD")
    }
    return p
}
 70533  
// VPERMT2PS performs "Full Permute of Single-Precision Floating-Point Values From Two Tables Overwriting a Table".
//
// Mnemonic        : VPERMT2PS
// Supported forms : (6 forms)
//
//    * VPERMT2PS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPERMT2PS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPERMT2PS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMT2PS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPERMT2PS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMT2PS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPERMT2PS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMT2PS", 3, Operands { v0, v1, v2 })
    // Doubleword-element variant (EVEX.W=0: m.evex() flag 0x05, P1 base 0x7d)
    // of the VPERMT2 family, opcode 0x7f; structure mirrors VPERMT2D.
    // VPERMT2PS m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x7f)                          // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)   // disp8 scaled by the 64-byte operand size
        })
    }
    // VPERMT2PS zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                      // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))    // P0: map select, inverted register-extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                                // P1: W=0, pp=66, vvvv <- v1 (inverted encoding)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)     // P2: z, V', mask k; 0x40 = L'L=10 (512-bit)
            m.emit(0x7f)                                                                      // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                     // ModRM: mod=11, reg=v2 (dest), rm=v0
        })
    }
    // VPERMT2PS m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x7f)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)   // disp8 scaled by 16 bytes
        })
    }
    // VPERMT2PS xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)     // 0x00 = L'L=00 (128-bit)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMT2PS m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x7f)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)   // disp8 scaled by 32 bytes
        })
    }
    // VPERMT2PS ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)     // 0x20 = L'L=01 (256-bit)
            m.emit(0x7f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand form matched: reject at construction time.
    if p.len == 0 {
        panic("invalid operands for VPERMT2PS")
    }
    return p
}
 70622  
// VPERMT2Q performs "Full Permute of Quadwords From Two Tables Overwriting a Table".
//
// Mnemonic        : VPERMT2Q
// Supported forms : (6 forms)
//
//    * VPERMT2Q m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPERMT2Q zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPERMT2Q m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMT2Q xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPERMT2Q m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPERMT2Q ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPERMT2Q(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMT2Q", 3, Operands { v0, v1, v2 })
    // Quadword-element variant (EVEX.W=1: m.evex() flag 0x85, P1 base 0xfd)
    // of the VPERMT2 family, opcode 0x7e; structure mirrors VPERMT2D.
    // VPERMT2Q m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x7e)                          // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)   // disp8 scaled by the 64-byte operand size
        })
    }
    // VPERMT2Q zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                      // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))    // P0: map select, inverted register-extension bits
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                                // P1: W=1, pp=66, vvvv <- v1 (inverted encoding)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)     // P2: z, V', mask k; 0x40 = L'L=10 (512-bit)
            m.emit(0x7e)                                                                      // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                     // ModRM: mod=11, reg=v2 (dest), rm=v0
        })
    }
    // VPERMT2Q m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x7e)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)   // disp8 scaled by 16 bytes
        })
    }
    // VPERMT2Q xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)     // 0x00 = L'L=00 (128-bit)
            m.emit(0x7e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMT2Q m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x7e)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)   // disp8 scaled by 32 bytes
        })
    }
    // VPERMT2Q ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)     // 0x20 = L'L=01 (256-bit)
            m.emit(0x7e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand form matched: reject at construction time.
    if p.len == 0 {
        panic("invalid operands for VPERMT2Q")
    }
    return p
}
 70711  
// VPERMT2W performs "Full Permute of Words From Two Tables Overwriting a Table".
//
// Mnemonic        : VPERMT2W
// Supported forms : (6 forms)
//
//    * VPERMT2W zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPERMT2W m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPERMT2W xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPERMT2W m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPERMT2W ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPERMT2W m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
func (self *Program) VPERMT2W(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMT2W", 3, Operands { v0, v1, v2 })
    // Word-element AVX512BW variant, opcode 0x7d with EVEX.W=1 (m.evex() flag
    // 0x85 / P1 base 0xfd). Unlike the D/Q/PS/PD variants there is no
    // broadcast form, so the memory encoders pass a literal 0 bcst flag.
    // VPERMT2W zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                      // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))    // P0: map select, inverted register-extension bits
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                                // P1: W=1, pp=66, vvvv <- v1 (inverted encoding)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)     // P2: z, V', mask k; 0x40 = L'L=10 (512-bit)
            m.emit(0x7d)                                                                      // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                     // ModRM: mod=11, reg=v2 (dest), rm=v0
        })
    }
    // VPERMT2W m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x7d)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)   // disp8 scaled by the 64-byte operand size
        })
    }
    // VPERMT2W xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)     // 0x00 = L'L=00 (128-bit)
            m.emit(0x7d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMT2W m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x7d)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)   // disp8 scaled by 16 bytes
        })
    }
    // VPERMT2W ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)     // 0x20 = L'L=01 (256-bit)
            m.emit(0x7d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMT2W m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x7d)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)   // disp8 scaled by 32 bytes
        })
    }
    // No operand form matched: reject at construction time.
    if p.len == 0 {
        panic("invalid operands for VPERMT2W")
    }
    return p
}
 70800  
// VPERMW performs "Permute Word Integers".
//
// Mnemonic        : VPERMW
// Supported forms : (6 forms)
//
//    * VPERMW zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPERMW m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPERMW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPERMW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPERMW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPERMW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
func (self *Program) VPERMW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPERMW", 3, Operands { v0, v1, v2 })
    // AVX512BW word permute, opcode 0x8d with EVEX.W=1 (m.evex() flag 0x85 /
    // P1 base 0xfd). No broadcast form exists, so memory encoders pass a
    // literal 0 bcst flag; disp8 compression uses the full operand size.
    // VPERMW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                      // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))    // P0: map select, inverted register-extension bits
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                                // P1: W=1, pp=66, vvvv <- v1 (inverted encoding)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)     // P2: z, V', mask k; 0x40 = L'L=10 (512-bit)
            m.emit(0x8d)                                                                      // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                     // ModRM: mod=11, reg=v2 (dest), rm=v0
        })
    }
    // VPERMW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x8d)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)   // disp8 scaled by the 64-byte operand size
        })
    }
    // VPERMW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)     // 0x00 = L'L=00 (128-bit)
            m.emit(0x8d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x8d)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)   // disp8 scaled by 16 bytes
        })
    }
    // VPERMW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)     // 0x20 = L'L=01 (256-bit)
            m.emit(0x8d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPERMW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x8d)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)   // disp8 scaled by 32 bytes
        })
    }
    // No operand form matched: reject at construction time.
    if p.len == 0 {
        panic("invalid operands for VPERMW")
    }
    return p
}
 70889  
// VPEXPANDD performs "Load Sparse Packed Doubleword Integer Values from Dense Memory/Register".
//
// Mnemonic        : VPEXPANDD
// Supported forms : (6 forms)
//
//    * VPEXPANDD zmm, zmm{k}{z}     [AVX512F]
//    * VPEXPANDD m512, zmm{k}{z}    [AVX512F]
//    * VPEXPANDD xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPEXPANDD ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VPEXPANDD m128, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPEXPANDD m256, ymm{k}{z}    [AVX512F,AVX512VL]
//
func (self *Program) VPEXPANDD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPEXPANDD", 2, Operands { v0, v1 })
    // Two-operand EVEX instruction, opcode 0x89 with EVEX.W=0. There is no
    // vvvv source, so memory encoders pass a literal 0 vcode and register
    // encoders emit a fixed P1 byte (0x7d). The disp8 compression factor for
    // memory forms is the 4-byte element size, not the full vector width.
    // VPEXPANDD zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))    // P0: map select, inverted register-extension bits
            m.emit(0x7d)                                                                  // P1: W=0, pp=66, vvvv unused
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                               // P2: z, mask k; 0x48 = L'L=10 (512-bit) + V'
            m.emit(0x89)                                                                  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))                                 // ModRM: mod=11, reg=v1 (dest), rm=v0
        })
    }
    // VPEXPANDD m512, zmm{k}{z}
    if isM512(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x89)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)    // disp8 scaled by the 4-byte element size
        })
    }
    // VPEXPANDD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)                               // 0x08 = L'L=00 (128-bit) + V'
            m.emit(0x89)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPEXPANDD ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)                               // 0x28 = L'L=01 (256-bit) + V'
            m.emit(0x89)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPEXPANDD m128, xmm{k}{z}
    if isM128(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x89)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)    // disp8 scaled by the 4-byte element size
        })
    }
    // VPEXPANDD m256, ymm{k}{z}
    if isM256(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x89)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)    // disp8 scaled by the 4-byte element size
        })
    }
    // No operand form matched: reject at construction time.
    if p.len == 0 {
        panic("invalid operands for VPEXPANDD")
    }
    return p
}
 70978  
// VPEXPANDQ performs "Load Sparse Packed Quadword Integer Values from Dense Memory/Register".
//
// Mnemonic        : VPEXPANDQ
// Supported forms : (6 forms)
//
//    * VPEXPANDQ zmm, zmm{k}{z}     [AVX512F]
//    * VPEXPANDQ m512, zmm{k}{z}    [AVX512F]
//    * VPEXPANDQ xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPEXPANDQ ymm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VPEXPANDQ m128, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPEXPANDQ m256, ymm{k}{z}    [AVX512F,AVX512VL]
//
// Each supported form is matched independently below; a match records the
// required ISA and registers one encoder closure. If no form matches, the
// function panics.
func (self *Program) VPEXPANDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPEXPANDQ", 2, Operands { v0, v1 })
    // VPEXPANDQ zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-direct form: raw EVEX prefix (0x62 escape plus three
            // payload bytes carrying register-extension, mask and zeroing
            // bits), opcode 0x89, then a register ModRM byte.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x89)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPEXPANDQ m512, zmm{k}{z}
    if isM512(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: EVEX prefix via helper, opcode 0x89, then
            // ModRM/SIB/displacement with an 8-byte disp scale (qword element).
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x89)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VPEXPANDQ xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same register-direct encoding as the zmm form; only the vector
            // length bits in the last prefix byte differ (0x08 = 128-bit).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x89)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPEXPANDQ ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register-direct variant (vector length bits 0x28).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x89)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPEXPANDQ m128, xmm{k}{z}
    if isM128(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x89)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VPEXPANDQ m256, ymm{k}{z}
    if isM256(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x89)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPEXPANDQ")
    }
    return p
}
 71067  
// VPEXTRB performs "Extract Byte".
//
// Mnemonic        : VPEXTRB
// Supported forms : (4 forms)
//
//    * VPEXTRB imm8, xmm, r32    [AVX]
//    * VPEXTRB imm8, xmm, m8     [AVX]
//    * VPEXTRB imm8, xmm, r32    [AVX512BW]
//    * VPEXTRB imm8, xmm, m8     [AVX512BW]
//
// The AVX (VEX-encoded) and AVX-512 (EVEX-encoded) forms are matched
// separately; each match registers one encoder closure, ending with the
// 8-bit immediate operand. Panics if no form matches.
func (self *Program) VPEXTRB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPEXTRB", 3, Operands { v0, v1, v2 })
    // VPEXTRB imm8, xmm, r32
    if isImm8(v0) && isXMM(v1) && isReg32(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: raw 3-byte VEX prefix (0xc4), opcode 0x14,
            // register ModRM, then the imm8.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[1]) << 7) ^ (hcode(v[2]) << 5))
            m.emit(0x79)
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPEXTRB imm8, xmm, m8
    if isImm8(v0) && isXMM(v1) && isM8(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: VEX prefix via helper, then ModRM/SIB/disp and imm8.
            m.vex3(0xc4, 0b11, 0x01, hcode(v[1]), addr(v[2]), 0)
            m.emit(0x14)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPEXTRB imm8, xmm, r32
    if isImm8(v0) && isEVEXXMM(v1) && isReg32(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form: raw 0x62 prefix with register-extension bits.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit(0x08)
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPEXTRB imm8, xmm, m8
    if isImm8(v0) && isEVEXXMM(v1) && isM8(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; disp scale 1 (byte element).
            m.evex(0b11, 0x05, 0b00, ehcode(v[1]), addr(v[2]), 0, 0, 0, 0)
            m.emit(0x14)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPEXTRB")
    }
    return p
}
 71134  
// VPEXTRD performs "Extract Doubleword".
//
// Mnemonic        : VPEXTRD
// Supported forms : (4 forms)
//
//    * VPEXTRD imm8, xmm, r32    [AVX]
//    * VPEXTRD imm8, xmm, m32    [AVX]
//    * VPEXTRD imm8, xmm, r32    [AVX512DQ]
//    * VPEXTRD imm8, xmm, m32    [AVX512DQ]
//
// The AVX (VEX-encoded) and AVX-512 (EVEX-encoded) forms are matched
// separately; each match registers one encoder closure, ending with the
// 8-bit immediate operand. Panics if no form matches.
func (self *Program) VPEXTRD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPEXTRD", 3, Operands { v0, v1, v2 })
    // VPEXTRD imm8, xmm, r32
    if isImm8(v0) && isXMM(v1) && isReg32(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: raw 3-byte VEX prefix (0xc4), opcode 0x16,
            // register ModRM, then the imm8.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[1]) << 7) ^ (hcode(v[2]) << 5))
            m.emit(0x79)
            m.emit(0x16)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPEXTRD imm8, xmm, m32
    if isImm8(v0) && isXMM(v1) && isM32(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: VEX prefix via helper, then ModRM/SIB/disp and imm8.
            m.vex3(0xc4, 0b11, 0x01, hcode(v[1]), addr(v[2]), 0)
            m.emit(0x16)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPEXTRD imm8, xmm, r32
    if isImm8(v0) && isEVEXXMM(v1) && isReg32(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form: raw 0x62 prefix with register-extension bits.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit(0x08)
            m.emit(0x16)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPEXTRD imm8, xmm, m32
    if isImm8(v0) && isEVEXXMM(v1) && isM32(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; disp scale 4 (dword element).
            m.evex(0b11, 0x05, 0b00, ehcode(v[1]), addr(v[2]), 0, 0, 0, 0)
            m.emit(0x16)
            m.mrsd(lcode(v[1]), addr(v[2]), 4)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPEXTRD")
    }
    return p
}
 71201  
// VPEXTRQ performs "Extract Quadword".
//
// Mnemonic        : VPEXTRQ
// Supported forms : (4 forms)
//
//    * VPEXTRQ imm8, xmm, r64    [AVX]
//    * VPEXTRQ imm8, xmm, m64    [AVX]
//    * VPEXTRQ imm8, xmm, r64    [AVX512DQ]
//    * VPEXTRQ imm8, xmm, m64    [AVX512DQ]
//
// Same structure as VPEXTRD, but with the wide (W=1) prefix variants and an
// 8-byte memory element. Panics if no form matches.
func (self *Program) VPEXTRQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPEXTRQ", 3, Operands { v0, v1, v2 })
    // VPEXTRQ imm8, xmm, r64
    if isImm8(v0) && isXMM(v1) && isReg64(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: raw 3-byte VEX prefix (0xc4, W=1 via 0xf9),
            // opcode 0x16, register ModRM, then the imm8.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[1]) << 7) ^ (hcode(v[2]) << 5))
            m.emit(0xf9)
            m.emit(0x16)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPEXTRQ imm8, xmm, m64
    if isImm8(v0) && isXMM(v1) && isM64(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: VEX prefix via helper, then ModRM/SIB/disp and imm8.
            m.vex3(0xc4, 0b11, 0x81, hcode(v[1]), addr(v[2]), 0)
            m.emit(0x16)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPEXTRQ imm8, xmm, r64
    if isImm8(v0) && isEVEXXMM(v1) && isReg64(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form: raw 0x62 prefix with register-extension bits.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit(0x08)
            m.emit(0x16)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPEXTRQ imm8, xmm, m64
    if isImm8(v0) && isEVEXXMM(v1) && isM64(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; disp scale 8 (qword element).
            m.evex(0b11, 0x85, 0b00, ehcode(v[1]), addr(v[2]), 0, 0, 0, 0)
            m.emit(0x16)
            m.mrsd(lcode(v[1]), addr(v[2]), 8)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPEXTRQ")
    }
    return p
}
 71268  
// VPEXTRW performs "Extract Word".
//
// Mnemonic        : VPEXTRW
// Supported forms : (4 forms)
//
//    * VPEXTRW imm8, xmm, r32    [AVX]
//    * VPEXTRW imm8, xmm, m16    [AVX]
//    * VPEXTRW imm8, xmm, r32    [AVX512BW]
//    * VPEXTRW imm8, xmm, m16    [AVX512BW]
//
// Unlike the other VPEXTR* methods, the register forms each register TWO
// alternative encodings (opcode 0xc5 with a swapped ModRM, and opcode 0x15);
// presumably the instruction-selection machinery elsewhere picks between
// them. Panics if no form matches.
func (self *Program) VPEXTRW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPEXTRW", 3, Operands { v0, v1, v2 })
    // VPEXTRW imm8, xmm, r32
    if isImm8(v0) && isXMM(v1) && isReg32(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Alternative 1: 2-byte VEX prefix, opcode 0xc5, destination
            // register in the ModRM reg field, then the imm8.
            m.vex2(1, hcode(v[2]), v[1], 0)
            m.emit(0xc5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Alternative 2: raw 3-byte VEX prefix, opcode 0x15, source
            // register in the ModRM reg field, then the imm8.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[1]) << 7) ^ (hcode(v[2]) << 5))
            m.emit(0x79)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPEXTRW imm8, xmm, m16
    if isImm8(v0) && isXMM(v1) && isM16(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: VEX prefix via helper, then ModRM/SIB/disp and imm8.
            m.vex3(0xc4, 0b11, 0x01, hcode(v[1]), addr(v[2]), 0)
            m.emit(0x15)
            m.mrsd(lcode(v[1]), addr(v[2]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPEXTRW imm8, xmm, r32
    if isImm8(v0) && isEVEXXMM(v1) && isReg32(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX alternative 1: opcode 0x15, source register in ModRM reg.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[1]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit(0x08)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX alternative 2: opcode 0xc5, destination register in
            // ModRM reg (operand roles swapped in the prefix as well).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit(0x08)
            m.emit(0xc5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPEXTRW imm8, xmm, m16
    if isImm8(v0) && isEVEXXMM(v1) && isM16(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; disp scale 2 (word element).
            m.evex(0b11, 0x05, 0b00, ehcode(v[1]), addr(v[2]), 0, 0, 0, 0)
            m.emit(0x15)
            m.mrsd(lcode(v[1]), addr(v[2]), 2)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPEXTRW")
    }
    return p
}
 71350  
// VPGATHERDD performs "Gather Packed Doubleword Values Using Signed Doubleword Indices".
//
// Mnemonic        : VPGATHERDD
// Supported forms : (5 forms)
//
//    * VPGATHERDD xmm, vm32x, xmm    [AVX2]
//    * VPGATHERDD ymm, vm32y, ymm    [AVX2]
//    * VPGATHERDD vm32z, zmm{k}      [AVX512F]
//    * VPGATHERDD vm32x, xmm{k}      [AVX512F,AVX512VL]
//    * VPGATHERDD vm32y, ymm{k}      [AVX512F,AVX512VL]
//
// The AVX2 (VEX) forms take three operands (mask, vector-memory, destination)
// while the AVX-512 (EVEX) forms take two, hence the variadic trailing
// parameter. Panics on any other operand count or combination.
func (self *Program) VPGATHERDD(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Dispatch on operand count: 2 operands = EVEX forms, 3 = VEX forms.
    switch len(vv) {
        case 0  : p = self.alloc("VPGATHERDD", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VPGATHERDD", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VPGATHERDD takes 2 or 3 operands")
    }
    // VPGATHERDD xmm, vm32x, xmm
    if len(vv) == 1 && isXMM(v0) && isVMX(v1) && isXMM(vv[0]) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX form: 3-byte prefix (mask register carried in the VEX.vvvv
            // field via hlcode), opcode 0x90, vector-memory ModRM/SIB/disp.
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x90)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // VPGATHERDD ymm, vm32y, ymm
    if len(vv) == 1 && isYMM(v0) && isVMY(v1) && isYMM(vv[0]) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x90)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // VPGATHERDD vm32z, zmm{k}
    if len(vv) == 0 && isVMZ(v0) && isZMMk(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX form: opmask in kcode, opcode 0x90, disp scale 4 (dword).
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x90)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VPGATHERDD vm32x, xmm{k}
    if len(vv) == 0 && isEVEXVMX(v0) && isXMMk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x90)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VPGATHERDD vm32y, ymm{k}
    if len(vv) == 0 && isEVEXVMY(v0) && isYMMk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x90)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPGATHERDD")
    }
    return p
}
 71424  
// VPGATHERDQ performs "Gather Packed Quadword Values Using Signed Doubleword Indices".
//
// Mnemonic        : VPGATHERDQ
// Supported forms : (5 forms)
//
//    * VPGATHERDQ xmm, vm32x, xmm    [AVX2]
//    * VPGATHERDQ ymm, vm32x, ymm    [AVX2]
//    * VPGATHERDQ vm32y, zmm{k}      [AVX512F]
//    * VPGATHERDQ vm32x, xmm{k}      [AVX512F,AVX512VL]
//    * VPGATHERDQ vm32x, ymm{k}      [AVX512F,AVX512VL]
//
// The AVX2 (VEX) forms take three operands (mask, vector-memory, destination)
// while the AVX-512 (EVEX) forms take two, hence the variadic trailing
// parameter. Panics on any other operand count or combination.
func (self *Program) VPGATHERDQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Dispatch on operand count: 2 operands = EVEX forms, 3 = VEX forms.
    switch len(vv) {
        case 0  : p = self.alloc("VPGATHERDQ", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VPGATHERDQ", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VPGATHERDQ takes 2 or 3 operands")
    }
    // VPGATHERDQ xmm, vm32x, xmm
    if len(vv) == 1 && isXMM(v0) && isVMX(v1) && isXMM(vv[0]) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX form (W=1): mask register in VEX.vvvv, opcode 0x90,
            // vector-memory ModRM/SIB/disp.
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x90)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // VPGATHERDQ ymm, vm32x, ymm
    if len(vv) == 1 && isYMM(v0) && isVMX(v1) && isYMM(vv[0]) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x90)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // VPGATHERDQ vm32y, zmm{k}
    if len(vv) == 0 && isEVEXVMY(v0) && isZMMk(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX form: opmask in kcode, opcode 0x90, disp scale 8 (qword).
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x90)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VPGATHERDQ vm32x, xmm{k}
    if len(vv) == 0 && isEVEXVMX(v0) && isXMMk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x90)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VPGATHERDQ vm32x, ymm{k}
    if len(vv) == 0 && isEVEXVMX(v0) && isYMMk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x90)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPGATHERDQ")
    }
    return p
}
 71498  
// VPGATHERQD performs "Gather Packed Doubleword Values Using Signed Quadword Indices".
//
// Mnemonic        : VPGATHERQD
// Supported forms : (5 forms)
//
//    * VPGATHERQD xmm, vm64x, xmm    [AVX2]
//    * VPGATHERQD xmm, vm64y, xmm    [AVX2]
//    * VPGATHERQD vm64z, ymm{k}      [AVX512F]
//    * VPGATHERQD vm64x, xmm{k}      [AVX512F,AVX512VL]
//    * VPGATHERQD vm64y, xmm{k}      [AVX512F,AVX512VL]
//
// The AVX2 (VEX) forms take three operands (mask, vector-memory, destination)
// while the AVX-512 (EVEX) forms take two, hence the variadic trailing
// parameter. Panics on any other operand count or combination.
func (self *Program) VPGATHERQD(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Dispatch on operand count: 2 operands = EVEX forms, 3 = VEX forms.
    switch len(vv) {
        case 0  : p = self.alloc("VPGATHERQD", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VPGATHERQD", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VPGATHERQD takes 2 or 3 operands")
    }
    // VPGATHERQD xmm, vm64x, xmm
    if len(vv) == 1 && isXMM(v0) && isVMX(v1) && isXMM(vv[0]) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX form: mask register in VEX.vvvv, opcode 0x91,
            // vector-memory ModRM/SIB/disp.
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x91)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // VPGATHERQD xmm, vm64y, xmm
    if len(vv) == 1 && isXMM(v0) && isVMY(v1) && isXMM(vv[0]) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x91)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // VPGATHERQD vm64z, ymm{k}
    if len(vv) == 0 && isVMZ(v0) && isYMMk(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX form: opmask in kcode, opcode 0x91, disp scale 4 (dword).
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x91)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VPGATHERQD vm64x, xmm{k}
    if len(vv) == 0 && isEVEXVMX(v0) && isXMMk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x91)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VPGATHERQD vm64y, xmm{k}
    if len(vv) == 0 && isEVEXVMY(v0) && isXMMk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x91)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPGATHERQD")
    }
    return p
}
 71572  
// VPGATHERQQ performs "Gather Packed Quadword Values Using Signed Quadword Indices".
//
// Mnemonic        : VPGATHERQQ
// Supported forms : (5 forms)
//
//    * VPGATHERQQ xmm, vm64x, xmm    [AVX2]
//    * VPGATHERQQ ymm, vm64y, ymm    [AVX2]
//    * VPGATHERQQ vm64z, zmm{k}      [AVX512F]
//    * VPGATHERQQ vm64x, xmm{k}      [AVX512F,AVX512VL]
//    * VPGATHERQQ vm64y, ymm{k}      [AVX512F,AVX512VL]
//
// The AVX2 (VEX) forms take three operands (mask, vector-memory, destination)
// while the AVX-512 (EVEX) forms take two, hence the variadic trailing
// parameter. Panics on any other operand count or combination.
func (self *Program) VPGATHERQQ(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Dispatch on operand count: 2 operands = EVEX forms, 3 = VEX forms.
    switch len(vv) {
        case 0  : p = self.alloc("VPGATHERQQ", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VPGATHERQQ", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VPGATHERQQ takes 2 or 3 operands")
    }
    // VPGATHERQQ xmm, vm64x, xmm
    if len(vv) == 1 && isXMM(v0) && isVMX(v1) && isXMM(vv[0]) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX form (W=1): mask register in VEX.vvvv, opcode 0x91,
            // vector-memory ModRM/SIB/disp.
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x91)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // VPGATHERQQ ymm, vm64y, ymm
    if len(vv) == 1 && isYMM(v0) && isVMY(v1) && isYMM(vv[0]) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x91)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // VPGATHERQQ vm64z, zmm{k}
    if len(vv) == 0 && isVMZ(v0) && isZMMk(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX form: opmask in kcode, opcode 0x91, disp scale 8 (qword).
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x91)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VPGATHERQQ vm64x, xmm{k}
    if len(vv) == 0 && isEVEXVMX(v0) && isXMMk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x91)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VPGATHERQQ vm64y, ymm{k}
    if len(vv) == 0 && isEVEXVMY(v0) && isYMMk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), 0, 0)
            m.emit(0x91)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPGATHERQQ")
    }
    return p
}
 71646  
// VPHADDBD performs "Packed Horizontal Add Signed Byte to Signed Doubleword".
//
// Mnemonic        : VPHADDBD
// Supported forms : (2 forms)
//
//    * VPHADDBD xmm, xmm     [XOP]
//    * VPHADDBD m128, xmm    [XOP]
//
// AMD XOP instruction (0x8f escape prefix). Panics if no form matches.
func (self *Program) VPHADDBD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPHADDBD", 2, Operands { v0, v1 })
    // VPHADDBD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: raw XOP prefix (0x8f escape), opcode 0xc2,
            // register ModRM.
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x78)
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPHADDBD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: XOP prefix via the vex3 helper (0x8f escape,
            // map 0b1001), opcode 0xc2, then ModRM/SIB/disp.
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)
            m.emit(0xc2)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPHADDBD")
    }
    return p
}
 71684  
// VPHADDBQ performs "Packed Horizontal Add Signed Byte to Signed Quadword".
//
// Mnemonic        : VPHADDBQ
// Supported forms : (2 forms)
//
//    * VPHADDBQ xmm, xmm     [XOP]
//    * VPHADDBQ m128, xmm    [XOP]
//
// AMD XOP instruction (0x8f escape prefix). Panics if no form matches.
func (self *Program) VPHADDBQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPHADDBQ", 2, Operands { v0, v1 })
    // VPHADDBQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: raw XOP prefix (0x8f escape), opcode 0xc3,
            // register ModRM.
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x78)
            m.emit(0xc3)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPHADDBQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: XOP prefix via the vex3 helper (0x8f escape,
            // map 0b1001), opcode 0xc3, then ModRM/SIB/disp.
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)
            m.emit(0xc3)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPHADDBQ")
    }
    return p
}
 71722  
// VPHADDBW performs "Packed Horizontal Add Signed Byte to Signed Word".
//
// Mnemonic        : VPHADDBW
// Supported forms : (2 forms)
//
//    * VPHADDBW xmm, xmm     [XOP]
//    * VPHADDBW m128, xmm    [XOP]
//
func (self *Program) VPHADDBW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPHADDBW", 2, Operands { v0, v1 })
    // VPHADDBW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                            // XOP escape byte
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))  // inverted R/B register-extension bits xor'ed into the map-select byte (opcode map 9)
            m.emit(0x78)                                            // W=0, vvvv unused, L=0, pp=0
            m.emit(0xc1)                                            // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))           // ModRM: mod=11 (register-direct), reg=v1, rm=v0
        })
    }
    // VPHADDBW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)  // XOP prefix, opcode map 9, no vvvv operand
            m.emit(0xc1)                                            // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                      // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder was registered: the operand combination is not a supported form
    if p.len == 0 {
        panic("invalid operands for VPHADDBW")
    }
    return p
}
 71760  
// VPHADDD performs "Packed Horizontal Add Doubleword Integer".
//
// Mnemonic        : VPHADDD
// Supported forms : (4 forms)
//
//    * VPHADDD xmm, xmm, xmm     [AVX]
//    * VPHADDD m128, xmm, xmm    [AVX]
//    * VPHADDD ymm, ymm, ymm     [AVX2]
//    * VPHADDD m256, ymm, ymm    [AVX2]
//
func (self *Program) VPHADDD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPHADDD", 3, Operands { v0, v1, v2 })
    // VPHADDD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // three-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // inverted R/B register-extension bits xor'ed into the map-select byte (0F38 map)
            m.emit(0x79 ^ (hlcode(v[1]) << 3))                      // W=0, vvvv=v1 (inverted), L=0 (128-bit), pp=1 (0x66)
            m.emit(0x02)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: mod=11 (register-direct), reg=v2, rm=v0
        })
    }
    // VPHADDD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))  // VEX prefix, 0F38 map, L=0/pp=0x66, vvvv=v1
            m.emit(0x02)                                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                               // ModRM + SIB + displacement for the memory operand
        })
    }
    // VPHADDD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // three-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // inverted R/B register-extension bits xor'ed into the map-select byte (0F38 map)
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                      // W=0, vvvv=v1 (inverted), L=1 (256-bit), pp=1 (0x66)
            m.emit(0x02)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: mod=11 (register-direct), reg=v2, rm=v0
        })
    }
    // VPHADDD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))  // VEX prefix, 0F38 map, L=1/pp=0x66, vvvv=v1
            m.emit(0x02)                                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                               // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder was registered: the operand combination is not a supported form
    if p.len == 0 {
        panic("invalid operands for VPHADDD")
    }
    return p
}
 71822  
// VPHADDDQ performs "Packed Horizontal Add Signed Doubleword to Signed Quadword".
//
// Mnemonic        : VPHADDDQ
// Supported forms : (2 forms)
//
//    * VPHADDDQ xmm, xmm     [XOP]
//    * VPHADDDQ m128, xmm    [XOP]
//
func (self *Program) VPHADDDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPHADDDQ", 2, Operands { v0, v1 })
    // VPHADDDQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                            // XOP escape byte
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))  // inverted R/B register-extension bits xor'ed into the map-select byte (opcode map 9)
            m.emit(0x78)                                            // W=0, vvvv unused, L=0, pp=0
            m.emit(0xcb)                                            // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))           // ModRM: mod=11 (register-direct), reg=v1, rm=v0
        })
    }
    // VPHADDDQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)  // XOP prefix, opcode map 9, no vvvv operand
            m.emit(0xcb)                                            // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                      // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder was registered: the operand combination is not a supported form
    if p.len == 0 {
        panic("invalid operands for VPHADDDQ")
    }
    return p
}
 71860  
// VPHADDSW performs "Packed Horizontal Add Signed Word Integers with Signed Saturation".
//
// Mnemonic        : VPHADDSW
// Supported forms : (4 forms)
//
//    * VPHADDSW xmm, xmm, xmm     [AVX]
//    * VPHADDSW m128, xmm, xmm    [AVX]
//    * VPHADDSW ymm, ymm, ymm     [AVX2]
//    * VPHADDSW m256, ymm, ymm    [AVX2]
//
func (self *Program) VPHADDSW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPHADDSW", 3, Operands { v0, v1, v2 })
    // VPHADDSW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // three-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // inverted R/B register-extension bits xor'ed into the map-select byte (0F38 map)
            m.emit(0x79 ^ (hlcode(v[1]) << 3))                      // W=0, vvvv=v1 (inverted), L=0 (128-bit), pp=1 (0x66)
            m.emit(0x03)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: mod=11 (register-direct), reg=v2, rm=v0
        })
    }
    // VPHADDSW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))  // VEX prefix, 0F38 map, L=0/pp=0x66, vvvv=v1
            m.emit(0x03)                                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                               // ModRM + SIB + displacement for the memory operand
        })
    }
    // VPHADDSW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // three-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // inverted R/B register-extension bits xor'ed into the map-select byte (0F38 map)
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                      // W=0, vvvv=v1 (inverted), L=1 (256-bit), pp=1 (0x66)
            m.emit(0x03)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: mod=11 (register-direct), reg=v2, rm=v0
        })
    }
    // VPHADDSW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))  // VEX prefix, 0F38 map, L=1/pp=0x66, vvvv=v1
            m.emit(0x03)                                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                               // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder was registered: the operand combination is not a supported form
    if p.len == 0 {
        panic("invalid operands for VPHADDSW")
    }
    return p
}
 71922  
// VPHADDUBD performs "Packed Horizontal Add Unsigned Byte to Doubleword".
//
// Mnemonic        : VPHADDUBD
// Supported forms : (2 forms)
//
//    * VPHADDUBD xmm, xmm     [XOP]
//    * VPHADDUBD m128, xmm    [XOP]
//
func (self *Program) VPHADDUBD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPHADDUBD", 2, Operands { v0, v1 })
    // VPHADDUBD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                            // XOP escape byte
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))  // inverted R/B register-extension bits xor'ed into the map-select byte (opcode map 9)
            m.emit(0x78)                                            // W=0, vvvv unused, L=0, pp=0
            m.emit(0xd2)                                            // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))           // ModRM: mod=11 (register-direct), reg=v1, rm=v0
        })
    }
    // VPHADDUBD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)  // XOP prefix, opcode map 9, no vvvv operand
            m.emit(0xd2)                                            // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                      // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder was registered: the operand combination is not a supported form
    if p.len == 0 {
        panic("invalid operands for VPHADDUBD")
    }
    return p
}
 71960  
// VPHADDUBQ performs "Packed Horizontal Add Unsigned Byte to Quadword".
//
// Mnemonic        : VPHADDUBQ
// Supported forms : (2 forms)
//
//    * VPHADDUBQ xmm, xmm     [XOP]
//    * VPHADDUBQ m128, xmm    [XOP]
//
func (self *Program) VPHADDUBQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPHADDUBQ", 2, Operands { v0, v1 })
    // VPHADDUBQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                            // XOP escape byte
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))  // inverted R/B register-extension bits xor'ed into the map-select byte (opcode map 9)
            m.emit(0x78)                                            // W=0, vvvv unused, L=0, pp=0
            m.emit(0xd3)                                            // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))           // ModRM: mod=11 (register-direct), reg=v1, rm=v0
        })
    }
    // VPHADDUBQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)  // XOP prefix, opcode map 9, no vvvv operand
            m.emit(0xd3)                                            // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                      // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder was registered: the operand combination is not a supported form
    if p.len == 0 {
        panic("invalid operands for VPHADDUBQ")
    }
    return p
}
 71998  
// VPHADDUBW performs "Packed Horizontal Add Unsigned Byte to Word".
//
// Mnemonic        : VPHADDUBW
// Supported forms : (2 forms)
//
//    * VPHADDUBW xmm, xmm     [XOP]
//    * VPHADDUBW m128, xmm    [XOP]
//
func (self *Program) VPHADDUBW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPHADDUBW", 2, Operands { v0, v1 })
    // VPHADDUBW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                            // XOP escape byte
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))  // inverted R/B register-extension bits xor'ed into the map-select byte (opcode map 9)
            m.emit(0x78)                                            // W=0, vvvv unused, L=0, pp=0
            m.emit(0xd1)                                            // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))           // ModRM: mod=11 (register-direct), reg=v1, rm=v0
        })
    }
    // VPHADDUBW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)  // XOP prefix, opcode map 9, no vvvv operand
            m.emit(0xd1)                                            // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                      // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder was registered: the operand combination is not a supported form
    if p.len == 0 {
        panic("invalid operands for VPHADDUBW")
    }
    return p
}
 72036  
// VPHADDUDQ performs "Packed Horizontal Add Unsigned Doubleword to Quadword".
//
// Mnemonic        : VPHADDUDQ
// Supported forms : (2 forms)
//
//    * VPHADDUDQ xmm, xmm     [XOP]
//    * VPHADDUDQ m128, xmm    [XOP]
//
func (self *Program) VPHADDUDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPHADDUDQ", 2, Operands { v0, v1 })
    // VPHADDUDQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                            // XOP escape byte
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))  // inverted R/B register-extension bits xor'ed into the map-select byte (opcode map 9)
            m.emit(0x78)                                            // W=0, vvvv unused, L=0, pp=0
            m.emit(0xdb)                                            // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))           // ModRM: mod=11 (register-direct), reg=v1, rm=v0
        })
    }
    // VPHADDUDQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)  // XOP prefix, opcode map 9, no vvvv operand
            m.emit(0xdb)                                            // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                      // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder was registered: the operand combination is not a supported form
    if p.len == 0 {
        panic("invalid operands for VPHADDUDQ")
    }
    return p
}
 72074  
// VPHADDUWD performs "Packed Horizontal Add Unsigned Word to Doubleword".
//
// Mnemonic        : VPHADDUWD
// Supported forms : (2 forms)
//
//    * VPHADDUWD xmm, xmm     [XOP]
//    * VPHADDUWD m128, xmm    [XOP]
//
func (self *Program) VPHADDUWD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPHADDUWD", 2, Operands { v0, v1 })
    // VPHADDUWD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                            // XOP escape byte
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))  // inverted R/B register-extension bits xor'ed into the map-select byte (opcode map 9)
            m.emit(0x78)                                            // W=0, vvvv unused, L=0, pp=0
            m.emit(0xd6)                                            // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))           // ModRM: mod=11 (register-direct), reg=v1, rm=v0
        })
    }
    // VPHADDUWD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)  // XOP prefix, opcode map 9, no vvvv operand
            m.emit(0xd6)                                            // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                      // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder was registered: the operand combination is not a supported form
    if p.len == 0 {
        panic("invalid operands for VPHADDUWD")
    }
    return p
}
 72112  
// VPHADDUWQ performs "Packed Horizontal Add Unsigned Word to Quadword".
//
// Mnemonic        : VPHADDUWQ
// Supported forms : (2 forms)
//
//    * VPHADDUWQ xmm, xmm     [XOP]
//    * VPHADDUWQ m128, xmm    [XOP]
//
func (self *Program) VPHADDUWQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPHADDUWQ", 2, Operands { v0, v1 })
    // VPHADDUWQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                            // XOP escape byte
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))  // inverted R/B register-extension bits xor'ed into the map-select byte (opcode map 9)
            m.emit(0x78)                                            // W=0, vvvv unused, L=0, pp=0
            m.emit(0xd7)                                            // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))           // ModRM: mod=11 (register-direct), reg=v1, rm=v0
        })
    }
    // VPHADDUWQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)  // XOP prefix, opcode map 9, no vvvv operand
            m.emit(0xd7)                                            // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                      // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder was registered: the operand combination is not a supported form
    if p.len == 0 {
        panic("invalid operands for VPHADDUWQ")
    }
    return p
}
 72150  
// VPHADDW performs "Packed Horizontal Add Word Integers".
//
// Mnemonic        : VPHADDW
// Supported forms : (4 forms)
//
//    * VPHADDW xmm, xmm, xmm     [AVX]
//    * VPHADDW m128, xmm, xmm    [AVX]
//    * VPHADDW ymm, ymm, ymm     [AVX2]
//    * VPHADDW m256, ymm, ymm    [AVX2]
//
func (self *Program) VPHADDW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPHADDW", 3, Operands { v0, v1, v2 })
    // VPHADDW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // three-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // inverted R/B register-extension bits xor'ed into the map-select byte (0F38 map)
            m.emit(0x79 ^ (hlcode(v[1]) << 3))                      // W=0, vvvv=v1 (inverted), L=0 (128-bit), pp=1 (0x66)
            m.emit(0x01)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: mod=11 (register-direct), reg=v2, rm=v0
        })
    }
    // VPHADDW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))  // VEX prefix, 0F38 map, L=0/pp=0x66, vvvv=v1
            m.emit(0x01)                                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                               // ModRM + SIB + displacement for the memory operand
        })
    }
    // VPHADDW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // three-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // inverted R/B register-extension bits xor'ed into the map-select byte (0F38 map)
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                      // W=0, vvvv=v1 (inverted), L=1 (256-bit), pp=1 (0x66)
            m.emit(0x01)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: mod=11 (register-direct), reg=v2, rm=v0
        })
    }
    // VPHADDW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))  // VEX prefix, 0F38 map, L=1/pp=0x66, vvvv=v1
            m.emit(0x01)                                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                               // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder was registered: the operand combination is not a supported form
    if p.len == 0 {
        panic("invalid operands for VPHADDW")
    }
    return p
}
 72212  
// VPHADDWD performs "Packed Horizontal Add Signed Word to Signed Doubleword".
//
// Mnemonic        : VPHADDWD
// Supported forms : (2 forms)
//
//    * VPHADDWD xmm, xmm     [XOP]
//    * VPHADDWD m128, xmm    [XOP]
//
func (self *Program) VPHADDWD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPHADDWD", 2, Operands { v0, v1 })
    // VPHADDWD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                            // XOP escape byte
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))  // inverted R/B register-extension bits xor'ed into the map-select byte (opcode map 9)
            m.emit(0x78)                                            // W=0, vvvv unused, L=0, pp=0
            m.emit(0xc6)                                            // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))           // ModRM: mod=11 (register-direct), reg=v1, rm=v0
        })
    }
    // VPHADDWD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)  // XOP prefix, opcode map 9, no vvvv operand
            m.emit(0xc6)                                            // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                      // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder was registered: the operand combination is not a supported form
    if p.len == 0 {
        panic("invalid operands for VPHADDWD")
    }
    return p
}
 72250  
// VPHADDWQ performs "Packed Horizontal Add Signed Word to Signed Quadword".
//
// Mnemonic        : VPHADDWQ
// Supported forms : (2 forms)
//
//    * VPHADDWQ xmm, xmm     [XOP]
//    * VPHADDWQ m128, xmm    [XOP]
//
func (self *Program) VPHADDWQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPHADDWQ", 2, Operands { v0, v1 })
    // VPHADDWQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                            // XOP escape byte
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))  // inverted R/B register-extension bits xor'ed into the map-select byte (opcode map 9)
            m.emit(0x78)                                            // W=0, vvvv unused, L=0, pp=0
            m.emit(0xc7)                                            // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))           // ModRM: mod=11 (register-direct), reg=v1, rm=v0
        })
    }
    // VPHADDWQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)  // XOP prefix, opcode map 9, no vvvv operand
            m.emit(0xc7)                                            // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                      // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder was registered: the operand combination is not a supported form
    if p.len == 0 {
        panic("invalid operands for VPHADDWQ")
    }
    return p
}
 72288  
// VPHMINPOSUW performs "Packed Horizontal Minimum of Unsigned Word Integers".
//
// Mnemonic        : VPHMINPOSUW
// Supported forms : (2 forms)
//
//    * VPHMINPOSUW xmm, xmm     [AVX]
//    * VPHMINPOSUW m128, xmm    [AVX]
//
func (self *Program) VPHMINPOSUW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPHMINPOSUW", 2, Operands { v0, v1 })
    // VPHMINPOSUW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // three-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))  // inverted R/B register-extension bits xor'ed into the map-select byte (0F38 map)
            m.emit(0x79)                                            // W=0, vvvv unused, L=0 (128-bit), pp=1 (0x66)
            m.emit(0x41)                                            // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))           // ModRM: mod=11 (register-direct), reg=v1, rm=v0
        })
    }
    // VPHMINPOSUW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)    // VEX prefix, 0F38 map, L=0/pp=0x66, no vvvv operand
            m.emit(0x41)                                            // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                      // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder was registered: the operand combination is not a supported form
    if p.len == 0 {
        panic("invalid operands for VPHMINPOSUW")
    }
    return p
}
 72326  
// VPHSUBBW performs "Packed Horizontal Subtract Signed Byte to Signed Word".
//
// Mnemonic        : VPHSUBBW
// Supported forms : (2 forms)
//
//    * VPHSUBBW xmm, xmm     [XOP]
//    * VPHSUBBW m128, xmm    [XOP]
//
func (self *Program) VPHSUBBW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPHSUBBW", 2, Operands { v0, v1 })
    // VPHSUBBW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                            // XOP escape byte
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))  // inverted R/B register-extension bits xor'ed into the map-select byte (opcode map 9)
            m.emit(0x78)                                            // W=0, vvvv unused, L=0, pp=0
            m.emit(0xe1)                                            // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))           // ModRM: mod=11 (register-direct), reg=v1, rm=v0
        })
    }
    // VPHSUBBW m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)  // XOP prefix, opcode map 9, no vvvv operand
            m.emit(0xe1)                                            // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                      // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder was registered: the operand combination is not a supported form
    if p.len == 0 {
        panic("invalid operands for VPHSUBBW")
    }
    return p
}
 72364  
// VPHSUBD performs "Packed Horizontal Subtract Doubleword Integers".
//
// Mnemonic        : VPHSUBD
// Supported forms : (4 forms)
//
//    * VPHSUBD xmm, xmm, xmm     [AVX]
//    * VPHSUBD m128, xmm, xmm    [AVX]
//    * VPHSUBD ymm, ymm, ymm     [AVX2]
//    * VPHSUBD m256, ymm, ymm    [AVX2]
//
func (self *Program) VPHSUBD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPHSUBD", 3, Operands { v0, v1, v2 })
    // VPHSUBD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // three-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // inverted R/B register-extension bits xor'ed into the map-select byte (0F38 map)
            m.emit(0x79 ^ (hlcode(v[1]) << 3))                      // W=0, vvvv=v1 (inverted), L=0 (128-bit), pp=1 (0x66)
            m.emit(0x06)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: mod=11 (register-direct), reg=v2, rm=v0
        })
    }
    // VPHSUBD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))  // VEX prefix, 0F38 map, L=0/pp=0x66, vvvv=v1
            m.emit(0x06)                                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                               // ModRM + SIB + displacement for the memory operand
        })
    }
    // VPHSUBD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // three-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // inverted R/B register-extension bits xor'ed into the map-select byte (0F38 map)
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                      // W=0, vvvv=v1 (inverted), L=1 (256-bit), pp=1 (0x66)
            m.emit(0x06)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: mod=11 (register-direct), reg=v2, rm=v0
        })
    }
    // VPHSUBD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))  // VEX prefix, 0F38 map, L=1/pp=0x66, vvvv=v1
            m.emit(0x06)                                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                               // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder was registered: the operand combination is not a supported form
    if p.len == 0 {
        panic("invalid operands for VPHSUBD")
    }
    return p
}
 72426  
// VPHSUBDQ performs "Packed Horizontal Subtract Signed Doubleword to Signed Quadword".
//
// Mnemonic        : VPHSUBDQ
// Supported forms : (2 forms)
//
//    * VPHSUBDQ xmm, xmm     [XOP]
//    * VPHSUBDQ m128, xmm    [XOP]
//
func (self *Program) VPHSUBDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPHSUBDQ", 2, Operands { v0, v1 })
    // VPHSUBDQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)                                            // XOP escape byte
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))  // inverted R/B register-extension bits xor'ed into the map-select byte (opcode map 9)
            m.emit(0x78)                                            // W=0, vvvv unused, L=0, pp=0
            m.emit(0xe3)                                            // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))           // ModRM: mod=11 (register-direct), reg=v1, rm=v0
        })
    }
    // VPHSUBDQ m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)  // XOP prefix, opcode map 9, no vvvv operand
            m.emit(0xe3)                                            // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                      // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder was registered: the operand combination is not a supported form
    if p.len == 0 {
        panic("invalid operands for VPHSUBDQ")
    }
    return p
}
 72464  
// VPHSUBSW performs "Packed Horizontal Subtract Signed Word Integers with Signed Saturation".
//
// Mnemonic        : VPHSUBSW
// Supported forms : (4 forms)
//
//    * VPHSUBSW xmm, xmm, xmm     [AVX]
//    * VPHSUBSW m128, xmm, xmm    [AVX]
//    * VPHSUBSW ymm, ymm, ymm     [AVX2]
//    * VPHSUBSW m256, ymm, ymm    [AVX2]
//
func (self *Program) VPHSUBSW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPHSUBSW", 3, Operands { v0, v1, v2 })
    // VPHSUBSW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // three-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // inverted R/B register-extension bits xor'ed into the map-select byte (0F38 map)
            m.emit(0x79 ^ (hlcode(v[1]) << 3))                      // W=0, vvvv=v1 (inverted), L=0 (128-bit), pp=1 (0x66)
            m.emit(0x07)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: mod=11 (register-direct), reg=v2, rm=v0
        })
    }
    // VPHSUBSW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))  // VEX prefix, 0F38 map, L=0/pp=0x66, vvvv=v1
            m.emit(0x07)                                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                               // ModRM + SIB + displacement for the memory operand
        })
    }
    // VPHSUBSW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // three-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // inverted R/B register-extension bits xor'ed into the map-select byte (0F38 map)
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                      // W=0, vvvv=v1 (inverted), L=1 (256-bit), pp=1 (0x66)
            m.emit(0x07)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: mod=11 (register-direct), reg=v2, rm=v0
        })
    }
    // VPHSUBSW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))  // VEX prefix, 0F38 map, L=1/pp=0x66, vvvv=v1
            m.emit(0x07)                                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                               // ModRM + SIB + displacement for the memory operand
        })
    }
    // no encoder was registered: the operand combination is not a supported form
    if p.len == 0 {
        panic("invalid operands for VPHSUBSW")
    }
    return p
}
 72526  
 72527  // VPHSUBW performs "Packed Horizontal Subtract Word Integers".
 72528  //
 72529  // Mnemonic        : VPHSUBW
 72530  // Supported forms : (4 forms)
 72531  //
 72532  //    * VPHSUBW xmm, xmm, xmm     [AVX]
 72533  //    * VPHSUBW m128, xmm, xmm    [AVX]
 72534  //    * VPHSUBW ymm, ymm, ymm     [AVX2]
 72535  //    * VPHSUBW m256, ymm, ymm    [AVX2]
 72536  //
func (self *Program) VPHSUBW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPHSUBW", 3, Operands { v0, v1, v2 })
    // Each matching operand pattern below registers one candidate encoder;
    // if no pattern matched, p.len stays 0 and we panic on invalid operands.
    // VPHSUBW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: raw 3-byte VEX prefix, opcode 0x05, register-direct ModRM.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x05)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPHSUBW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: vex3() builds the prefix; mrsd() emits ModRM/SIB/disp.
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x05)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPHSUBW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form (AVX2); 0x7d vs 0x79 carries the VEX.L bit.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x05)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPHSUBW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x05)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPHSUBW")
    }
    return p
}
 72588  
 72589  // VPHSUBWD performs "Packed Horizontal Subtract Signed Word to Signed Doubleword".
 72590  //
 72591  // Mnemonic        : VPHSUBWD
 72592  // Supported forms : (2 forms)
 72593  //
 72594  //    * VPHSUBWD xmm, xmm     [XOP]
 72595  //    * VPHSUBWD m128, xmm    [XOP]
 72596  //
func (self *Program) VPHSUBWD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPHSUBWD", 2, Operands { v0, v1 })
    // AMD XOP instruction: uses the 0x8F escape prefix instead of VEX's 0xC4.
    // VPHSUBWD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: raw XOP prefix bytes, opcode 0xE2, register-direct ModRM.
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x78)
            m.emit(0xe2)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPHSUBWD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: vex3() builds the XOP prefix; mrsd() emits ModRM/SIB/disp.
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[1]), addr(v[0]), 0)
            m.emit(0xe2)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPHSUBWD")
    }
    return p
}
 72626  
 72627  // VPINSRB performs "Insert Byte".
 72628  //
 72629  // Mnemonic        : VPINSRB
 72630  // Supported forms : (4 forms)
 72631  //
 72632  //    * VPINSRB imm8, r32, xmm, xmm    [AVX]
 72633  //    * VPINSRB imm8, m8, xmm, xmm     [AVX]
 72634  //    * VPINSRB imm8, r32, xmm, xmm    [AVX512BW]
 72635  //    * VPINSRB imm8, m8, xmm, xmm     [AVX512BW]
 72636  //
func (self *Program) VPINSRB(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPINSRB", 4, Operands { v0, v1, v2, v3 })
    // Both VEX (AVX) and EVEX (AVX-512BW) forms may be registered for the
    // same register pattern; selection happens later at encode time.
    // VPINSRB imm8, r32, xmm, xmm
    if isImm8(v0) && isReg32(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX register form: prefix, opcode 0x20, ModRM, trailing imm8.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x20)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPINSRB imm8, m8, xmm, xmm
    if isImm8(v0) && isM8(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x20)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPINSRB imm8, r32, xmm, xmm
    if isImm8(v0) && isReg32(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form: 4-byte prefix starting with 0x62.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | 0x00)
            m.emit(0x20)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPINSRB imm8, m8, xmm, xmm
    if isImm8(v0) && isM8(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form: disp8 compression scale is 1 for a byte operand.
            m.evex(0b11, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), 0, 0, 0)
            m.emit(0x20)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPINSRB")
    }
    return p
}
 72693  
 72694  // VPINSRD performs "Insert Doubleword".
 72695  //
 72696  // Mnemonic        : VPINSRD
 72697  // Supported forms : (4 forms)
 72698  //
 72699  //    * VPINSRD imm8, r32, xmm, xmm    [AVX]
 72700  //    * VPINSRD imm8, m32, xmm, xmm    [AVX]
 72701  //    * VPINSRD imm8, r32, xmm, xmm    [AVX512DQ]
 72702  //    * VPINSRD imm8, m32, xmm, xmm    [AVX512DQ]
 72703  //
func (self *Program) VPINSRD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPINSRD", 4, Operands { v0, v1, v2, v3 })
    // Both VEX (AVX) and EVEX (AVX-512DQ) forms may be registered for the
    // same register pattern; selection happens later at encode time.
    // VPINSRD imm8, r32, xmm, xmm
    if isImm8(v0) && isReg32(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX register form: prefix, opcode 0x22, ModRM, trailing imm8.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x22)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPINSRD imm8, m32, xmm, xmm
    if isImm8(v0) && isM32(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x22)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPINSRD imm8, r32, xmm, xmm
    if isImm8(v0) && isReg32(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form: 4-byte prefix starting with 0x62.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | 0x00)
            m.emit(0x22)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPINSRD imm8, m32, xmm, xmm
    if isImm8(v0) && isM32(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form: disp8 compression scale is 4 for a dword operand.
            m.evex(0b11, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), 0, 0, 0)
            m.emit(0x22)
            m.mrsd(lcode(v[3]), addr(v[1]), 4)
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPINSRD")
    }
    return p
}
 72760  
 72761  // VPINSRQ performs "Insert Quadword".
 72762  //
 72763  // Mnemonic        : VPINSRQ
 72764  // Supported forms : (4 forms)
 72765  //
 72766  //    * VPINSRQ imm8, r64, xmm, xmm    [AVX]
 72767  //    * VPINSRQ imm8, m64, xmm, xmm    [AVX]
 72768  //    * VPINSRQ imm8, r64, xmm, xmm    [AVX512DQ]
 72769  //    * VPINSRQ imm8, m64, xmm, xmm    [AVX512DQ]
 72770  //
func (self *Program) VPINSRQ(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPINSRQ", 4, Operands { v0, v1, v2, v3 })
    // 64-bit variant of VPINSRD: same 0x22 opcode with the prefix W bit set
    // (0xf9/0xfd/0x81/0x85 prefix bytes vs the 0x79/0x7d/0x01/0x05 of VPINSRD).
    // VPINSRQ imm8, r64, xmm, xmm
    if isImm8(v0) && isReg64(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX register form: prefix, opcode 0x22, ModRM, trailing imm8.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0xf9 ^ (hlcode(v[2]) << 3))
            m.emit(0x22)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPINSRQ imm8, m64, xmm, xmm
    if isImm8(v0) && isM64(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b11, 0x81, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x22)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPINSRQ imm8, r64, xmm, xmm
    if isImm8(v0) && isReg64(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form: 4-byte prefix starting with 0x62.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | 0x00)
            m.emit(0x22)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPINSRQ imm8, m64, xmm, xmm
    if isImm8(v0) && isM64(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form: disp8 compression scale is 8 for a qword operand.
            m.evex(0b11, 0x85, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), 0, 0, 0)
            m.emit(0x22)
            m.mrsd(lcode(v[3]), addr(v[1]), 8)
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPINSRQ")
    }
    return p
}
 72827  
 72828  // VPINSRW performs "Insert Word".
 72829  //
 72830  // Mnemonic        : VPINSRW
 72831  // Supported forms : (4 forms)
 72832  //
 72833  //    * VPINSRW imm8, r32, xmm, xmm    [AVX]
 72834  //    * VPINSRW imm8, m16, xmm, xmm    [AVX]
 72835  //    * VPINSRW imm8, r32, xmm, xmm    [AVX512BW]
 72836  //    * VPINSRW imm8, m16, xmm, xmm    [AVX512BW]
 72837  //
func (self *Program) VPINSRW(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPINSRW", 4, Operands { v0, v1, v2, v3 })
    // Unlike VPINSRB/D/Q this uses legacy-map opcode 0xC4, so the AVX forms
    // can use the shorter 2-byte VEX prefix via vex2().
    // VPINSRW imm8, r32, xmm, xmm
    if isImm8(v0) && isReg32(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX register form: vex2() builds the prefix, then opcode/ModRM/imm8.
            m.vex2(1, hcode(v[3]), v[1], hlcode(v[2]))
            m.emit(0xc4)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPINSRW imm8, m16, xmm, xmm
    if isImm8(v0) && isM16(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xc4)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPINSRW imm8, r32, xmm, xmm
    if isImm8(v0) && isReg32(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form: 4-byte prefix starting with 0x62.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | 0x00)
            m.emit(0xc4)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPINSRW imm8, m16, xmm, xmm
    if isImm8(v0) && isM16(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form: disp8 compression scale is 2 for a word operand.
            m.evex(0b01, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), 0, 0, 0)
            m.emit(0xc4)
            m.mrsd(lcode(v[3]), addr(v[1]), 2)
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPINSRW")
    }
    return p
}
 72892  
 72893  // VPLZCNTD performs "Count the Number of Leading Zero Bits for Packed Doubleword Values".
 72894  //
 72895  // Mnemonic        : VPLZCNTD
 72896  // Supported forms : (6 forms)
 72897  //
 72898  //    * VPLZCNTD m128/m32bcst, xmm{k}{z}    [AVX512CD,AVX512VL]
 72899  //    * VPLZCNTD m256/m32bcst, ymm{k}{z}    [AVX512CD,AVX512VL]
 72900  //    * VPLZCNTD m512/m32bcst, zmm{k}{z}    [AVX512CD]
 72901  //    * VPLZCNTD xmm, xmm{k}{z}             [AVX512CD,AVX512VL]
 72902  //    * VPLZCNTD ymm, ymm{k}{z}             [AVX512CD,AVX512VL]
 72903  //    * VPLZCNTD zmm, zmm{k}{z}             [AVX512CD]
 72904  //
func (self *Program) VPLZCNTD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPLZCNTD", 2, Operands { v0, v1 })
    // AVX-512-only instruction (opcode 0x44): all forms are EVEX-encoded and
    // support masking ({k}), zeroing ({z}) and, for memory forms, broadcast.
    // VPLZCNTD m128/m32bcst, xmm{k}{z}
    if isM128M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: evex() builds the prefix (incl. mask/zero/broadcast
            // bits); mrsd() scales disp8 by the 16-byte operand size.
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x44)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VPLZCNTD m256/m32bcst, ymm{k}{z}
    if isM256M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x44)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VPLZCNTD m512/m32bcst, zmm{k}{z}
    if isM512M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x44)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VPLZCNTD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: raw EVEX prefix; 0x08/0x28/0x48 selects the
            // 128/256/512-bit vector length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x44)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPLZCNTD ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x44)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPLZCNTD zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x44)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPLZCNTD")
    }
    return p
}
 72981  
 72982  // VPLZCNTQ performs "Count the Number of Leading Zero Bits for Packed Quadword Values".
 72983  //
 72984  // Mnemonic        : VPLZCNTQ
 72985  // Supported forms : (6 forms)
 72986  //
 72987  //    * VPLZCNTQ m128/m64bcst, xmm{k}{z}    [AVX512CD,AVX512VL]
 72988  //    * VPLZCNTQ m256/m64bcst, ymm{k}{z}    [AVX512CD,AVX512VL]
 72989  //    * VPLZCNTQ m512/m64bcst, zmm{k}{z}    [AVX512CD]
 72990  //    * VPLZCNTQ xmm, xmm{k}{z}             [AVX512CD,AVX512VL]
 72991  //    * VPLZCNTQ ymm, ymm{k}{z}             [AVX512CD,AVX512VL]
 72992  //    * VPLZCNTQ zmm, zmm{k}{z}             [AVX512CD]
 72993  //
func (self *Program) VPLZCNTQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPLZCNTQ", 2, Operands { v0, v1 })
    // 64-bit variant of VPLZCNTD: same 0x44 opcode with the prefix W bit set
    // (0x85/0xfd prefix bytes vs VPLZCNTD's 0x05/0x7d).
    // VPLZCNTQ m128/m64bcst, xmm{k}{z}
    if isM128M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: evex() builds the prefix (incl. mask/zero/broadcast
            // bits); mrsd() scales disp8 by the 16-byte operand size.
            m.evex(0b10, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x44)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VPLZCNTQ m256/m64bcst, ymm{k}{z}
    if isM256M64bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x44)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VPLZCNTQ m512/m64bcst, zmm{k}{z}
    if isM512M64bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x44)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VPLZCNTQ xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: raw EVEX prefix; 0x08/0x28/0x48 selects the
            // 128/256/512-bit vector length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x44)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPLZCNTQ ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x44)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPLZCNTQ zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512CD)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x44)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPLZCNTQ")
    }
    return p
}
 73070  
 73071  // VPMACSDD performs "Packed Multiply Accumulate Signed Doubleword to Signed Doubleword".
 73072  //
 73073  // Mnemonic        : VPMACSDD
 73074  // Supported forms : (2 forms)
 73075  //
 73076  //    * VPMACSDD xmm, xmm, xmm, xmm     [XOP]
 73077  //    * VPMACSDD xmm, m128, xmm, xmm    [XOP]
 73078  //
func (self *Program) VPMACSDD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPMACSDD", 4, Operands { v0, v1, v2, v3 })
    // AMD XOP instruction (0x8F escape); the 4th source register is encoded
    // in the high nibble of a trailing selector byte (hlcode(v[0]) << 4).
    // VPMACSDD xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: raw XOP prefix, opcode 0x9E, ModRM, selector byte.
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[2]) << 3))
            m.emit(0x9e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VPMACSDD xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: vex3() builds the XOP prefix; mrsd() emits ModRM/SIB/disp.
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x9e)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMACSDD")
    }
    return p
}
 73110  
 73111  // VPMACSDQH performs "Packed Multiply Accumulate Signed High Doubleword to Signed Quadword".
 73112  //
 73113  // Mnemonic        : VPMACSDQH
 73114  // Supported forms : (2 forms)
 73115  //
 73116  //    * VPMACSDQH xmm, xmm, xmm, xmm     [XOP]
 73117  //    * VPMACSDQH xmm, m128, xmm, xmm    [XOP]
 73118  //
func (self *Program) VPMACSDQH(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPMACSDQH", 4, Operands { v0, v1, v2, v3 })
    // AMD XOP instruction (0x8F escape); the 4th source register is encoded
    // in the high nibble of a trailing selector byte (hlcode(v[0]) << 4).
    // VPMACSDQH xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: raw XOP prefix, opcode 0x9F, ModRM, selector byte.
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[2]) << 3))
            m.emit(0x9f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VPMACSDQH xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: vex3() builds the XOP prefix; mrsd() emits ModRM/SIB/disp.
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x9f)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMACSDQH")
    }
    return p
}
 73150  
 73151  // VPMACSDQL performs "Packed Multiply Accumulate Signed Low Doubleword to Signed Quadword".
 73152  //
 73153  // Mnemonic        : VPMACSDQL
 73154  // Supported forms : (2 forms)
 73155  //
 73156  //    * VPMACSDQL xmm, xmm, xmm, xmm     [XOP]
 73157  //    * VPMACSDQL xmm, m128, xmm, xmm    [XOP]
 73158  //
func (self *Program) VPMACSDQL(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPMACSDQL", 4, Operands { v0, v1, v2, v3 })
    // AMD XOP instruction (0x8F escape); the 4th source register is encoded
    // in the high nibble of a trailing selector byte (hlcode(v[0]) << 4).
    // VPMACSDQL xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: raw XOP prefix, opcode 0x97, ModRM, selector byte.
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[2]) << 3))
            m.emit(0x97)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VPMACSDQL xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: vex3() builds the XOP prefix; mrsd() emits ModRM/SIB/disp.
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x97)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMACSDQL")
    }
    return p
}
 73190  
 73191  // VPMACSSDD performs "Packed Multiply Accumulate with Saturation Signed Doubleword to Signed Doubleword".
 73192  //
 73193  // Mnemonic        : VPMACSSDD
 73194  // Supported forms : (2 forms)
 73195  //
 73196  //    * VPMACSSDD xmm, xmm, xmm, xmm     [XOP]
 73197  //    * VPMACSSDD xmm, m128, xmm, xmm    [XOP]
 73198  //
func (self *Program) VPMACSSDD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPMACSSDD", 4, Operands { v0, v1, v2, v3 })
    // AMD XOP instruction (0x8F escape); the 4th source register is encoded
    // in the high nibble of a trailing selector byte (hlcode(v[0]) << 4).
    // VPMACSSDD xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: raw XOP prefix, opcode 0x8E, ModRM, selector byte.
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[2]) << 3))
            m.emit(0x8e)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VPMACSSDD xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: vex3() builds the XOP prefix; mrsd() emits ModRM/SIB/disp.
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x8e)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMACSSDD")
    }
    return p
}
 73230  
 73231  // VPMACSSDQH performs "Packed Multiply Accumulate with Saturation Signed High Doubleword to Signed Quadword".
 73232  //
 73233  // Mnemonic        : VPMACSSDQH
 73234  // Supported forms : (2 forms)
 73235  //
 73236  //    * VPMACSSDQH xmm, xmm, xmm, xmm     [XOP]
 73237  //    * VPMACSSDQH xmm, m128, xmm, xmm    [XOP]
 73238  //
func (self *Program) VPMACSSDQH(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPMACSSDQH", 4, Operands { v0, v1, v2, v3 })
    // AMD XOP instruction (0x8F escape); the 4th source register is encoded
    // in the high nibble of a trailing selector byte (hlcode(v[0]) << 4).
    // VPMACSSDQH xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: raw XOP prefix, opcode 0x8F, ModRM, selector byte.
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[2]) << 3))
            m.emit(0x8f)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VPMACSSDQH xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: vex3() builds the XOP prefix; mrsd() emits ModRM/SIB/disp.
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x8f)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMACSSDQH")
    }
    return p
}
 73270  
 73271  // VPMACSSDQL performs "Packed Multiply Accumulate with Saturation Signed Low Doubleword to Signed Quadword".
 73272  //
 73273  // Mnemonic        : VPMACSSDQL
 73274  // Supported forms : (2 forms)
 73275  //
 73276  //    * VPMACSSDQL xmm, xmm, xmm, xmm     [XOP]
 73277  //    * VPMACSSDQL xmm, m128, xmm, xmm    [XOP]
 73278  //
func (self *Program) VPMACSSDQL(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPMACSSDQL", 4, Operands { v0, v1, v2, v3 })
    // VPMACSSDQL xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled XOP prefix (0x8f escape, RXB + map 8, W/vvvv/L/pp),
            // opcode 0x87, register-direct ModRM, then the /is4 byte for v[0].
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[2]) << 3))
            m.emit(0x87)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VPMACSSDQL xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // XOP prefix via vex3 helper, opcode, memory-operand ModRM/SIB/disp,
            // then the /is4 immediate selecting v[0].
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x87)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPMACSSDQL")
    }
    return p
}
 73310  
 73311  // VPMACSSWD performs "Packed Multiply Accumulate with Saturation Signed Word to Signed Doubleword".
 73312  //
 73313  // Mnemonic        : VPMACSSWD
 73314  // Supported forms : (2 forms)
 73315  //
 73316  //    * VPMACSSWD xmm, xmm, xmm, xmm     [XOP]
 73317  //    * VPMACSSWD xmm, m128, xmm, xmm    [XOP]
 73318  //
func (self *Program) VPMACSSWD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPMACSSWD", 4, Operands { v0, v1, v2, v3 })
    // VPMACSSWD xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled XOP prefix (0x8f escape, RXB + map 8, W/vvvv/L/pp),
            // opcode 0x86, register-direct ModRM, then the /is4 byte for v[0].
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[2]) << 3))
            m.emit(0x86)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VPMACSSWD xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // XOP prefix via vex3 helper, opcode, memory-operand ModRM/SIB/disp,
            // then the /is4 immediate selecting v[0].
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x86)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPMACSSWD")
    }
    return p
}
 73350  
 73351  // VPMACSSWW performs "Packed Multiply Accumulate with Saturation Signed Word to Signed Word".
 73352  //
 73353  // Mnemonic        : VPMACSSWW
 73354  // Supported forms : (2 forms)
 73355  //
 73356  //    * VPMACSSWW xmm, xmm, xmm, xmm     [XOP]
 73357  //    * VPMACSSWW xmm, m128, xmm, xmm    [XOP]
 73358  //
func (self *Program) VPMACSSWW(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPMACSSWW", 4, Operands { v0, v1, v2, v3 })
    // VPMACSSWW xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled XOP prefix (0x8f escape, RXB + map 8, W/vvvv/L/pp),
            // opcode 0x85, register-direct ModRM, then the /is4 byte for v[0].
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[2]) << 3))
            m.emit(0x85)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VPMACSSWW xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // XOP prefix via vex3 helper, opcode, memory-operand ModRM/SIB/disp,
            // then the /is4 immediate selecting v[0].
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x85)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPMACSSWW")
    }
    return p
}
 73390  
 73391  // VPMACSWD performs "Packed Multiply Accumulate Signed Word to Signed Doubleword".
 73392  //
 73393  // Mnemonic        : VPMACSWD
 73394  // Supported forms : (2 forms)
 73395  //
 73396  //    * VPMACSWD xmm, xmm, xmm, xmm     [XOP]
 73397  //    * VPMACSWD xmm, m128, xmm, xmm    [XOP]
 73398  //
func (self *Program) VPMACSWD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPMACSWD", 4, Operands { v0, v1, v2, v3 })
    // VPMACSWD xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled XOP prefix (0x8f escape, RXB + map 8, W/vvvv/L/pp),
            // opcode 0x96, register-direct ModRM, then the /is4 byte for v[0].
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[2]) << 3))
            m.emit(0x96)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VPMACSWD xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // XOP prefix via vex3 helper, opcode, memory-operand ModRM/SIB/disp,
            // then the /is4 immediate selecting v[0].
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x96)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPMACSWD")
    }
    return p
}
 73430  
 73431  // VPMACSWW performs "Packed Multiply Accumulate Signed Word to Signed Word".
 73432  //
 73433  // Mnemonic        : VPMACSWW
 73434  // Supported forms : (2 forms)
 73435  //
 73436  //    * VPMACSWW xmm, xmm, xmm, xmm     [XOP]
 73437  //    * VPMACSWW xmm, m128, xmm, xmm    [XOP]
 73438  //
func (self *Program) VPMACSWW(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPMACSWW", 4, Operands { v0, v1, v2, v3 })
    // VPMACSWW xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled XOP prefix (0x8f escape, RXB + map 8, W/vvvv/L/pp),
            // opcode 0x95, register-direct ModRM, then the /is4 byte for v[0].
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[2]) << 3))
            m.emit(0x95)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VPMACSWW xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // XOP prefix via vex3 helper, opcode, memory-operand ModRM/SIB/disp,
            // then the /is4 immediate selecting v[0].
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x95)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPMACSWW")
    }
    return p
}
 73470  
 73471  // VPMADCSSWD performs "Packed Multiply Add Accumulate with Saturation Signed Word to Signed Doubleword".
 73472  //
 73473  // Mnemonic        : VPMADCSSWD
 73474  // Supported forms : (2 forms)
 73475  //
 73476  //    * VPMADCSSWD xmm, xmm, xmm, xmm     [XOP]
 73477  //    * VPMADCSSWD xmm, m128, xmm, xmm    [XOP]
 73478  //
func (self *Program) VPMADCSSWD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPMADCSSWD", 4, Operands { v0, v1, v2, v3 })
    // VPMADCSSWD xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled XOP prefix (0x8f escape, RXB + map 8, W/vvvv/L/pp),
            // opcode 0xa6, register-direct ModRM, then the /is4 byte for v[0].
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[2]) << 3))
            m.emit(0xa6)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VPMADCSSWD xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // XOP prefix via vex3 helper, opcode, memory-operand ModRM/SIB/disp,
            // then the /is4 immediate selecting v[0].
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xa6)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPMADCSSWD")
    }
    return p
}
 73510  
 73511  // VPMADCSWD performs "Packed Multiply Add Accumulate Signed Word to Signed Doubleword".
 73512  //
 73513  // Mnemonic        : VPMADCSWD
 73514  // Supported forms : (2 forms)
 73515  //
 73516  //    * VPMADCSWD xmm, xmm, xmm, xmm     [XOP]
 73517  //    * VPMADCSWD xmm, m128, xmm, xmm    [XOP]
 73518  //
func (self *Program) VPMADCSWD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPMADCSWD", 4, Operands { v0, v1, v2, v3 })
    // VPMADCSWD xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled XOP prefix (0x8f escape, RXB + map 8, W/vvvv/L/pp),
            // opcode 0xb6, register-direct ModRM, then the /is4 byte for v[0].
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[2]) << 3))
            m.emit(0xb6)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // VPMADCSWD xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // XOP prefix via vex3 helper, opcode, memory-operand ModRM/SIB/disp,
            // then the /is4 immediate selecting v[0].
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xb6)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPMADCSWD")
    }
    return p
}
 73550  
 73551  // VPMADD52HUQ performs "Packed Multiply of Unsigned 52-bit Unsigned Integers and Add High 52-bit Products to Quadword Accumulators".
 73552  //
 73553  // Mnemonic        : VPMADD52HUQ
 73554  // Supported forms : (6 forms)
 73555  //
 73556  //    * VPMADD52HUQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512IFMA,AVX512VL]
 73557  //    * VPMADD52HUQ xmm, xmm, xmm{k}{z}             [AVX512IFMA,AVX512VL]
 73558  //    * VPMADD52HUQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512IFMA,AVX512VL]
 73559  //    * VPMADD52HUQ ymm, ymm, ymm{k}{z}             [AVX512IFMA,AVX512VL]
 73560  //    * VPMADD52HUQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512IFMA]
 73561  //    * VPMADD52HUQ zmm, zmm, zmm{k}{z}             [AVX512IFMA]
 73562  //
func (self *Program) VPMADD52HUQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPMADD52HUQ", 3, Operands { v0, v1, v2 })
    // VPMADD52HUQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512IFMA | ISA_AVX512VL)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix via helper (vector length 0b00 = 128-bit), opcode 0xb5,
            // then memory ModRM with compressed-disp8 scale 16.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xb5)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPMADD52HUQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512IFMA | ISA_AVX512VL)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix: 0x62 escape; P0 with R/X/B/R' bits;
            // P1 with W and inverted vvvv (v[1]); P2 with z, mask, and L'L=00 (128-bit).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xb5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMADD52HUQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512IFMA | ISA_AVX512VL)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix via helper (vector length 0b01 = 256-bit), disp8 scale 32.
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xb5)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPMADD52HUQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512IFMA | ISA_AVX512VL)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix; 0x20 in P2 selects L'L=01 (256-bit).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xb5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMADD52HUQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512IFMA)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix via helper (vector length 0b10 = 512-bit), disp8 scale 64.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xb5)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPMADD52HUQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512IFMA)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix; 0x40 in P2 selects L'L=10 (512-bit).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xb5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPMADD52HUQ")
    }
    return p
}
 73639  
 73640  // VPMADD52LUQ performs "Packed Multiply of Unsigned 52-bit Integers and Add the Low 52-bit Products to Quadword Accumulators".
 73641  //
 73642  // Mnemonic        : VPMADD52LUQ
 73643  // Supported forms : (6 forms)
 73644  //
 73645  //    * VPMADD52LUQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512IFMA,AVX512VL]
 73646  //    * VPMADD52LUQ xmm, xmm, xmm{k}{z}             [AVX512IFMA,AVX512VL]
 73647  //    * VPMADD52LUQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512IFMA,AVX512VL]
 73648  //    * VPMADD52LUQ ymm, ymm, ymm{k}{z}             [AVX512IFMA,AVX512VL]
 73649  //    * VPMADD52LUQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512IFMA]
 73650  //    * VPMADD52LUQ zmm, zmm, zmm{k}{z}             [AVX512IFMA]
 73651  //
func (self *Program) VPMADD52LUQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPMADD52LUQ", 3, Operands { v0, v1, v2 })
    // VPMADD52LUQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512IFMA | ISA_AVX512VL)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix via helper (vector length 0b00 = 128-bit), opcode 0xb4,
            // then memory ModRM with compressed-disp8 scale 16.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xb4)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPMADD52LUQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512IFMA | ISA_AVX512VL)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix: 0x62 escape; P0 with R/X/B/R' bits;
            // P1 with W and inverted vvvv (v[1]); P2 with z, mask, and L'L=00 (128-bit).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xb4)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMADD52LUQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512IFMA | ISA_AVX512VL)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix via helper (vector length 0b01 = 256-bit), disp8 scale 32.
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xb4)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPMADD52LUQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512IFMA | ISA_AVX512VL)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix; 0x20 in P2 selects L'L=01 (256-bit).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xb4)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMADD52LUQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512IFMA)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix via helper (vector length 0b10 = 512-bit), disp8 scale 64.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xb4)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPMADD52LUQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512IFMA)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix; 0x40 in P2 selects L'L=10 (512-bit).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xb4)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPMADD52LUQ")
    }
    return p
}
 73728  
 73729  // VPMADDUBSW performs "Multiply and Add Packed Signed and Unsigned Byte Integers".
 73730  //
 73731  // Mnemonic        : VPMADDUBSW
 73732  // Supported forms : (10 forms)
 73733  //
 73734  //    * VPMADDUBSW xmm, xmm, xmm           [AVX]
 73735  //    * VPMADDUBSW m128, xmm, xmm          [AVX]
 73736  //    * VPMADDUBSW ymm, ymm, ymm           [AVX2]
 73737  //    * VPMADDUBSW m256, ymm, ymm          [AVX2]
 73738  //    * VPMADDUBSW zmm, zmm, zmm{k}{z}     [AVX512BW]
 73739  //    * VPMADDUBSW m512, zmm, zmm{k}{z}    [AVX512BW]
 73740  //    * VPMADDUBSW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
 73741  //    * VPMADDUBSW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
 73742  //    * VPMADDUBSW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
 73743  //    * VPMADDUBSW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
 73744  //
func (self *Program) VPMADDUBSW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPMADDUBSW", 3, Operands { v0, v1, v2 })
    // VPMADDUBSW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 3-byte VEX prefix: 0xc4 escape, RXB + map bits,
            // then W=0 / inverted vvvv (v[1]) / L=0 / pp; opcode 0x04.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x04)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMADDUBSW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX prefix via helper, then opcode and memory ModRM/SIB/disp.
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x04)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMADDUBSW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same 3-byte VEX layout as the xmm form, with L=1 (256-bit) in byte 3.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x04)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMADDUBSW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX prefix via helper (256-bit form), then opcode and memory operand.
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x04)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMADDUBSW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix; 0x40 in the fourth byte selects
            // L'L=10 (512-bit), with masking/zeroing bits from v[2].
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x04)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMADDUBSW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix via helper (vector length 0b10 = 512-bit),
            // disp8 compression scale 64; last arg 0: no broadcast form.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x04)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPMADDUBSW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix; 0x00 selects L'L=00 (128-bit).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x04)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMADDUBSW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix via helper (vector length 0b00 = 128-bit), disp8 scale 16.
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x04)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPMADDUBSW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix; 0x20 selects L'L=01 (256-bit).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x04)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMADDUBSW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix via helper (vector length 0b01 = 256-bit), disp8 scale 32.
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x04)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPMADDUBSW")
    }
    return p
}
 73865  
 73866  // VPMADDWD performs "Multiply and Add Packed Signed Word Integers".
 73867  //
 73868  // Mnemonic        : VPMADDWD
 73869  // Supported forms : (10 forms)
 73870  //
 73871  //    * VPMADDWD xmm, xmm, xmm           [AVX]
 73872  //    * VPMADDWD m128, xmm, xmm          [AVX]
 73873  //    * VPMADDWD ymm, ymm, ymm           [AVX2]
 73874  //    * VPMADDWD m256, ymm, ymm          [AVX2]
 73875  //    * VPMADDWD zmm, zmm, zmm{k}{z}     [AVX512BW]
 73876  //    * VPMADDWD m512, zmm, zmm{k}{z}    [AVX512BW]
 73877  //    * VPMADDWD xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
 73878  //    * VPMADDWD m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
 73879  //    * VPMADDWD ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
 73880  //    * VPMADDWD m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
 73881  //
func (self *Program) VPMADDWD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPMADDWD", 3, Operands { v0, v1, v2 })
    // Each operand form that matches registers one candidate encoder via p.add;
    // if no form matches, the panic at the bottom fires. Prefix flag constants
    // follow the internal vex2/vex3/evex conventions (pp/L/W packed into small
    // integers) — see the Intel SDM Vol. 2 for the VEX/EVEX prefix layout.
    // VPMADDWD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))    // 2-byte VEX prefix (66 prefix, 128-bit form)
            m.emit(0xf5)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11 (reg-reg), reg=dst, r/m=src
        })
    }
    // VPMADDWD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 2-byte VEX prefix, memory source
            m.emit(0xf5)                                     // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM/SIB + displacement (disp scale 1)
        })
    }
    // VPMADDWD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))    // same scheme, 256-bit form (VEX.L set)
            m.emit(0xf5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMADDWD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 256-bit form, memory source
            m.emit(0xf5)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMADDWD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX prefix escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted R/X/B/R' extension bits, opcode map 0F
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: W=0, inverted vvvv (second source), pp=66
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)  // P2: z flag, V', aaa opmask, L'L=10 (512-bit)
            m.emit(0xf5)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: mod=11, reg=dst, r/m=src
        })
    }
    // VPMADDWD m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // EVEX via helper: map 0F, 66 prefix, 512-bit, opmask/zeroing, no broadcast
            m.emit(0xf5)                                                                                 // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)                                                          // ModRM/SIB; disp8 compressed with N=64
        })
    }
    // VPMADDWD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX prefix; same layout as the 512-bit form above
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)  // L'L=00 (128-bit)
            m.emit(0xf5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMADDWD m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // 128-bit EVEX form
            m.emit(0xf5)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)                                                          // disp8 compressed with N=16
        })
    }
    // VPMADDWD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX prefix; same layout as the 512-bit form above
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)  // L'L=01 (256-bit)
            m.emit(0xf5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMADDWD m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // 256-bit EVEX form
            m.emit(0xf5)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)                                                          // disp8 compressed with N=32
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMADDWD")
    }
    return p
}
 73998  
// VPMASKMOVD performs "Conditional Move Packed Doubleword Integers".
//
// Mnemonic        : VPMASKMOVD
// Supported forms : (4 forms)
//
//    * VPMASKMOVD m128, xmm, xmm    [AVX2]
//    * VPMASKMOVD m256, ymm, ymm    [AVX2]
//    * VPMASKMOVD xmm, xmm, m128    [AVX2]
//    * VPMASKMOVD ymm, ymm, m256    [AVX2]
//
func (self *Program) VPMASKMOVD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPMASKMOVD", 3, Operands { v0, v1, v2 })
    // Each operand form that matches registers one candidate encoder via p.add.
    // Load forms use opcode 0x8c, store forms opcode 0x8e; all are 3-byte VEX
    // with opcode map 0F38 — see the Intel SDM Vol. 2 for the prefix layout.
    // VPMASKMOVD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 3-byte VEX: map 0F38, 66 prefix, 128-bit
            m.emit(0x8c)                                                    // opcode (masked load)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                              // ModRM/SIB + displacement for the memory source
        })
    }
    // VPMASKMOVD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 256-bit variant (VEX.L set)
            m.emit(0x8c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMASKMOVD xmm, xmm, m128
    if isXMM(v0) && isXMM(v1) && isM128(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[0]), addr(v[2]), hlcode(v[1])) // store direction: reg field is the data source v[0]
            m.emit(0x8e)                                                    // opcode (masked store)
            m.mrsd(lcode(v[0]), addr(v[2]), 1)                              // ModRM/SIB for the memory destination
        })
    }
    // VPMASKMOVD ymm, ymm, m256
    if isYMM(v0) && isYMM(v1) && isM256(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[0]), addr(v[2]), hlcode(v[1])) // 256-bit masked store
            m.emit(0x8e)
            m.mrsd(lcode(v[0]), addr(v[2]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMASKMOVD")
    }
    return p
}
 74056  
// VPMASKMOVQ performs "Conditional Move Packed Quadword Integers".
//
// Mnemonic        : VPMASKMOVQ
// Supported forms : (4 forms)
//
//    * VPMASKMOVQ m128, xmm, xmm    [AVX2]
//    * VPMASKMOVQ m256, ymm, ymm    [AVX2]
//    * VPMASKMOVQ xmm, xmm, m128    [AVX2]
//    * VPMASKMOVQ ymm, ymm, m256    [AVX2]
//
func (self *Program) VPMASKMOVQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPMASKMOVQ", 3, Operands { v0, v1, v2 })
    // Identical encoding scheme to VPMASKMOVD (map 0F38, opcodes 0x8c/0x8e),
    // except the VEX.W bit is set (0x81/0x85 flags) to select quadword elements.
    // VPMASKMOVQ m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 3-byte VEX: map 0F38, 66 prefix, W=1, 128-bit
            m.emit(0x8c)                                                    // opcode (masked load)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                              // ModRM/SIB + displacement for the memory source
        })
    }
    // VPMASKMOVQ m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 256-bit variant (VEX.L set)
            m.emit(0x8c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMASKMOVQ xmm, xmm, m128
    if isXMM(v0) && isXMM(v1) && isM128(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[0]), addr(v[2]), hlcode(v[1])) // store direction: reg field is the data source v[0]
            m.emit(0x8e)                                                    // opcode (masked store)
            m.mrsd(lcode(v[0]), addr(v[2]), 1)                              // ModRM/SIB for the memory destination
        })
    }
    // VPMASKMOVQ ymm, ymm, m256
    if isYMM(v0) && isYMM(v1) && isM256(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[0]), addr(v[2]), hlcode(v[1])) // 256-bit masked store
            m.emit(0x8e)
            m.mrsd(lcode(v[0]), addr(v[2]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMASKMOVQ")
    }
    return p
}
 74114  
// VPMAXSB performs "Maximum of Packed Signed Byte Integers".
//
// Mnemonic        : VPMAXSB
// Supported forms : (10 forms)
//
//    * VPMAXSB xmm, xmm, xmm           [AVX]
//    * VPMAXSB m128, xmm, xmm          [AVX]
//    * VPMAXSB ymm, ymm, ymm           [AVX2]
//    * VPMAXSB m256, ymm, ymm          [AVX2]
//    * VPMAXSB zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPMAXSB m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPMAXSB xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMAXSB m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPMAXSB ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMAXSB m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
func (self *Program) VPMAXSB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPMAXSB", 3, Operands { v0, v1, v2 })
    // Opcode 0x3c in map 0F38. Register-register VEX forms are emitted inline
    // as raw 3-byte prefixes (0xc4, byte2, byte3) rather than through vex2/vex3.
    // Each matching operand form registers one candidate encoder via p.add.
    // VPMAXSB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // inverted R/B bits, map select 0F38
            m.emit(0x79 ^ (hlcode(v[1]) << 3))                      // W=0, inverted vvvv (second source), pp=66, L=128
            m.emit(0x3c)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: mod=11, reg=dst, r/m=src
        })
    }
    // VPMAXSB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 3-byte VEX: map 0F38, 66 prefix, 128-bit
            m.emit(0x3c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                              // ModRM/SIB + displacement for the memory source
        })
    }
    // VPMAXSB ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                      // as above, L=256
            m.emit(0x3c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXSB m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 256-bit variant (VEX.L set)
            m.emit(0x3c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMAXSB zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX prefix escape byte
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted R/X/B/R' bits, map 0F38
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: W=0, inverted vvvv, pp=66
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)  // P2: z flag, V', aaa opmask, L'L=10 (512-bit)
            m.emit(0x3c)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: mod=11, reg=dst, r/m=src
        })
    }
    // VPMAXSB m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // EVEX via helper: map 0F38, 66 prefix, 512-bit, no broadcast
            m.emit(0x3c)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)                                                          // disp8 compressed with N=64
        })
    }
    // VPMAXSB xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX prefix; same layout as the 512-bit form above
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)  // L'L=00 (128-bit)
            m.emit(0x3c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXSB m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // 128-bit EVEX form
            m.emit(0x3c)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)                                                          // disp8 compressed with N=16
        })
    }
    // VPMAXSB ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX prefix; same layout as the 512-bit form above
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)  // L'L=01 (256-bit)
            m.emit(0x3c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXSB m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // 256-bit EVEX form
            m.emit(0x3c)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)                                                          // disp8 compressed with N=32
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMAXSB")
    }
    return p
}
 74251  
// VPMAXSD performs "Maximum of Packed Signed Doubleword Integers".
//
// Mnemonic        : VPMAXSD
// Supported forms : (10 forms)
//
//    * VPMAXSD xmm, xmm, xmm                   [AVX]
//    * VPMAXSD m128, xmm, xmm                  [AVX]
//    * VPMAXSD ymm, ymm, ymm                   [AVX2]
//    * VPMAXSD m256, ymm, ymm                  [AVX2]
//    * VPMAXSD m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPMAXSD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPMAXSD m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPMAXSD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPMAXSD m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPMAXSD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPMAXSD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPMAXSD", 3, Operands { v0, v1, v2 })
    // Opcode 0x3d in map 0F38. Unlike the byte/word variants, the EVEX memory
    // forms here support embedded 32-bit broadcast, so bcode(v[0]) is passed to
    // m.evex instead of a literal 0.
    // VPMAXSD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                            // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))  // inverted R/B bits, map select 0F38
            m.emit(0x79 ^ (hlcode(v[1]) << 3))                      // W=0, inverted vvvv (second source), pp=66, L=128
            m.emit(0x3d)                                            // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))           // ModRM: mod=11, reg=dst, r/m=src
        })
    }
    // VPMAXSD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 3-byte VEX: map 0F38, 66 prefix, 128-bit
            m.emit(0x3d)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                              // ModRM/SIB + displacement for the memory source
        })
    }
    // VPMAXSD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                      // as above, L=256
            m.emit(0x3d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXSD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 256-bit variant (VEX.L set)
            m.emit(0x3d)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMAXSD m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0])) // EVEX: map 0F38, 512-bit, broadcast bit from operand
            m.emit(0x3d)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)                                                                    // disp8 compressed with N=64
        })
    }
    // VPMAXSD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX prefix escape byte
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted R/X/B/R' bits, map 0F38
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: W=0, inverted vvvv, pp=66
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)  // P2: z flag, V', aaa opmask, L'L=10 (512-bit)
            m.emit(0x3d)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: mod=11, reg=dst, r/m=src
        })
    }
    // VPMAXSD m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0])) // 128-bit EVEX form with broadcast
            m.emit(0x3d)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)                                                                    // disp8 compressed with N=16
        })
    }
    // VPMAXSD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX prefix; same layout as the 512-bit form above
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)  // L'L=00 (128-bit)
            m.emit(0x3d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXSD m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0])) // 256-bit EVEX form with broadcast
            m.emit(0x3d)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)                                                                    // disp8 compressed with N=32
        })
    }
    // VPMAXSD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX prefix; same layout as the 512-bit form above
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)  // L'L=01 (256-bit)
            m.emit(0x3d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMAXSD")
    }
    return p
}
 74388  
// VPMAXSQ performs "Maximum of Packed Signed Quadword Integers".
//
// Mnemonic        : VPMAXSQ
// Supported forms : (6 forms)
//
//    * VPMAXSQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPMAXSQ zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPMAXSQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPMAXSQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPMAXSQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPMAXSQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPMAXSQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPMAXSQ", 3, Operands { v0, v1, v2 })
    // EVEX-only instruction (no VEX legacy forms). Same opcode 0x3d / map 0F38
    // as VPMAXSD, but with EVEX.W=1 (0xfd P1 base, 0x85 helper flag) to select
    // quadword elements, and 64-bit embedded broadcast on the memory forms.
    // VPMAXSQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0])) // EVEX: map 0F38, W=1, 512-bit, broadcast bit from operand
            m.emit(0x3d)                                                                                           // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)                                                                    // disp8 compressed with N=64
        })
    }
    // VPMAXSQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX prefix escape byte
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted R/X/B/R' bits, map 0F38
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                             // P1: W=1, inverted vvvv, pp=66
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)  // P2: z flag, V', aaa opmask, L'L=10 (512-bit)
            m.emit(0x3d)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: mod=11, reg=dst, r/m=src
        })
    }
    // VPMAXSQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0])) // 128-bit EVEX form with broadcast
            m.emit(0x3d)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)                                                                    // disp8 compressed with N=16
        })
    }
    // VPMAXSQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX prefix; same layout as the 512-bit form above
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)  // L'L=00 (128-bit)
            m.emit(0x3d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXSQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0])) // 256-bit EVEX form with broadcast
            m.emit(0x3d)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)                                                                    // disp8 compressed with N=32
        })
    }
    // VPMAXSQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX prefix; same layout as the 512-bit form above
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)  // L'L=01 (256-bit)
            m.emit(0x3d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMAXSQ")
    }
    return p
}
 74477  
// VPMAXSW performs "Maximum of Packed Signed Word Integers".
//
// Mnemonic        : VPMAXSW
// Supported forms : (10 forms)
//
//    * VPMAXSW xmm, xmm, xmm           [AVX]
//    * VPMAXSW m128, xmm, xmm          [AVX]
//    * VPMAXSW ymm, ymm, ymm           [AVX2]
//    * VPMAXSW m256, ymm, ymm          [AVX2]
//    * VPMAXSW zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPMAXSW m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPMAXSW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMAXSW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPMAXSW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMAXSW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
func (self *Program) VPMAXSW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPMAXSW", 3, Operands { v0, v1, v2 })
    // Opcode 0xee in the legacy 0F map (so the AVX forms can use the compact
    // 2-byte VEX prefix). Word-granular AVX-512 forms carry no broadcast.
    // VPMAXSW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))    // 2-byte VEX prefix (66 prefix, 128-bit form)
            m.emit(0xee)                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: mod=11, reg=dst, r/m=src
        })
    }
    // VPMAXSW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 2-byte VEX prefix, memory source
            m.emit(0xee)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)               // ModRM/SIB + displacement (disp scale 1)
        })
    }
    // VPMAXSW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))    // same scheme, 256-bit form (VEX.L set)
            m.emit(0xee)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXSW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 256-bit form, memory source
            m.emit(0xee)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMAXSW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX prefix escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // P0: inverted R/X/B/R' bits, map 0F
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                             // P1: W=0, inverted vvvv, pp=66
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)  // P2: z flag, V', aaa opmask, L'L=10 (512-bit)
            m.emit(0xee)                                                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                  // ModRM: mod=11, reg=dst, r/m=src
        })
    }
    // VPMAXSW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // EVEX via helper: map 0F, 66 prefix, 512-bit, no broadcast
            m.emit(0xee)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)                                                          // disp8 compressed with N=64
        })
    }
    // VPMAXSW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX prefix; same layout as the 512-bit form above
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)  // L'L=00 (128-bit)
            m.emit(0xee)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXSW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // 128-bit EVEX form
            m.emit(0xee)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)                                                          // disp8 compressed with N=16
        })
    }
    // VPMAXSW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                   // EVEX prefix; same layout as the 512-bit form above
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)  // L'L=01 (256-bit)
            m.emit(0xee)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXSW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // 256-bit EVEX form
            m.emit(0xee)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)                                                          // disp8 compressed with N=32
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMAXSW")
    }
    return p
}
 74610  
// VPMAXUB performs "Maximum of Packed Unsigned Byte Integers".
//
// Mnemonic        : VPMAXUB
// Supported forms : (10 forms)
//
//    * VPMAXUB xmm, xmm, xmm           [AVX]
//    * VPMAXUB m128, xmm, xmm          [AVX]
//    * VPMAXUB ymm, ymm, ymm           [AVX2]
//    * VPMAXUB m256, ymm, ymm          [AVX2]
//    * VPMAXUB zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPMAXUB m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPMAXUB xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMAXUB m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPMAXUB ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMAXUB m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
func (self *Program) VPMAXUB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPMAXUB", 3, Operands { v0, v1, v2 })
    // Each operand form below that matches (v0, v1, v2) registers one candidate
    // encoder via p.add. Operands are source-first: v[2] supplies the ModRM.reg
    // field (the destination register), v[0] the ModRM.rm register or memory
    // operand, and v[1] the VEX/EVEX.vvvv second source.
    // VPMAXUB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xde)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXUB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xde)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMAXUB ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xde)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXUB m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xde)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMAXUB zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix (Intel SDM Vol. 2, 2.6): escape byte,
            // then P0/P1/P2, then opcode and ModRM.
            m.emit(0x62)                                                                     // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))   // P0: opcode map + inverted R/X/B/R' register-extension bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                               // P1: W/pp base with inverted vvvv (second source)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)    // P2: z, V', mask register; 0x40 selects 512-bit length
            m.emit(0xde)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXUB m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // m.evex assembles the EVEX prefix; the last mrsd argument is the
            // disp8*N compression scale (vector width in bytes).
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xde)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPMAXUB xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)    // 0x00 in P2 = 128-bit length
            m.emit(0xde)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXUB m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xde)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPMAXUB ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)    // 0x20 in P2 = 256-bit length
            m.emit(0xde)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXUB m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xde)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No supported operand form matched the arguments.
    if p.len == 0 {
        panic("invalid operands for VPMAXUB")
    }
    return p
}
 74743  
// VPMAXUD performs "Maximum of Packed Unsigned Doubleword Integers".
//
// Mnemonic        : VPMAXUD
// Supported forms : (10 forms)
//
//    * VPMAXUD xmm, xmm, xmm                   [AVX]
//    * VPMAXUD m128, xmm, xmm                  [AVX]
//    * VPMAXUD ymm, ymm, ymm                   [AVX2]
//    * VPMAXUD m256, ymm, ymm                  [AVX2]
//    * VPMAXUD m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPMAXUD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPMAXUD m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPMAXUD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPMAXUD m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPMAXUD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPMAXUD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPMAXUD", 3, Operands { v0, v1, v2 })
    // Each operand form below that matches (v0, v1, v2) registers one candidate
    // encoder via p.add. Operands are source-first: v[2] supplies the ModRM.reg
    // field (the destination register), v[0] the ModRM.rm register or memory
    // operand, and v[1] the VEX/EVEX.vvvv second source.
    // VPMAXUD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix (0xc4), then opcode and ModRM.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x3f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXUD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x3f)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMAXUD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x3f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXUD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x3f)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMAXUD m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // bcode(v[0]) carries the EVEX.b broadcast bit for the m32bcst
            // form; the last mrsd argument is the disp8*N compression scale.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x3f)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPMAXUD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                     // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))   // P0: opcode map + inverted R/X/B/R' bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                               // P1: W/pp base with inverted vvvv
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)    // P2: z, V', mask; 0x40 = 512-bit length
            m.emit(0x3f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXUD m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x3f)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPMAXUD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)    // 0x00 in P2 = 128-bit length
            m.emit(0x3f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXUD m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x3f)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPMAXUD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)    // 0x20 in P2 = 256-bit length
            m.emit(0x3f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No supported operand form matched the arguments.
    if p.len == 0 {
        panic("invalid operands for VPMAXUD")
    }
    return p
}
 74880  
// VPMAXUQ performs "Maximum of Packed Unsigned Quadword Integers".
//
// Mnemonic        : VPMAXUQ
// Supported forms : (6 forms)
//
//    * VPMAXUQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPMAXUQ zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPMAXUQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPMAXUQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPMAXUQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPMAXUQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPMAXUQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPMAXUQ", 3, Operands { v0, v1, v2 })
    // EVEX-only instruction (no VEX forms). Each matching form registers one
    // candidate encoder via p.add; v[2] is the destination (ModRM.reg), v[0]
    // the rm/memory source, v[1] the vvvv second source. Note the 0x85/0xfd
    // prefix bases versus VPMAXUD's 0x05/0x7d: the quadword variant sets
    // EVEX.W for 64-bit element size.
    // VPMAXUQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // bcode(v[0]) carries the EVEX.b broadcast bit for the m64bcst
            // form; the last mrsd argument is the disp8*N compression scale.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x3f)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPMAXUQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                     // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))   // P0: opcode map + inverted R/X/B/R' bits
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                               // P1: W=1 base with inverted vvvv
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)    // P2: z, V', mask; 0x40 = 512-bit length
            m.emit(0x3f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXUQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x3f)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPMAXUQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)    // 0x00 in P2 = 128-bit length
            m.emit(0x3f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXUQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x3f)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPMAXUQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)    // 0x20 in P2 = 256-bit length
            m.emit(0x3f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No supported operand form matched the arguments.
    if p.len == 0 {
        panic("invalid operands for VPMAXUQ")
    }
    return p
}
 74969  
// VPMAXUW performs "Maximum of Packed Unsigned Word Integers".
//
// Mnemonic        : VPMAXUW
// Supported forms : (10 forms)
//
//    * VPMAXUW xmm, xmm, xmm           [AVX]
//    * VPMAXUW m128, xmm, xmm          [AVX]
//    * VPMAXUW ymm, ymm, ymm           [AVX2]
//    * VPMAXUW m256, ymm, ymm          [AVX2]
//    * VPMAXUW zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPMAXUW m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPMAXUW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMAXUW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPMAXUW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMAXUW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
func (self *Program) VPMAXUW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPMAXUW", 3, Operands { v0, v1, v2 })
    // Each operand form below that matches (v0, v1, v2) registers one candidate
    // encoder via p.add. Operands are source-first: v[2] supplies the ModRM.reg
    // field (the destination register), v[0] the ModRM.rm register or memory
    // operand, and v[1] the VEX/EVEX.vvvv second source.
    // VPMAXUW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix (0xc4), then opcode and ModRM.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x3e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXUW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x3e)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMAXUW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x3e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXUW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x3e)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMAXUW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                     // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))   // P0: opcode map + inverted R/X/B/R' bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                               // P1: W/pp base with inverted vvvv
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)    // P2: z, V', mask; 0x40 = 512-bit length
            m.emit(0x3e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXUW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // m.evex assembles the EVEX prefix; the last mrsd argument is the
            // disp8*N compression scale (vector width in bytes).
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x3e)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPMAXUW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)    // 0x00 in P2 = 128-bit length
            m.emit(0x3e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXUW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x3e)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPMAXUW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)    // 0x20 in P2 = 256-bit length
            m.emit(0x3e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMAXUW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x3e)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No supported operand form matched the arguments.
    if p.len == 0 {
        panic("invalid operands for VPMAXUW")
    }
    return p
}
 75106  
// VPMINSB performs "Minimum of Packed Signed Byte Integers".
//
// Mnemonic        : VPMINSB
// Supported forms : (10 forms)
//
//    * VPMINSB xmm, xmm, xmm           [AVX]
//    * VPMINSB m128, xmm, xmm          [AVX]
//    * VPMINSB ymm, ymm, ymm           [AVX2]
//    * VPMINSB m256, ymm, ymm          [AVX2]
//    * VPMINSB zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPMINSB m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPMINSB xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMINSB m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPMINSB ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMINSB m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
func (self *Program) VPMINSB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPMINSB", 3, Operands { v0, v1, v2 })
    // Each operand form below that matches (v0, v1, v2) registers one candidate
    // encoder via p.add. Operands are source-first: v[2] supplies the ModRM.reg
    // field (the destination register), v[0] the ModRM.rm register or memory
    // operand, and v[1] the VEX/EVEX.vvvv second source.
    // VPMINSB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix (0xc4), then opcode and ModRM.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x38)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMINSB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x38)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMINSB ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x38)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMINSB m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x38)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMINSB zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                     // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))   // P0: opcode map + inverted R/X/B/R' bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                               // P1: W/pp base with inverted vvvv
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)    // P2: z, V', mask; 0x40 = 512-bit length
            m.emit(0x38)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMINSB m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // m.evex assembles the EVEX prefix; the last mrsd argument is the
            // disp8*N compression scale (vector width in bytes).
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x38)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPMINSB xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)    // 0x00 in P2 = 128-bit length
            m.emit(0x38)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMINSB m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x38)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPMINSB ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)    // 0x20 in P2 = 256-bit length
            m.emit(0x38)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMINSB m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x38)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No supported operand form matched the arguments.
    if p.len == 0 {
        panic("invalid operands for VPMINSB")
    }
    return p
}
 75243  
// VPMINSD performs "Minimum of Packed Signed Doubleword Integers".
//
// Mnemonic        : VPMINSD
// Supported forms : (10 forms)
//
//    * VPMINSD xmm, xmm, xmm                   [AVX]
//    * VPMINSD m128, xmm, xmm                  [AVX]
//    * VPMINSD ymm, ymm, ymm                   [AVX2]
//    * VPMINSD m256, ymm, ymm                  [AVX2]
//    * VPMINSD m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPMINSD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPMINSD m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPMINSD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPMINSD m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPMINSD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPMINSD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Operands are in AT&T order: v0 = first source (reg/mem), v1 = second
    // source register, v2 = destination. Every operand form that matches
    // appends one candidate encoder to p; if none match, we panic below.
    p := self.alloc("VPMINSD", 3, Operands { v0, v1, v2 })
    // VPMINSD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                              // 3-byte VEX prefix escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))    // VEX byte 1: register high-bit codes XOR'd into the base value
            m.emit(0x79 ^ (hlcode(v[1]) << 3))                        // VEX byte 2: vvvv field taken from the second source
            m.emit(0x39)                                              // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))             // ModRM: register-direct, reg = destination, r/m = source
        })
    }
    // VPMINSD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))    // 3-byte VEX-encoded memory form
            m.emit(0x39)                                                       // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                                 // ModRM/SIB/displacement for the memory operand
        })
    }
    // VPMINSD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                              // 3-byte VEX prefix escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))    // VEX byte 1: register high-bit codes XOR'd into the base value
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                        // VEX byte 2: vvvv from second source (base 0x7d selects 256-bit length)
            m.emit(0x39)                                              // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))             // ModRM: register-direct, reg = destination, r/m = source
        })
    }
    // VPMINSD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))    // 3-byte VEX-encoded memory form (256-bit)
            m.emit(0x39)                                                       // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                                 // ModRM/SIB/displacement for the memory operand
        })
    }
    // VPMINSD m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; the final bcode(v[0]) argument carries the embedded-broadcast bit.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x39)                           // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)    // ModRM/SIB/displacement; disp8 is scaled by 64
        })
    }
    // VPMINSD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // EVEX P0: register-extension bits folded into the base value
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // EVEX P1: vvvv from the second source
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // EVEX P2: zeroing flag, V', mask register; 0x40 selects 512-bit length
            m.emit(0x39)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: register-direct, reg = destination, r/m = source
        })
    }
    // VPMINSD m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form with embedded-broadcast support (bcode(v[0])).
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x39)                           // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 16)    // ModRM/SIB/displacement; disp8 is scaled by 16
        })
    }
    // VPMINSD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // EVEX P0: register-extension bits folded into the base value
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // EVEX P1: vvvv from the second source
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // EVEX P2: zeroing flag, V', mask register; 0x00 selects 128-bit length
            m.emit(0x39)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: register-direct, reg = destination, r/m = source
        })
    }
    // VPMINSD m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form with embedded-broadcast support (bcode(v[0])).
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x39)                           // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 32)    // ModRM/SIB/displacement; disp8 is scaled by 32
        })
    }
    // VPMINSD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // EVEX P0: register-extension bits folded into the base value
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // EVEX P1: vvvv from the second source
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // EVEX P2: zeroing flag, V', mask register; 0x20 selects 256-bit length
            m.emit(0x39)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: register-direct, reg = destination, r/m = source
        })
    }
    // No operand form matched: the combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPMINSD")
    }
    return p
}
 75380  
// VPMINSQ performs "Minimum of Packed Signed Quadword Integers".
//
// Mnemonic        : VPMINSQ
// Supported forms : (6 forms)
//
//    * VPMINSQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPMINSQ zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPMINSQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPMINSQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPMINSQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPMINSQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPMINSQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Operands are in AT&T order: v0 = first source (reg/mem), v1 = second
    // source register, v2 = destination. EVEX-only instruction (AVX-512);
    // compared with VPMINSD the encoders use 0x85 / base 0xfd — presumably
    // the EVEX.W=1 (quadword) variants of the same fields.
    p := self.alloc("VPMINSQ", 3, Operands { v0, v1, v2 })
    // VPMINSQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; bcode(v[0]) carries the embedded-broadcast bit.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x39)                           // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)    // ModRM/SIB/displacement; disp8 is scaled by 64
        })
    }
    // VPMINSQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // EVEX P0: register-extension bits folded into the base value
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                              // EVEX P1: vvvv from the second source (base 0xfd = W=1 variant)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // EVEX P2: zeroing flag, V', mask register; 0x40 selects 512-bit length
            m.emit(0x39)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: register-direct, reg = destination, r/m = source
        })
    }
    // VPMINSQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form with embedded-broadcast support (bcode(v[0])).
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x39)                           // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 16)    // ModRM/SIB/displacement; disp8 is scaled by 16
        })
    }
    // VPMINSQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // EVEX P0: register-extension bits folded into the base value
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                              // EVEX P1: vvvv from the second source (base 0xfd = W=1 variant)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // EVEX P2: zeroing flag, V', mask register; 0x00 selects 128-bit length
            m.emit(0x39)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: register-direct, reg = destination, r/m = source
        })
    }
    // VPMINSQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form with embedded-broadcast support (bcode(v[0])).
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x39)                           // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 32)    // ModRM/SIB/displacement; disp8 is scaled by 32
        })
    }
    // VPMINSQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // EVEX P0: register-extension bits folded into the base value
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                              // EVEX P1: vvvv from the second source (base 0xfd = W=1 variant)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // EVEX P2: zeroing flag, V', mask register; 0x20 selects 256-bit length
            m.emit(0x39)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: register-direct, reg = destination, r/m = source
        })
    }
    // No operand form matched: the combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPMINSQ")
    }
    return p
}
 75469  
// VPMINSW performs "Minimum of Packed Signed Word Integers".
//
// Mnemonic        : VPMINSW
// Supported forms : (10 forms)
//
//    * VPMINSW xmm, xmm, xmm           [AVX]
//    * VPMINSW m128, xmm, xmm          [AVX]
//    * VPMINSW ymm, ymm, ymm           [AVX2]
//    * VPMINSW m256, ymm, ymm          [AVX2]
//    * VPMINSW zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPMINSW m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPMINSW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMINSW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPMINSW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMINSW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
func (self *Program) VPMINSW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Operands are in AT&T order: v0 = first source (reg/mem), v1 = second
    // source register, v2 = destination. Word-granular instruction, so the
    // EVEX memory forms pass a literal 0 broadcast bit (no embedded broadcast).
    p := self.alloc("VPMINSW", 3, Operands { v0, v1, v2 })
    // VPMINSW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))         // 2-byte VEX-encoded register form
            m.emit(0xea)                                       // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))      // ModRM: register-direct, reg = destination, r/m = source
        })
    }
    // VPMINSW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))   // 2-byte VEX-encoded memory form
            m.emit(0xea)                                       // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                 // ModRM/SIB/displacement for the memory operand
        })
    }
    // VPMINSW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))         // 2-byte VEX-encoded register form (256-bit variant)
            m.emit(0xea)                                       // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))      // ModRM: register-direct, reg = destination, r/m = source
        })
    }
    // VPMINSW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))   // 2-byte VEX-encoded memory form (256-bit variant)
            m.emit(0xea)                                       // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                 // ModRM/SIB/displacement for the memory operand
        })
    }
    // VPMINSW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix escape
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // EVEX P0: register-extension bits folded into the base value
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // EVEX P1: vvvv from the second source
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // EVEX P2: zeroing flag, V', mask register; 0x40 selects 512-bit length
            m.emit(0xea)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: register-direct, reg = destination, r/m = source
        })
    }
    // VPMINSW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; final 0 = no embedded broadcast for word elements.
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xea)                           // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)    // ModRM/SIB/displacement; disp8 is scaled by 64
        })
    }
    // VPMINSW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix escape
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // EVEX P0: register-extension bits folded into the base value
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // EVEX P1: vvvv from the second source
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // EVEX P2: zeroing flag, V', mask register; 0x00 selects 128-bit length
            m.emit(0xea)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: register-direct, reg = destination, r/m = source
        })
    }
    // VPMINSW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; final 0 = no embedded broadcast for word elements.
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xea)                           // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 16)    // ModRM/SIB/displacement; disp8 is scaled by 16
        })
    }
    // VPMINSW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix escape
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // EVEX P0: register-extension bits folded into the base value
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // EVEX P1: vvvv from the second source
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // EVEX P2: zeroing flag, V', mask register; 0x20 selects 256-bit length
            m.emit(0xea)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: register-direct, reg = destination, r/m = source
        })
    }
    // VPMINSW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; final 0 = no embedded broadcast for word elements.
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xea)                           // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 32)    // ModRM/SIB/displacement; disp8 is scaled by 32
        })
    }
    // No operand form matched: the combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPMINSW")
    }
    return p
}
 75602  
// VPMINUB performs "Minimum of Packed Unsigned Byte Integers".
//
// Mnemonic        : VPMINUB
// Supported forms : (10 forms)
//
//    * VPMINUB xmm, xmm, xmm           [AVX]
//    * VPMINUB m128, xmm, xmm          [AVX]
//    * VPMINUB ymm, ymm, ymm           [AVX2]
//    * VPMINUB m256, ymm, ymm          [AVX2]
//    * VPMINUB zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPMINUB m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPMINUB xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMINUB m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPMINUB ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMINUB m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
func (self *Program) VPMINUB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Operands are in AT&T order: v0 = first source (reg/mem), v1 = second
    // source register, v2 = destination. Byte-granular instruction, so the
    // EVEX memory forms pass a literal 0 broadcast bit (no embedded broadcast).
    p := self.alloc("VPMINUB", 3, Operands { v0, v1, v2 })
    // VPMINUB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))         // 2-byte VEX-encoded register form
            m.emit(0xda)                                       // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))      // ModRM: register-direct, reg = destination, r/m = source
        })
    }
    // VPMINUB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))   // 2-byte VEX-encoded memory form
            m.emit(0xda)                                       // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                 // ModRM/SIB/displacement for the memory operand
        })
    }
    // VPMINUB ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))         // 2-byte VEX-encoded register form (256-bit variant)
            m.emit(0xda)                                       // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))      // ModRM: register-direct, reg = destination, r/m = source
        })
    }
    // VPMINUB m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))   // 2-byte VEX-encoded memory form (256-bit variant)
            m.emit(0xda)                                       // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                 // ModRM/SIB/displacement for the memory operand
        })
    }
    // VPMINUB zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix escape
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // EVEX P0: register-extension bits folded into the base value
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // EVEX P1: vvvv from the second source
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // EVEX P2: zeroing flag, V', mask register; 0x40 selects 512-bit length
            m.emit(0xda)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: register-direct, reg = destination, r/m = source
        })
    }
    // VPMINUB m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; final 0 = no embedded broadcast for byte elements.
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xda)                           // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)    // ModRM/SIB/displacement; disp8 is scaled by 64
        })
    }
    // VPMINUB xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix escape
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // EVEX P0: register-extension bits folded into the base value
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // EVEX P1: vvvv from the second source
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // EVEX P2: zeroing flag, V', mask register; 0x00 selects 128-bit length
            m.emit(0xda)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: register-direct, reg = destination, r/m = source
        })
    }
    // VPMINUB m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; final 0 = no embedded broadcast for byte elements.
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xda)                           // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 16)    // ModRM/SIB/displacement; disp8 is scaled by 16
        })
    }
    // VPMINUB ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix escape
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // EVEX P0: register-extension bits folded into the base value
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // EVEX P1: vvvv from the second source
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // EVEX P2: zeroing flag, V', mask register; 0x20 selects 256-bit length
            m.emit(0xda)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: register-direct, reg = destination, r/m = source
        })
    }
    // VPMINUB m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; final 0 = no embedded broadcast for byte elements.
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xda)                           // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 32)    // ModRM/SIB/displacement; disp8 is scaled by 32
        })
    }
    // No operand form matched: the combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPMINUB")
    }
    return p
}
 75735  
// VPMINUD performs "Minimum of Packed Unsigned Doubleword Integers".
//
// Mnemonic        : VPMINUD
// Supported forms : (10 forms)
//
//    * VPMINUD xmm, xmm, xmm                   [AVX]
//    * VPMINUD m128, xmm, xmm                  [AVX]
//    * VPMINUD ymm, ymm, ymm                   [AVX2]
//    * VPMINUD m256, ymm, ymm                  [AVX2]
//    * VPMINUD m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPMINUD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPMINUD m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPMINUD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPMINUD m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPMINUD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPMINUD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Operands are in AT&T order: v0 = first source (reg/mem), v1 = second
    // source register, v2 = destination. Structure mirrors VPMINSD; only the
    // opcode byte (0x3b) differs.
    p := self.alloc("VPMINUD", 3, Operands { v0, v1, v2 })
    // VPMINUD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                              // 3-byte VEX prefix escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))    // VEX byte 1: register high-bit codes XOR'd into the base value
            m.emit(0x79 ^ (hlcode(v[1]) << 3))                        // VEX byte 2: vvvv field taken from the second source
            m.emit(0x3b)                                              // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))             // ModRM: register-direct, reg = destination, r/m = source
        })
    }
    // VPMINUD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))    // 3-byte VEX-encoded memory form
            m.emit(0x3b)                                                       // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                                 // ModRM/SIB/displacement for the memory operand
        })
    }
    // VPMINUD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                              // 3-byte VEX prefix escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))    // VEX byte 1: register high-bit codes XOR'd into the base value
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                        // VEX byte 2: vvvv from second source (base 0x7d selects 256-bit length)
            m.emit(0x3b)                                              // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))             // ModRM: register-direct, reg = destination, r/m = source
        })
    }
    // VPMINUD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))    // 3-byte VEX-encoded memory form (256-bit)
            m.emit(0x3b)                                                       // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                                 // ModRM/SIB/displacement for the memory operand
        })
    }
    // VPMINUD m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; bcode(v[0]) carries the embedded-broadcast bit.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x3b)                           // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)    // ModRM/SIB/displacement; disp8 is scaled by 64
        })
    }
    // VPMINUD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // EVEX P0: register-extension bits folded into the base value
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // EVEX P1: vvvv from the second source
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // EVEX P2: zeroing flag, V', mask register; 0x40 selects 512-bit length
            m.emit(0x3b)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: register-direct, reg = destination, r/m = source
        })
    }
    // VPMINUD m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form with embedded-broadcast support (bcode(v[0])).
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x3b)                           // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 16)    // ModRM/SIB/displacement; disp8 is scaled by 16
        })
    }
    // VPMINUD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // EVEX P0: register-extension bits folded into the base value
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // EVEX P1: vvvv from the second source
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // EVEX P2: zeroing flag, V', mask register; 0x00 selects 128-bit length
            m.emit(0x3b)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: register-direct, reg = destination, r/m = source
        })
    }
    // VPMINUD m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form with embedded-broadcast support (bcode(v[0])).
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x3b)                           // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 32)    // ModRM/SIB/displacement; disp8 is scaled by 32
        })
    }
    // VPMINUD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // EVEX P0: register-extension bits folded into the base value
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                              // EVEX P1: vvvv from the second source
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // EVEX P2: zeroing flag, V', mask register; 0x20 selects 256-bit length
            m.emit(0x3b)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: register-direct, reg = destination, r/m = source
        })
    }
    // No operand form matched: the combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPMINUD")
    }
    return p
}
 75872  
// VPMINUQ performs "Minimum of Packed Unsigned Quadword Integers".
//
// Mnemonic        : VPMINUQ
// Supported forms : (6 forms)
//
//    * VPMINUQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPMINUQ zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPMINUQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPMINUQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPMINUQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPMINUQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPMINUQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Operands are in AT&T order: v0 = first source (reg/mem), v1 = second
    // source register, v2 = destination. EVEX-only instruction (AVX-512);
    // structure mirrors VPMINSQ with opcode 0x3b instead of 0x39.
    p := self.alloc("VPMINUQ", 3, Operands { v0, v1, v2 })
    // VPMINUQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; bcode(v[0]) carries the embedded-broadcast bit.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x3b)                           // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 64)    // ModRM/SIB/displacement; disp8 is scaled by 64
        })
    }
    // VPMINUQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // EVEX P0: register-extension bits folded into the base value
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                              // EVEX P1: vvvv from the second source (base 0xfd = W=1 variant)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // EVEX P2: zeroing flag, V', mask register; 0x40 selects 512-bit length
            m.emit(0x3b)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: register-direct, reg = destination, r/m = source
        })
    }
    // VPMINUQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form with embedded-broadcast support (bcode(v[0])).
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x3b)                           // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 16)    // ModRM/SIB/displacement; disp8 is scaled by 16
        })
    }
    // VPMINUQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // EVEX P0: register-extension bits folded into the base value
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                              // EVEX P1: vvvv from the second source (base 0xfd = W=1 variant)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // EVEX P2: zeroing flag, V', mask register; 0x00 selects 128-bit length
            m.emit(0x3b)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: register-direct, reg = destination, r/m = source
        })
    }
    // VPMINUQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form with embedded-broadcast support (bcode(v[0])).
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x3b)                           // opcode
            m.mrsd(lcode(v[2]), addr(v[0]), 32)    // ModRM/SIB/displacement; disp8 is scaled by 32
        })
    }
    // VPMINUQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                    // EVEX prefix escape
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))  // EVEX P0: register-extension bits folded into the base value
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                              // EVEX P1: vvvv from the second source (base 0xfd = W=1 variant)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // EVEX P2: zeroing flag, V', mask register; 0x20 selects 256-bit length
            m.emit(0x3b)                                                                    // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))                                   // ModRM: register-direct, reg = destination, r/m = source
        })
    }
    // No operand form matched: the combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPMINUQ")
    }
    return p
}
 75961  
 75962  // VPMINUW performs "Minimum of Packed Unsigned Word Integers".
 75963  //
 75964  // Mnemonic        : VPMINUW
 75965  // Supported forms : (10 forms)
 75966  //
 75967  //    * VPMINUW xmm, xmm, xmm           [AVX]
 75968  //    * VPMINUW m128, xmm, xmm          [AVX]
 75969  //    * VPMINUW ymm, ymm, ymm           [AVX2]
 75970  //    * VPMINUW m256, ymm, ymm          [AVX2]
 75971  //    * VPMINUW zmm, zmm, zmm{k}{z}     [AVX512BW]
 75972  //    * VPMINUW m512, zmm, zmm{k}{z}    [AVX512BW]
 75973  //    * VPMINUW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
 75974  //    * VPMINUW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
 75975  //    * VPMINUW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
 75976  //    * VPMINUW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
 75977  //
func (self *Program) VPMINUW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Generated dispatcher: each if-block matches one supported operand form
    // (AVX/AVX2 VEX forms first, then the AVX-512 EVEX forms) and registers
    // its encoder closure on p.
    p := self.alloc("VPMINUW", 3, Operands { v0, v1, v2 })
    // VPMINUW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix (0xc4) assembled by hand, opcode 0x3a, then a
            // register-register ModRM byte.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x3a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMINUW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX prefix via helper, opcode, then the memory operand
            // (ModRM/SIB/disp); scale 1 since VEX has no disp8 compression.
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x3a)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMINUW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x3a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMINUW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x3a)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMINUW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix assembled byte-by-byte, then opcode and
            // register-register ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x3a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMINUW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x3a)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPMINUW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x3a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMINUW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x3a)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPMINUW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x3a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMINUW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x3a)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No operand form matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VPMINUW")
    }
    return p
}
 76098  
 76099  // VPMOVB2M performs "Move Signs of Packed Byte Integers to Mask Register".
 76100  //
 76101  // Mnemonic        : VPMOVB2M
 76102  // Supported forms : (3 forms)
 76103  //
 76104  //    * VPMOVB2M zmm, k    [AVX512BW]
 76105  //    * VPMOVB2M xmm, k    [AVX512BW,AVX512VL]
 76106  //    * VPMOVB2M ymm, k    [AVX512BW,AVX512VL]
 76107  //
func (self *Program) VPMOVB2M(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Generated dispatcher: all three forms are register-to-mask (vector
    // source in v0, mask register destination in v1); the forms differ only
    // in the vector width byte (0x48/0x08/0x28) and required ISA.
    p := self.alloc("VPMOVB2M", 2, Operands { v0, v1 })
    // VPMOVB2M zmm, k
    if isZMM(v0) && isK(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix assembled by hand, opcode 0x29, then a
            // register-register ModRM byte (mask reg in reg field).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit(0x48)
            m.emit(0x29)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVB2M xmm, k
    if isEVEXXMM(v0) && isK(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit(0x08)
            m.emit(0x29)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVB2M ymm, k
    if isEVEXYMM(v0) && isK(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit(0x28)
            m.emit(0x29)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No operand form matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VPMOVB2M")
    }
    return p
}
 76154  
 76155  // VPMOVD2M performs "Move Signs of Packed Doubleword Integers to Mask Register".
 76156  //
 76157  // Mnemonic        : VPMOVD2M
 76158  // Supported forms : (3 forms)
 76159  //
 76160  //    * VPMOVD2M zmm, k    [AVX512DQ]
 76161  //    * VPMOVD2M xmm, k    [AVX512DQ,AVX512VL]
 76162  //    * VPMOVD2M ymm, k    [AVX512DQ,AVX512VL]
 76163  //
func (self *Program) VPMOVD2M(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Generated dispatcher: identical structure to VPMOVB2M but with opcode
    // 0x39 and the AVX512DQ ISA requirement; forms differ only in the vector
    // width byte (0x48/0x08/0x28).
    p := self.alloc("VPMOVD2M", 2, Operands { v0, v1 })
    // VPMOVD2M zmm, k
    if isZMM(v0) && isK(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix assembled by hand, opcode 0x39, then a
            // register-register ModRM byte.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit(0x48)
            m.emit(0x39)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVD2M xmm, k
    if isEVEXXMM(v0) && isK(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit(0x08)
            m.emit(0x39)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVD2M ymm, k
    if isEVEXYMM(v0) && isK(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit(0x28)
            m.emit(0x39)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No operand form matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VPMOVD2M")
    }
    return p
}
 76210  
 76211  // VPMOVDB performs "Down Convert Packed Doubleword Values to Byte Values with Truncation".
 76212  //
 76213  // Mnemonic        : VPMOVDB
 76214  // Supported forms : (6 forms)
 76215  //
 76216  //    * VPMOVDB zmm, xmm{k}{z}     [AVX512F]
 76217  //    * VPMOVDB zmm, m128{k}{z}    [AVX512F]
 76218  //    * VPMOVDB xmm, xmm{k}{z}     [AVX512F,AVX512VL]
 76219  //    * VPMOVDB xmm, m32{k}{z}     [AVX512F,AVX512VL]
 76220  //    * VPMOVDB ymm, xmm{k}{z}     [AVX512F,AVX512VL]
 76221  //    * VPMOVDB ymm, m64{k}{z}     [AVX512F,AVX512VL]
 76222  //
func (self *Program) VPMOVDB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Generated dispatcher for a down-convert: note the *second* operand (v1)
    // is the destination (register or memory), while v0 is the wide source.
    p := self.alloc("VPMOVDB", 2, Operands { v0, v1 })
    // VPMOVDB zmm, xmm{k}{z}
    if isZMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix assembled by hand, opcode 0x31, then a
            // register-register ModRM byte (source zmm in the reg field).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x31)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVDB zmm, m128{k}{z}
    if isZMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix via helper, opcode, then the memory destination
            // (ModRM/SIB/disp) scaled by the 16-byte store size.
            m.evex(0b10, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x31)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)
        })
    }
    // VPMOVDB xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x31)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVDB xmm, m32{k}{z}
    if isEVEXXMM(v0) && isM32kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x31)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // VPMOVDB ymm, xmm{k}{z}
    if isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x31)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVDB ymm, m64{k}{z}
    if isEVEXYMM(v0) && isM64kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x31)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // No operand form matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VPMOVDB")
    }
    return p
}
 76299  
 76300  // VPMOVDW performs "Down Convert Packed Doubleword Values to Word Values with Truncation".
 76301  //
 76302  // Mnemonic        : VPMOVDW
 76303  // Supported forms : (6 forms)
 76304  //
 76305  //    * VPMOVDW zmm, ymm{k}{z}     [AVX512F]
 76306  //    * VPMOVDW zmm, m256{k}{z}    [AVX512F]
 76307  //    * VPMOVDW xmm, xmm{k}{z}     [AVX512F,AVX512VL]
 76308  //    * VPMOVDW xmm, m64{k}{z}     [AVX512F,AVX512VL]
 76309  //    * VPMOVDW ymm, xmm{k}{z}     [AVX512F,AVX512VL]
 76310  //    * VPMOVDW ymm, m128{k}{z}    [AVX512F,AVX512VL]
 76311  //
func (self *Program) VPMOVDW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Generated dispatcher for a down-convert: identical structure to VPMOVDB
    // but with opcode 0x33 and word-sized destinations (hence the doubled
    // memory scales: 32/8/16 instead of 16/4/8).
    p := self.alloc("VPMOVDW", 2, Operands { v0, v1 })
    // VPMOVDW zmm, ymm{k}{z}
    if isZMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix assembled by hand, opcode 0x33, then a
            // register-register ModRM byte (source zmm in the reg field).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x33)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVDW zmm, m256{k}{z}
    if isZMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x33)
            m.mrsd(lcode(v[0]), addr(v[1]), 32)
        })
    }
    // VPMOVDW xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x33)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVDW xmm, m64{k}{z}
    if isEVEXXMM(v0) && isM64kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x33)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VPMOVDW ymm, xmm{k}{z}
    if isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x33)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVDW ymm, m128{k}{z}
    if isEVEXYMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x33)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)
        })
    }
    // No operand form matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VPMOVDW")
    }
    return p
}
 76388  
 76389  // VPMOVM2B performs "Expand Bits of Mask Register to Packed Byte Integers".
 76390  //
 76391  // Mnemonic        : VPMOVM2B
 76392  // Supported forms : (3 forms)
 76393  //
 76394  //    * VPMOVM2B k, zmm    [AVX512BW]
 76395  //    * VPMOVM2B k, xmm    [AVX512BW,AVX512VL]
 76396  //    * VPMOVM2B k, ymm    [AVX512BW,AVX512VL]
 76397  //
func (self *Program) VPMOVM2B(v0 interface{}, v1 interface{}) *Instruction {
    // Generated dispatcher: the inverse direction of VPMOVB2M — mask source
    // in v0, vector destination in v1; forms differ only in the width byte
    // (0x48/0x08/0x28) and required ISA.
    p := self.alloc("VPMOVM2B", 2, Operands { v0, v1 })
    // VPMOVM2B k, zmm
    if isK(v0) && isZMM(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix assembled by hand, opcode 0x28, then a
            // register-register ModRM byte (destination vector in reg field).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit(0x48)
            m.emit(0x28)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVM2B k, xmm
    if isK(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit(0x08)
            m.emit(0x28)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVM2B k, ymm
    if isK(v0) && isEVEXYMM(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit(0x28)
            m.emit(0x28)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No operand form matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VPMOVM2B")
    }
    return p
}
 76444  
 76445  // VPMOVM2D performs "Expand Bits of Mask Register to Packed Doubleword Integers".
 76446  //
 76447  // Mnemonic        : VPMOVM2D
 76448  // Supported forms : (3 forms)
 76449  //
 76450  //    * VPMOVM2D k, zmm    [AVX512DQ]
 76451  //    * VPMOVM2D k, xmm    [AVX512DQ,AVX512VL]
 76452  //    * VPMOVM2D k, ymm    [AVX512DQ,AVX512VL]
 76453  //
func (self *Program) VPMOVM2D(v0 interface{}, v1 interface{}) *Instruction {
    // Generated dispatcher: same shape as VPMOVM2B but opcode 0x38 and the
    // AVX512DQ ISA requirement; mask source in v0, vector destination in v1.
    p := self.alloc("VPMOVM2D", 2, Operands { v0, v1 })
    // VPMOVM2D k, zmm
    if isK(v0) && isZMM(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix assembled by hand, opcode 0x38, then a
            // register-register ModRM byte.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit(0x48)
            m.emit(0x38)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVM2D k, xmm
    if isK(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit(0x08)
            m.emit(0x38)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVM2D k, ymm
    if isK(v0) && isEVEXYMM(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7e)
            m.emit(0x28)
            m.emit(0x38)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No operand form matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VPMOVM2D")
    }
    return p
}
 76500  
 76501  // VPMOVM2Q performs "Expand Bits of Mask Register to Packed Quadword Integers".
 76502  //
 76503  // Mnemonic        : VPMOVM2Q
 76504  // Supported forms : (3 forms)
 76505  //
 76506  //    * VPMOVM2Q k, zmm    [AVX512DQ]
 76507  //    * VPMOVM2Q k, xmm    [AVX512DQ,AVX512VL]
 76508  //    * VPMOVM2Q k, ymm    [AVX512DQ,AVX512VL]
 76509  //
func (self *Program) VPMOVM2Q(v0 interface{}, v1 interface{}) *Instruction {
    // Generated dispatcher: same shape as VPMOVM2D but the third prefix byte
    // is 0xfe instead of 0x7e (presumably the EVEX.W bit distinguishing the
    // quadword variant — see the mkasm generator).
    p := self.alloc("VPMOVM2Q", 2, Operands { v0, v1 })
    // VPMOVM2Q k, zmm
    if isK(v0) && isZMM(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix assembled by hand, opcode 0x38, then a
            // register-register ModRM byte.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit(0x48)
            m.emit(0x38)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVM2Q k, xmm
    if isK(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit(0x08)
            m.emit(0x38)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVM2Q k, ymm
    if isK(v0) && isEVEXYMM(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit(0x28)
            m.emit(0x38)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No operand form matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VPMOVM2Q")
    }
    return p
}
 76556  
 76557  // VPMOVM2W performs "Expand Bits of Mask Register to Packed Word Integers".
 76558  //
 76559  // Mnemonic        : VPMOVM2W
 76560  // Supported forms : (3 forms)
 76561  //
 76562  //    * VPMOVM2W k, zmm    [AVX512BW]
 76563  //    * VPMOVM2W k, xmm    [AVX512BW,AVX512VL]
 76564  //    * VPMOVM2W k, ymm    [AVX512BW,AVX512VL]
 76565  //
func (self *Program) VPMOVM2W(v0 interface{}, v1 interface{}) *Instruction {
    // Generated dispatcher: same shape as VPMOVM2B (opcode 0x28, AVX512BW)
    // but with prefix byte 0xfe instead of 0x7e for the word variant.
    p := self.alloc("VPMOVM2W", 2, Operands { v0, v1 })
    // VPMOVM2W k, zmm
    if isK(v0) && isZMM(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix assembled by hand, opcode 0x28, then a
            // register-register ModRM byte.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit(0x48)
            m.emit(0x28)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVM2W k, xmm
    if isK(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit(0x08)
            m.emit(0x28)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVM2W k, ymm
    if isK(v0) && isEVEXYMM(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit(0x28)
            m.emit(0x28)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No operand form matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VPMOVM2W")
    }
    return p
}
 76612  
 76613  // VPMOVMSKB performs "Move Byte Mask".
 76614  //
 76615  // Mnemonic        : VPMOVMSKB
 76616  // Supported forms : (2 forms)
 76617  //
 76618  //    * VPMOVMSKB xmm, r32    [AVX]
 76619  //    * VPMOVMSKB ymm, r32    [AVX2]
 76620  //
func (self *Program) VPMOVMSKB(v0 interface{}, v1 interface{}) *Instruction {
    // Generated dispatcher: legacy VEX-encoded instruction (no EVEX forms);
    // vector source in v0, 32-bit GPR destination in v1.
    p := self.alloc("VPMOVMSKB", 2, Operands { v0, v1 })
    // VPMOVMSKB xmm, r32
    if isXMM(v0) && isReg32(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix via helper, opcode 0xd7, then a
            // register-register ModRM byte (GPR in the reg field).
            m.vex2(1, hcode(v[1]), v[0], 0)
            m.emit(0xd7)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVMSKB ymm, r32
    if isYMM(v0) && isReg32(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[1]), v[0], 0)
            m.emit(0xd7)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No operand form matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VPMOVMSKB")
    }
    return p
}
 76648  
 76649  // VPMOVQ2M performs "Move Signs of Packed Quadword Integers to Mask Register".
 76650  //
 76651  // Mnemonic        : VPMOVQ2M
 76652  // Supported forms : (3 forms)
 76653  //
 76654  //    * VPMOVQ2M zmm, k    [AVX512DQ]
 76655  //    * VPMOVQ2M xmm, k    [AVX512DQ,AVX512VL]
 76656  //    * VPMOVQ2M ymm, k    [AVX512DQ,AVX512VL]
 76657  //
func (self *Program) VPMOVQ2M(v0 interface{}, v1 interface{}) *Instruction {
    // Generated dispatcher: same shape as VPMOVD2M (opcode 0x39, AVX512DQ)
    // but with prefix byte 0xfe instead of 0x7e for the quadword variant;
    // vector source in v0, mask register destination in v1.
    p := self.alloc("VPMOVQ2M", 2, Operands { v0, v1 })
    // VPMOVQ2M zmm, k
    if isZMM(v0) && isK(v1) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix assembled by hand, opcode 0x39, then a
            // register-register ModRM byte (mask reg in the reg field).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit(0x48)
            m.emit(0x39)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVQ2M xmm, k
    if isEVEXXMM(v0) && isK(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit(0x08)
            m.emit(0x39)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVQ2M ymm, k
    if isEVEXYMM(v0) && isK(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit(0x28)
            m.emit(0x39)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No operand form matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VPMOVQ2M")
    }
    return p
}
 76704  
 76705  // VPMOVQB performs "Down Convert Packed Quadword Values to Byte Values with Truncation".
 76706  //
 76707  // Mnemonic        : VPMOVQB
 76708  // Supported forms : (6 forms)
 76709  //
 76710  //    * VPMOVQB zmm, xmm{k}{z}    [AVX512F]
 76711  //    * VPMOVQB zmm, m64{k}{z}    [AVX512F]
 76712  //    * VPMOVQB xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 76713  //    * VPMOVQB xmm, m16{k}{z}    [AVX512F,AVX512VL]
 76714  //    * VPMOVQB ymm, xmm{k}{z}    [AVX512F,AVX512VL]
 76715  //    * VPMOVQB ymm, m32{k}{z}    [AVX512F,AVX512VL]
 76716  //
 76717  func (self *Program) VPMOVQB(v0 interface{}, v1 interface{}) *Instruction {
 76718      p := self.alloc("VPMOVQB", 2, Operands { v0, v1 })
 76719      // VPMOVQB zmm, xmm{k}{z}
 76720      if isZMM(v0) && isXMMkz(v1) {
 76721          self.require(ISA_AVX512F)
 76722          p.domain = DomainAVX
 76723          p.add(0, func(m *_Encoding, v []interface{}) {
 76724              m.emit(0x62)
 76725              m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
 76726              m.emit(0x7e)
 76727              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
 76728              m.emit(0x32)
 76729              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 76730          })
 76731      }
 76732      // VPMOVQB zmm, m64{k}{z}
 76733      if isZMM(v0) && isM64kz(v1) {
 76734          self.require(ISA_AVX512F)
 76735          p.domain = DomainAVX
 76736          p.add(0, func(m *_Encoding, v []interface{}) {
 76737              m.evex(0b10, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
 76738              m.emit(0x32)
 76739              m.mrsd(lcode(v[0]), addr(v[1]), 8)
 76740          })
 76741      }
 76742      // VPMOVQB xmm, xmm{k}{z}
 76743      if isEVEXXMM(v0) && isXMMkz(v1) {
 76744          self.require(ISA_AVX512VL | ISA_AVX512F)
 76745          p.domain = DomainAVX
 76746          p.add(0, func(m *_Encoding, v []interface{}) {
 76747              m.emit(0x62)
 76748              m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
 76749              m.emit(0x7e)
 76750              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
 76751              m.emit(0x32)
 76752              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 76753          })
 76754      }
 76755      // VPMOVQB xmm, m16{k}{z}
 76756      if isEVEXXMM(v0) && isM16kz(v1) {
 76757          self.require(ISA_AVX512VL | ISA_AVX512F)
 76758          p.domain = DomainAVX
 76759          p.add(0, func(m *_Encoding, v []interface{}) {
 76760              m.evex(0b10, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
 76761              m.emit(0x32)
 76762              m.mrsd(lcode(v[0]), addr(v[1]), 2)
 76763          })
 76764      }
 76765      // VPMOVQB ymm, xmm{k}{z}
 76766      if isEVEXYMM(v0) && isXMMkz(v1) {
 76767          self.require(ISA_AVX512VL | ISA_AVX512F)
 76768          p.domain = DomainAVX
 76769          p.add(0, func(m *_Encoding, v []interface{}) {
 76770              m.emit(0x62)
 76771              m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
 76772              m.emit(0x7e)
 76773              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
 76774              m.emit(0x32)
 76775              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 76776          })
 76777      }
 76778      // VPMOVQB ymm, m32{k}{z}
 76779      if isEVEXYMM(v0) && isM32kz(v1) {
 76780          self.require(ISA_AVX512VL | ISA_AVX512F)
 76781          p.domain = DomainAVX
 76782          p.add(0, func(m *_Encoding, v []interface{}) {
 76783              m.evex(0b10, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
 76784              m.emit(0x32)
 76785              m.mrsd(lcode(v[0]), addr(v[1]), 4)
 76786          })
 76787      }
 76788      if p.len == 0 {
 76789          panic("invalid operands for VPMOVQB")
 76790      }
 76791      return p
 76792  }
 76793  
 76794  // VPMOVQD performs "Down Convert Packed Quadword Values to Doubleword Values with Truncation".
 76795  //
 76796  // Mnemonic        : VPMOVQD
 76797  // Supported forms : (6 forms)
 76798  //
 76799  //    * VPMOVQD zmm, ymm{k}{z}     [AVX512F]
 76800  //    * VPMOVQD zmm, m256{k}{z}    [AVX512F]
 76801  //    * VPMOVQD xmm, xmm{k}{z}     [AVX512F,AVX512VL]
 76802  //    * VPMOVQD xmm, m64{k}{z}     [AVX512F,AVX512VL]
 76803  //    * VPMOVQD ymm, xmm{k}{z}     [AVX512F,AVX512VL]
 76804  //    * VPMOVQD ymm, m128{k}{z}    [AVX512F,AVX512VL]
 76805  //
 76806  func (self *Program) VPMOVQD(v0 interface{}, v1 interface{}) *Instruction {
 76807      p := self.alloc("VPMOVQD", 2, Operands { v0, v1 })
 76808      // VPMOVQD zmm, ymm{k}{z}
 76809      if isZMM(v0) && isYMMkz(v1) {
 76810          self.require(ISA_AVX512F)
 76811          p.domain = DomainAVX
 76812          p.add(0, func(m *_Encoding, v []interface{}) {
 76813              m.emit(0x62)
 76814              m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
 76815              m.emit(0x7e)
 76816              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
 76817              m.emit(0x35)
 76818              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 76819          })
 76820      }
 76821      // VPMOVQD zmm, m256{k}{z}
 76822      if isZMM(v0) && isM256kz(v1) {
 76823          self.require(ISA_AVX512F)
 76824          p.domain = DomainAVX
 76825          p.add(0, func(m *_Encoding, v []interface{}) {
 76826              m.evex(0b10, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
 76827              m.emit(0x35)
 76828              m.mrsd(lcode(v[0]), addr(v[1]), 32)
 76829          })
 76830      }
 76831      // VPMOVQD xmm, xmm{k}{z}
 76832      if isEVEXXMM(v0) && isXMMkz(v1) {
 76833          self.require(ISA_AVX512VL | ISA_AVX512F)
 76834          p.domain = DomainAVX
 76835          p.add(0, func(m *_Encoding, v []interface{}) {
 76836              m.emit(0x62)
 76837              m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
 76838              m.emit(0x7e)
 76839              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
 76840              m.emit(0x35)
 76841              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 76842          })
 76843      }
 76844      // VPMOVQD xmm, m64{k}{z}
 76845      if isEVEXXMM(v0) && isM64kz(v1) {
 76846          self.require(ISA_AVX512VL | ISA_AVX512F)
 76847          p.domain = DomainAVX
 76848          p.add(0, func(m *_Encoding, v []interface{}) {
 76849              m.evex(0b10, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
 76850              m.emit(0x35)
 76851              m.mrsd(lcode(v[0]), addr(v[1]), 8)
 76852          })
 76853      }
 76854      // VPMOVQD ymm, xmm{k}{z}
 76855      if isEVEXYMM(v0) && isXMMkz(v1) {
 76856          self.require(ISA_AVX512VL | ISA_AVX512F)
 76857          p.domain = DomainAVX
 76858          p.add(0, func(m *_Encoding, v []interface{}) {
 76859              m.emit(0x62)
 76860              m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
 76861              m.emit(0x7e)
 76862              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
 76863              m.emit(0x35)
 76864              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 76865          })
 76866      }
 76867      // VPMOVQD ymm, m128{k}{z}
 76868      if isEVEXYMM(v0) && isM128kz(v1) {
 76869          self.require(ISA_AVX512VL | ISA_AVX512F)
 76870          p.domain = DomainAVX
 76871          p.add(0, func(m *_Encoding, v []interface{}) {
 76872              m.evex(0b10, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
 76873              m.emit(0x35)
 76874              m.mrsd(lcode(v[0]), addr(v[1]), 16)
 76875          })
 76876      }
 76877      if p.len == 0 {
 76878          panic("invalid operands for VPMOVQD")
 76879      }
 76880      return p
 76881  }
 76882  
 76883  // VPMOVQW performs "Down Convert Packed Quadword Values to Word Values with Truncation".
 76884  //
 76885  // Mnemonic        : VPMOVQW
 76886  // Supported forms : (6 forms)
 76887  //
 76888  //    * VPMOVQW zmm, xmm{k}{z}     [AVX512F]
 76889  //    * VPMOVQW zmm, m128{k}{z}    [AVX512F]
 76890  //    * VPMOVQW xmm, xmm{k}{z}     [AVX512F,AVX512VL]
 76891  //    * VPMOVQW xmm, m32{k}{z}     [AVX512F,AVX512VL]
 76892  //    * VPMOVQW ymm, xmm{k}{z}     [AVX512F,AVX512VL]
 76893  //    * VPMOVQW ymm, m64{k}{z}     [AVX512F,AVX512VL]
 76894  //
 76895  func (self *Program) VPMOVQW(v0 interface{}, v1 interface{}) *Instruction {
 76896      p := self.alloc("VPMOVQW", 2, Operands { v0, v1 })
 76897      // VPMOVQW zmm, xmm{k}{z}
 76898      if isZMM(v0) && isXMMkz(v1) {
 76899          self.require(ISA_AVX512F)
 76900          p.domain = DomainAVX
 76901          p.add(0, func(m *_Encoding, v []interface{}) {
 76902              m.emit(0x62)
 76903              m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
 76904              m.emit(0x7e)
 76905              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
 76906              m.emit(0x34)
 76907              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 76908          })
 76909      }
 76910      // VPMOVQW zmm, m128{k}{z}
 76911      if isZMM(v0) && isM128kz(v1) {
 76912          self.require(ISA_AVX512F)
 76913          p.domain = DomainAVX
 76914          p.add(0, func(m *_Encoding, v []interface{}) {
 76915              m.evex(0b10, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
 76916              m.emit(0x34)
 76917              m.mrsd(lcode(v[0]), addr(v[1]), 16)
 76918          })
 76919      }
 76920      // VPMOVQW xmm, xmm{k}{z}
 76921      if isEVEXXMM(v0) && isXMMkz(v1) {
 76922          self.require(ISA_AVX512VL | ISA_AVX512F)
 76923          p.domain = DomainAVX
 76924          p.add(0, func(m *_Encoding, v []interface{}) {
 76925              m.emit(0x62)
 76926              m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
 76927              m.emit(0x7e)
 76928              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
 76929              m.emit(0x34)
 76930              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 76931          })
 76932      }
 76933      // VPMOVQW xmm, m32{k}{z}
 76934      if isEVEXXMM(v0) && isM32kz(v1) {
 76935          self.require(ISA_AVX512VL | ISA_AVX512F)
 76936          p.domain = DomainAVX
 76937          p.add(0, func(m *_Encoding, v []interface{}) {
 76938              m.evex(0b10, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
 76939              m.emit(0x34)
 76940              m.mrsd(lcode(v[0]), addr(v[1]), 4)
 76941          })
 76942      }
 76943      // VPMOVQW ymm, xmm{k}{z}
 76944      if isEVEXYMM(v0) && isXMMkz(v1) {
 76945          self.require(ISA_AVX512VL | ISA_AVX512F)
 76946          p.domain = DomainAVX
 76947          p.add(0, func(m *_Encoding, v []interface{}) {
 76948              m.emit(0x62)
 76949              m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
 76950              m.emit(0x7e)
 76951              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
 76952              m.emit(0x34)
 76953              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 76954          })
 76955      }
 76956      // VPMOVQW ymm, m64{k}{z}
 76957      if isEVEXYMM(v0) && isM64kz(v1) {
 76958          self.require(ISA_AVX512VL | ISA_AVX512F)
 76959          p.domain = DomainAVX
 76960          p.add(0, func(m *_Encoding, v []interface{}) {
 76961              m.evex(0b10, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
 76962              m.emit(0x34)
 76963              m.mrsd(lcode(v[0]), addr(v[1]), 8)
 76964          })
 76965      }
 76966      if p.len == 0 {
 76967          panic("invalid operands for VPMOVQW")
 76968      }
 76969      return p
 76970  }
 76971  
 76972  // VPMOVSDB performs "Down Convert Packed Doubleword Values to Byte Values with Signed Saturation".
 76973  //
 76974  // Mnemonic        : VPMOVSDB
 76975  // Supported forms : (6 forms)
 76976  //
 76977  //    * VPMOVSDB zmm, xmm{k}{z}     [AVX512F]
 76978  //    * VPMOVSDB zmm, m128{k}{z}    [AVX512F]
 76979  //    * VPMOVSDB xmm, xmm{k}{z}     [AVX512F,AVX512VL]
 76980  //    * VPMOVSDB xmm, m32{k}{z}     [AVX512F,AVX512VL]
 76981  //    * VPMOVSDB ymm, xmm{k}{z}     [AVX512F,AVX512VL]
 76982  //    * VPMOVSDB ymm, m64{k}{z}     [AVX512F,AVX512VL]
 76983  //
 76984  func (self *Program) VPMOVSDB(v0 interface{}, v1 interface{}) *Instruction {
 76985      p := self.alloc("VPMOVSDB", 2, Operands { v0, v1 })
 76986      // VPMOVSDB zmm, xmm{k}{z}
 76987      if isZMM(v0) && isXMMkz(v1) {
 76988          self.require(ISA_AVX512F)
 76989          p.domain = DomainAVX
 76990          p.add(0, func(m *_Encoding, v []interface{}) {
 76991              m.emit(0x62)
 76992              m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
 76993              m.emit(0x7e)
 76994              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
 76995              m.emit(0x21)
 76996              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 76997          })
 76998      }
 76999      // VPMOVSDB zmm, m128{k}{z}
 77000      if isZMM(v0) && isM128kz(v1) {
 77001          self.require(ISA_AVX512F)
 77002          p.domain = DomainAVX
 77003          p.add(0, func(m *_Encoding, v []interface{}) {
 77004              m.evex(0b10, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
 77005              m.emit(0x21)
 77006              m.mrsd(lcode(v[0]), addr(v[1]), 16)
 77007          })
 77008      }
 77009      // VPMOVSDB xmm, xmm{k}{z}
 77010      if isEVEXXMM(v0) && isXMMkz(v1) {
 77011          self.require(ISA_AVX512VL | ISA_AVX512F)
 77012          p.domain = DomainAVX
 77013          p.add(0, func(m *_Encoding, v []interface{}) {
 77014              m.emit(0x62)
 77015              m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
 77016              m.emit(0x7e)
 77017              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
 77018              m.emit(0x21)
 77019              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 77020          })
 77021      }
 77022      // VPMOVSDB xmm, m32{k}{z}
 77023      if isEVEXXMM(v0) && isM32kz(v1) {
 77024          self.require(ISA_AVX512VL | ISA_AVX512F)
 77025          p.domain = DomainAVX
 77026          p.add(0, func(m *_Encoding, v []interface{}) {
 77027              m.evex(0b10, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
 77028              m.emit(0x21)
 77029              m.mrsd(lcode(v[0]), addr(v[1]), 4)
 77030          })
 77031      }
 77032      // VPMOVSDB ymm, xmm{k}{z}
 77033      if isEVEXYMM(v0) && isXMMkz(v1) {
 77034          self.require(ISA_AVX512VL | ISA_AVX512F)
 77035          p.domain = DomainAVX
 77036          p.add(0, func(m *_Encoding, v []interface{}) {
 77037              m.emit(0x62)
 77038              m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
 77039              m.emit(0x7e)
 77040              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
 77041              m.emit(0x21)
 77042              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 77043          })
 77044      }
 77045      // VPMOVSDB ymm, m64{k}{z}
 77046      if isEVEXYMM(v0) && isM64kz(v1) {
 77047          self.require(ISA_AVX512VL | ISA_AVX512F)
 77048          p.domain = DomainAVX
 77049          p.add(0, func(m *_Encoding, v []interface{}) {
 77050              m.evex(0b10, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
 77051              m.emit(0x21)
 77052              m.mrsd(lcode(v[0]), addr(v[1]), 8)
 77053          })
 77054      }
 77055      if p.len == 0 {
 77056          panic("invalid operands for VPMOVSDB")
 77057      }
 77058      return p
 77059  }
 77060  
 77061  // VPMOVSDW performs "Down Convert Packed Doubleword Values to Word Values with Signed Saturation".
 77062  //
 77063  // Mnemonic        : VPMOVSDW
 77064  // Supported forms : (6 forms)
 77065  //
 77066  //    * VPMOVSDW zmm, ymm{k}{z}     [AVX512F]
 77067  //    * VPMOVSDW zmm, m256{k}{z}    [AVX512F]
 77068  //    * VPMOVSDW xmm, xmm{k}{z}     [AVX512F,AVX512VL]
 77069  //    * VPMOVSDW xmm, m64{k}{z}     [AVX512F,AVX512VL]
 77070  //    * VPMOVSDW ymm, xmm{k}{z}     [AVX512F,AVX512VL]
 77071  //    * VPMOVSDW ymm, m128{k}{z}    [AVX512F,AVX512VL]
 77072  //
 77073  func (self *Program) VPMOVSDW(v0 interface{}, v1 interface{}) *Instruction {
 77074      p := self.alloc("VPMOVSDW", 2, Operands { v0, v1 })
 77075      // VPMOVSDW zmm, ymm{k}{z}
 77076      if isZMM(v0) && isYMMkz(v1) {
 77077          self.require(ISA_AVX512F)
 77078          p.domain = DomainAVX
 77079          p.add(0, func(m *_Encoding, v []interface{}) {
 77080              m.emit(0x62)
 77081              m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
 77082              m.emit(0x7e)
 77083              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
 77084              m.emit(0x23)
 77085              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 77086          })
 77087      }
 77088      // VPMOVSDW zmm, m256{k}{z}
 77089      if isZMM(v0) && isM256kz(v1) {
 77090          self.require(ISA_AVX512F)
 77091          p.domain = DomainAVX
 77092          p.add(0, func(m *_Encoding, v []interface{}) {
 77093              m.evex(0b10, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
 77094              m.emit(0x23)
 77095              m.mrsd(lcode(v[0]), addr(v[1]), 32)
 77096          })
 77097      }
 77098      // VPMOVSDW xmm, xmm{k}{z}
 77099      if isEVEXXMM(v0) && isXMMkz(v1) {
 77100          self.require(ISA_AVX512VL | ISA_AVX512F)
 77101          p.domain = DomainAVX
 77102          p.add(0, func(m *_Encoding, v []interface{}) {
 77103              m.emit(0x62)
 77104              m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
 77105              m.emit(0x7e)
 77106              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
 77107              m.emit(0x23)
 77108              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 77109          })
 77110      }
 77111      // VPMOVSDW xmm, m64{k}{z}
 77112      if isEVEXXMM(v0) && isM64kz(v1) {
 77113          self.require(ISA_AVX512VL | ISA_AVX512F)
 77114          p.domain = DomainAVX
 77115          p.add(0, func(m *_Encoding, v []interface{}) {
 77116              m.evex(0b10, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
 77117              m.emit(0x23)
 77118              m.mrsd(lcode(v[0]), addr(v[1]), 8)
 77119          })
 77120      }
 77121      // VPMOVSDW ymm, xmm{k}{z}
 77122      if isEVEXYMM(v0) && isXMMkz(v1) {
 77123          self.require(ISA_AVX512VL | ISA_AVX512F)
 77124          p.domain = DomainAVX
 77125          p.add(0, func(m *_Encoding, v []interface{}) {
 77126              m.emit(0x62)
 77127              m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
 77128              m.emit(0x7e)
 77129              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
 77130              m.emit(0x23)
 77131              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 77132          })
 77133      }
 77134      // VPMOVSDW ymm, m128{k}{z}
 77135      if isEVEXYMM(v0) && isM128kz(v1) {
 77136          self.require(ISA_AVX512VL | ISA_AVX512F)
 77137          p.domain = DomainAVX
 77138          p.add(0, func(m *_Encoding, v []interface{}) {
 77139              m.evex(0b10, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
 77140              m.emit(0x23)
 77141              m.mrsd(lcode(v[0]), addr(v[1]), 16)
 77142          })
 77143      }
 77144      if p.len == 0 {
 77145          panic("invalid operands for VPMOVSDW")
 77146      }
 77147      return p
 77148  }
 77149  
 77150  // VPMOVSQB performs "Down Convert Packed Quadword Values to Byte Values with Signed Saturation".
 77151  //
 77152  // Mnemonic        : VPMOVSQB
 77153  // Supported forms : (6 forms)
 77154  //
 77155  //    * VPMOVSQB zmm, xmm{k}{z}    [AVX512F]
 77156  //    * VPMOVSQB zmm, m64{k}{z}    [AVX512F]
 77157  //    * VPMOVSQB xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 77158  //    * VPMOVSQB xmm, m16{k}{z}    [AVX512F,AVX512VL]
 77159  //    * VPMOVSQB ymm, xmm{k}{z}    [AVX512F,AVX512VL]
 77160  //    * VPMOVSQB ymm, m32{k}{z}    [AVX512F,AVX512VL]
 77161  //
 77162  func (self *Program) VPMOVSQB(v0 interface{}, v1 interface{}) *Instruction {
 77163      p := self.alloc("VPMOVSQB", 2, Operands { v0, v1 })
 77164      // VPMOVSQB zmm, xmm{k}{z}
 77165      if isZMM(v0) && isXMMkz(v1) {
 77166          self.require(ISA_AVX512F)
 77167          p.domain = DomainAVX
 77168          p.add(0, func(m *_Encoding, v []interface{}) {
 77169              m.emit(0x62)
 77170              m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
 77171              m.emit(0x7e)
 77172              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
 77173              m.emit(0x22)
 77174              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 77175          })
 77176      }
 77177      // VPMOVSQB zmm, m64{k}{z}
 77178      if isZMM(v0) && isM64kz(v1) {
 77179          self.require(ISA_AVX512F)
 77180          p.domain = DomainAVX
 77181          p.add(0, func(m *_Encoding, v []interface{}) {
 77182              m.evex(0b10, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
 77183              m.emit(0x22)
 77184              m.mrsd(lcode(v[0]), addr(v[1]), 8)
 77185          })
 77186      }
 77187      // VPMOVSQB xmm, xmm{k}{z}
 77188      if isEVEXXMM(v0) && isXMMkz(v1) {
 77189          self.require(ISA_AVX512VL | ISA_AVX512F)
 77190          p.domain = DomainAVX
 77191          p.add(0, func(m *_Encoding, v []interface{}) {
 77192              m.emit(0x62)
 77193              m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
 77194              m.emit(0x7e)
 77195              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
 77196              m.emit(0x22)
 77197              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 77198          })
 77199      }
 77200      // VPMOVSQB xmm, m16{k}{z}
 77201      if isEVEXXMM(v0) && isM16kz(v1) {
 77202          self.require(ISA_AVX512VL | ISA_AVX512F)
 77203          p.domain = DomainAVX
 77204          p.add(0, func(m *_Encoding, v []interface{}) {
 77205              m.evex(0b10, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
 77206              m.emit(0x22)
 77207              m.mrsd(lcode(v[0]), addr(v[1]), 2)
 77208          })
 77209      }
 77210      // VPMOVSQB ymm, xmm{k}{z}
 77211      if isEVEXYMM(v0) && isXMMkz(v1) {
 77212          self.require(ISA_AVX512VL | ISA_AVX512F)
 77213          p.domain = DomainAVX
 77214          p.add(0, func(m *_Encoding, v []interface{}) {
 77215              m.emit(0x62)
 77216              m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
 77217              m.emit(0x7e)
 77218              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
 77219              m.emit(0x22)
 77220              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 77221          })
 77222      }
 77223      // VPMOVSQB ymm, m32{k}{z}
 77224      if isEVEXYMM(v0) && isM32kz(v1) {
 77225          self.require(ISA_AVX512VL | ISA_AVX512F)
 77226          p.domain = DomainAVX
 77227          p.add(0, func(m *_Encoding, v []interface{}) {
 77228              m.evex(0b10, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
 77229              m.emit(0x22)
 77230              m.mrsd(lcode(v[0]), addr(v[1]), 4)
 77231          })
 77232      }
 77233      if p.len == 0 {
 77234          panic("invalid operands for VPMOVSQB")
 77235      }
 77236      return p
 77237  }
 77238  
 77239  // VPMOVSQD performs "Down Convert Packed Quadword Values to Doubleword Values with Signed Saturation".
 77240  //
 77241  // Mnemonic        : VPMOVSQD
 77242  // Supported forms : (6 forms)
 77243  //
 77244  //    * VPMOVSQD zmm, ymm{k}{z}     [AVX512F]
 77245  //    * VPMOVSQD zmm, m256{k}{z}    [AVX512F]
 77246  //    * VPMOVSQD xmm, xmm{k}{z}     [AVX512F,AVX512VL]
 77247  //    * VPMOVSQD xmm, m64{k}{z}     [AVX512F,AVX512VL]
 77248  //    * VPMOVSQD ymm, xmm{k}{z}     [AVX512F,AVX512VL]
 77249  //    * VPMOVSQD ymm, m128{k}{z}    [AVX512F,AVX512VL]
 77250  //
 77251  func (self *Program) VPMOVSQD(v0 interface{}, v1 interface{}) *Instruction {
 77252      p := self.alloc("VPMOVSQD", 2, Operands { v0, v1 })
 77253      // VPMOVSQD zmm, ymm{k}{z}
 77254      if isZMM(v0) && isYMMkz(v1) {
 77255          self.require(ISA_AVX512F)
 77256          p.domain = DomainAVX
 77257          p.add(0, func(m *_Encoding, v []interface{}) {
 77258              m.emit(0x62)
 77259              m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
 77260              m.emit(0x7e)
 77261              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
 77262              m.emit(0x25)
 77263              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 77264          })
 77265      }
 77266      // VPMOVSQD zmm, m256{k}{z}
 77267      if isZMM(v0) && isM256kz(v1) {
 77268          self.require(ISA_AVX512F)
 77269          p.domain = DomainAVX
 77270          p.add(0, func(m *_Encoding, v []interface{}) {
 77271              m.evex(0b10, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
 77272              m.emit(0x25)
 77273              m.mrsd(lcode(v[0]), addr(v[1]), 32)
 77274          })
 77275      }
 77276      // VPMOVSQD xmm, xmm{k}{z}
 77277      if isEVEXXMM(v0) && isXMMkz(v1) {
 77278          self.require(ISA_AVX512VL | ISA_AVX512F)
 77279          p.domain = DomainAVX
 77280          p.add(0, func(m *_Encoding, v []interface{}) {
 77281              m.emit(0x62)
 77282              m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
 77283              m.emit(0x7e)
 77284              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
 77285              m.emit(0x25)
 77286              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 77287          })
 77288      }
 77289      // VPMOVSQD xmm, m64{k}{z}
 77290      if isEVEXXMM(v0) && isM64kz(v1) {
 77291          self.require(ISA_AVX512VL | ISA_AVX512F)
 77292          p.domain = DomainAVX
 77293          p.add(0, func(m *_Encoding, v []interface{}) {
 77294              m.evex(0b10, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
 77295              m.emit(0x25)
 77296              m.mrsd(lcode(v[0]), addr(v[1]), 8)
 77297          })
 77298      }
 77299      // VPMOVSQD ymm, xmm{k}{z}
 77300      if isEVEXYMM(v0) && isXMMkz(v1) {
 77301          self.require(ISA_AVX512VL | ISA_AVX512F)
 77302          p.domain = DomainAVX
 77303          p.add(0, func(m *_Encoding, v []interface{}) {
 77304              m.emit(0x62)
 77305              m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
 77306              m.emit(0x7e)
 77307              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
 77308              m.emit(0x25)
 77309              m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
 77310          })
 77311      }
 77312      // VPMOVSQD ymm, m128{k}{z}
 77313      if isEVEXYMM(v0) && isM128kz(v1) {
 77314          self.require(ISA_AVX512VL | ISA_AVX512F)
 77315          p.domain = DomainAVX
 77316          p.add(0, func(m *_Encoding, v []interface{}) {
 77317              m.evex(0b10, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
 77318              m.emit(0x25)
 77319              m.mrsd(lcode(v[0]), addr(v[1]), 16)
 77320          })
 77321      }
 77322      if p.len == 0 {
 77323          panic("invalid operands for VPMOVSQD")
 77324      }
 77325      return p
 77326  }
 77327  
// VPMOVSQW performs "Down Convert Packed Quadword Values to Word Values with Signed Saturation".
//
// Mnemonic        : VPMOVSQW
// Supported forms : (6 forms)
//
//    * VPMOVSQW zmm, xmm{k}{z}     [AVX512F]
//    * VPMOVSQW zmm, m128{k}{z}    [AVX512F]
//    * VPMOVSQW xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVSQW xmm, m32{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVSQW ymm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVSQW ymm, m64{k}{z}     [AVX512F,AVX512VL]
//
// Each candidate operand combination below registers one encoder closure; if
// none matches, the function panics. Note this is a "down convert", so the
// ModRM reg field carries the SOURCE register (v[0]) and rm the destination.
func (self *Program) VPMOVSQW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVSQW", 2, Operands { v0, v1 })
    // VPMOVSQW zmm, xmm{k}{z}
    if isZMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 4-byte EVEX prefix, then opcode and ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))  // P0: register-extension bits (R/X/B/R')
            m.emit(0x7e)  // P1: opcode-map / pp selector for this instruction
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)  // P2: z-bit, mask register; 0x48 selects 512-bit vector length
            m.emit(0x24)  // opcode
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))  // ModRM: mod=11, reg=src (v[0]), rm=dst (v[1])
        })
    }
    // VPMOVSQW zmm, m128{k}{z}
    if isZMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)  // EVEX prefix for memory destination; L'L=0b10 (512-bit)
            m.emit(0x24)  // opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 16)  // ModRM/SIB + displacement; disp8 compressed with a 16-byte scale
        })
    }
    // VPMOVSQW xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))  // P0: register-extension bits
            m.emit(0x7e)  // P1
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)  // P2: 0x08 selects 128-bit vector length
            m.emit(0x24)  // opcode
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))  // ModRM: reg=src, rm=dst
        })
    }
    // VPMOVSQW xmm, m32{k}{z}
    if isEVEXXMM(v0) && isM32kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)  // L'L=0b00 (128-bit)
            m.emit(0x24)  // opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 4)  // disp8 scale: 4 bytes
        })
    }
    // VPMOVSQW ymm, xmm{k}{z}
    if isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))  // P0: register-extension bits
            m.emit(0x7e)  // P1
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)  // P2: 0x28 selects 256-bit vector length
            m.emit(0x24)  // opcode
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))  // ModRM: reg=src, rm=dst
        })
    }
    // VPMOVSQW ymm, m64{k}{z}
    if isEVEXYMM(v0) && isM64kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)  // L'L=0b01 (256-bit)
            m.emit(0x24)  // opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 8)  // disp8 scale: 8 bytes
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMOVSQW")
    }
    return p
}
 77416  
// VPMOVSWB performs "Down Convert Packed Word Values to Byte Values with Signed Saturation".
//
// Mnemonic        : VPMOVSWB
// Supported forms : (6 forms)
//
//    * VPMOVSWB zmm, ymm{k}{z}     [AVX512BW]
//    * VPMOVSWB zmm, m256{k}{z}    [AVX512BW]
//    * VPMOVSWB xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMOVSWB xmm, m64{k}{z}     [AVX512BW,AVX512VL]
//    * VPMOVSWB ymm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMOVSWB ymm, m128{k}{z}    [AVX512BW,AVX512VL]
//
// Down-convert instruction: the ModRM reg field carries the SOURCE register
// (v[0]) and rm the destination (v[1]). Panics if no form matches.
func (self *Program) VPMOVSWB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVSWB", 2, Operands { v0, v1 })
    // VPMOVSWB zmm, ymm{k}{z}
    if isZMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 4-byte EVEX prefix, then opcode and ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))  // P0: register-extension bits (R/X/B/R')
            m.emit(0x7e)  // P1: opcode-map / pp selector
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)  // P2: z-bit, mask register; 0x48 = 512-bit vector length
            m.emit(0x20)  // opcode
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))  // ModRM: mod=11, reg=src, rm=dst
        })
    }
    // VPMOVSWB zmm, m256{k}{z}
    if isZMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)  // EVEX prefix for memory destination; L'L=0b10 (512-bit)
            m.emit(0x20)  // opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 32)  // disp8 compressed with a 32-byte scale
        })
    }
    // VPMOVSWB xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))  // P0
            m.emit(0x7e)  // P1
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)  // P2: 0x08 = 128-bit vector length
            m.emit(0x20)  // opcode
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))  // ModRM: reg=src, rm=dst
        })
    }
    // VPMOVSWB xmm, m64{k}{z}
    if isEVEXXMM(v0) && isM64kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)  // L'L=0b00 (128-bit)
            m.emit(0x20)  // opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 8)  // disp8 scale: 8 bytes
        })
    }
    // VPMOVSWB ymm, xmm{k}{z}
    if isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))  // P0
            m.emit(0x7e)  // P1
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)  // P2: 0x28 = 256-bit vector length
            m.emit(0x20)  // opcode
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))  // ModRM: reg=src, rm=dst
        })
    }
    // VPMOVSWB ymm, m128{k}{z}
    if isEVEXYMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)  // L'L=0b01 (256-bit)
            m.emit(0x20)  // opcode
            m.mrsd(lcode(v[0]), addr(v[1]), 16)  // disp8 scale: 16 bytes
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMOVSWB")
    }
    return p
}
 77505  
// VPMOVSXBD performs "Move Packed Byte Integers to Doubleword Integers with Sign Extension".
//
// Mnemonic        : VPMOVSXBD
// Supported forms : (10 forms)
//
//    * VPMOVSXBD xmm, xmm           [AVX]
//    * VPMOVSXBD m32, xmm           [AVX]
//    * VPMOVSXBD xmm, ymm           [AVX2]
//    * VPMOVSXBD m64, ymm           [AVX2]
//    * VPMOVSXBD xmm, zmm{k}{z}     [AVX512F]
//    * VPMOVSXBD m128, zmm{k}{z}    [AVX512F]
//    * VPMOVSXBD xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVSXBD xmm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVSXBD m32, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVSXBD m64, ymm{k}{z}     [AVX512F,AVX512VL]
//
// Sign-extend instruction: unlike the VPMOV down-converts, the ModRM reg
// field carries the DESTINATION register (v[1]) and rm the source (v[0]).
// Panics if no form matches.
func (self *Program) VPMOVSXBD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVSXBD", 2, Operands { v0, v1 })
    // VPMOVSXBD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix, then opcode and ModRM.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))  // VEX byte 1: register-extension bits + opcode map
            m.emit(0x79)  // VEX byte 2: pp/L selector (128-bit form)
            m.emit(0x21)  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=dst (v[1]), rm=src (v[0])
        })
    }
    // VPMOVSXBD m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)  // 3-byte VEX prefix for a memory source
            m.emit(0x21)  // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // ModRM/SIB + displacement (scale 1: no disp8 compression under VEX)
        })
    }
    // VPMOVSXBD xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))  // VEX byte 1
            m.emit(0x7d)  // VEX byte 2: 256-bit form
            m.emit(0x21)  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: reg=dst, rm=src
        })
    }
    // VPMOVSXBD m64, ymm
    if isM64(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)  // VEX prefix, 256-bit form
            m.emit(0x21)  // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPMOVSXBD xmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 4-byte EVEX prefix, then opcode and ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))  // P0: register-extension bits (R/X/B/R')
            m.emit(0x7d)  // P1: opcode-map / pp selector
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)  // P2: z-bit, mask register; 0x48 = 512-bit vector length
            m.emit(0x21)  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: reg=dst, rm=src
        })
    }
    // VPMOVSXBD m128, zmm{k}{z}
    if isM128(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)  // EVEX prefix for memory source; L'L=0b10 (512-bit)
            m.emit(0x21)  // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 16)  // disp8 compressed with a 16-byte scale
        })
    }
    // VPMOVSXBD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))  // P0
            m.emit(0x7d)  // P1
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)  // P2: 0x08 = 128-bit vector length
            m.emit(0x21)  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: reg=dst, rm=src
        })
    }
    // VPMOVSXBD xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))  // P0
            m.emit(0x7d)  // P1
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)  // P2: 0x28 = 256-bit vector length
            m.emit(0x21)  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: reg=dst, rm=src
        })
    }
    // VPMOVSXBD m32, xmm{k}{z}
    if isM32(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)  // L'L=0b00 (128-bit)
            m.emit(0x21)  // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 4)  // disp8 scale: 4 bytes
        })
    }
    // VPMOVSXBD m64, ymm{k}{z}
    if isM64(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)  // L'L=0b01 (256-bit)
            m.emit(0x21)  // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 8)  // disp8 scale: 8 bytes
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMOVSXBD")
    }
    return p
}
 77642  
// VPMOVSXBQ performs "Move Packed Byte Integers to Quadword Integers with Sign Extension".
//
// Mnemonic        : VPMOVSXBQ
// Supported forms : (10 forms)
//
//    * VPMOVSXBQ xmm, xmm          [AVX]
//    * VPMOVSXBQ m16, xmm          [AVX]
//    * VPMOVSXBQ xmm, ymm          [AVX2]
//    * VPMOVSXBQ m32, ymm          [AVX2]
//    * VPMOVSXBQ xmm, zmm{k}{z}    [AVX512F]
//    * VPMOVSXBQ m64, zmm{k}{z}    [AVX512F]
//    * VPMOVSXBQ xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPMOVSXBQ xmm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPMOVSXBQ m16, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPMOVSXBQ m32, ymm{k}{z}    [AVX512F,AVX512VL]
//
// Sign-extend instruction: the ModRM reg field carries the DESTINATION
// register (v[1]) and rm the source (v[0]). Panics if no form matches.
func (self *Program) VPMOVSXBQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVSXBQ", 2, Operands { v0, v1 })
    // VPMOVSXBQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix, then opcode and ModRM.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))  // VEX byte 1: register-extension bits + opcode map
            m.emit(0x79)  // VEX byte 2: pp/L selector (128-bit form)
            m.emit(0x22)  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VPMOVSXBQ m16, xmm
    if isM16(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)  // 3-byte VEX prefix for a memory source
            m.emit(0x22)  // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // scale 1: no disp8 compression under VEX
        })
    }
    // VPMOVSXBQ xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))  // VEX byte 1
            m.emit(0x7d)  // VEX byte 2: 256-bit form
            m.emit(0x22)  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: reg=dst, rm=src
        })
    }
    // VPMOVSXBQ m32, ymm
    if isM32(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)  // VEX prefix, 256-bit form
            m.emit(0x22)  // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPMOVSXBQ xmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 4-byte EVEX prefix, then opcode and ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))  // P0: register-extension bits (R/X/B/R')
            m.emit(0x7d)  // P1: opcode-map / pp selector
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)  // P2: z-bit, mask register; 0x48 = 512-bit vector length
            m.emit(0x22)  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: reg=dst, rm=src
        })
    }
    // VPMOVSXBQ m64, zmm{k}{z}
    if isM64(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)  // EVEX prefix for memory source; L'L=0b10 (512-bit)
            m.emit(0x22)  // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 8)  // disp8 compressed with an 8-byte scale
        })
    }
    // VPMOVSXBQ xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))  // P0
            m.emit(0x7d)  // P1
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)  // P2: 0x08 = 128-bit vector length
            m.emit(0x22)  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: reg=dst, rm=src
        })
    }
    // VPMOVSXBQ xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))  // P0
            m.emit(0x7d)  // P1
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)  // P2: 0x28 = 256-bit vector length
            m.emit(0x22)  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: reg=dst, rm=src
        })
    }
    // VPMOVSXBQ m16, xmm{k}{z}
    if isM16(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)  // L'L=0b00 (128-bit)
            m.emit(0x22)  // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 2)  // disp8 scale: 2 bytes
        })
    }
    // VPMOVSXBQ m32, ymm{k}{z}
    if isM32(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)  // L'L=0b01 (256-bit)
            m.emit(0x22)  // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 4)  // disp8 scale: 4 bytes
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMOVSXBQ")
    }
    return p
}
 77779  
// VPMOVSXBW performs "Move Packed Byte Integers to Word Integers with Sign Extension".
//
// Mnemonic        : VPMOVSXBW
// Supported forms : (10 forms)
//
//    * VPMOVSXBW xmm, xmm           [AVX]
//    * VPMOVSXBW m64, xmm           [AVX]
//    * VPMOVSXBW xmm, ymm           [AVX2]
//    * VPMOVSXBW m128, ymm          [AVX2]
//    * VPMOVSXBW ymm, zmm{k}{z}     [AVX512BW]
//    * VPMOVSXBW m256, zmm{k}{z}    [AVX512BW]
//    * VPMOVSXBW xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMOVSXBW xmm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMOVSXBW m64, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMOVSXBW m128, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Sign-extend instruction: the ModRM reg field carries the DESTINATION
// register (v[1]) and rm the source (v[0]). Panics if no form matches.
func (self *Program) VPMOVSXBW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVSXBW", 2, Operands { v0, v1 })
    // VPMOVSXBW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix, then opcode and ModRM.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))  // VEX byte 1: register-extension bits + opcode map
            m.emit(0x79)  // VEX byte 2: pp/L selector (128-bit form)
            m.emit(0x20)  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VPMOVSXBW m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)  // 3-byte VEX prefix for a memory source
            m.emit(0x20)  // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // scale 1: no disp8 compression under VEX
        })
    }
    // VPMOVSXBW xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))  // VEX byte 1
            m.emit(0x7d)  // VEX byte 2: 256-bit form
            m.emit(0x20)  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: reg=dst, rm=src
        })
    }
    // VPMOVSXBW m128, ymm
    if isM128(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)  // VEX prefix, 256-bit form
            m.emit(0x20)  // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPMOVSXBW ymm, zmm{k}{z}
    if isEVEXYMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 4-byte EVEX prefix, then opcode and ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))  // P0: register-extension bits (R/X/B/R')
            m.emit(0x7d)  // P1: opcode-map / pp selector
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)  // P2: z-bit, mask register; 0x48 = 512-bit vector length
            m.emit(0x20)  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: reg=dst, rm=src
        })
    }
    // VPMOVSXBW m256, zmm{k}{z}
    if isM256(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)  // EVEX prefix for memory source; L'L=0b10 (512-bit)
            m.emit(0x20)  // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 32)  // disp8 compressed with a 32-byte scale
        })
    }
    // VPMOVSXBW xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))  // P0
            m.emit(0x7d)  // P1
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)  // P2: 0x08 = 128-bit vector length
            m.emit(0x20)  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: reg=dst, rm=src
        })
    }
    // VPMOVSXBW xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))  // P0
            m.emit(0x7d)  // P1
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)  // P2: 0x28 = 256-bit vector length
            m.emit(0x20)  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: reg=dst, rm=src
        })
    }
    // VPMOVSXBW m64, xmm{k}{z}
    if isM64(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)  // L'L=0b00 (128-bit)
            m.emit(0x20)  // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 8)  // disp8 scale: 8 bytes
        })
    }
    // VPMOVSXBW m128, ymm{k}{z}
    if isM128(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)  // L'L=0b01 (256-bit)
            m.emit(0x20)  // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 16)  // disp8 scale: 16 bytes
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMOVSXBW")
    }
    return p
}
 77916  
// VPMOVSXDQ performs "Move Packed Doubleword Integers to Quadword Integers with Sign Extension".
//
// Mnemonic        : VPMOVSXDQ
// Supported forms : (10 forms)
//
//    * VPMOVSXDQ xmm, xmm           [AVX]
//    * VPMOVSXDQ m64, xmm           [AVX]
//    * VPMOVSXDQ xmm, ymm           [AVX2]
//    * VPMOVSXDQ m128, ymm          [AVX2]
//    * VPMOVSXDQ ymm, zmm{k}{z}     [AVX512F]
//    * VPMOVSXDQ m256, zmm{k}{z}    [AVX512F]
//    * VPMOVSXDQ xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVSXDQ xmm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVSXDQ m64, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVSXDQ m128, ymm{k}{z}    [AVX512F,AVX512VL]
//
// Sign-extend instruction: the ModRM reg field carries the DESTINATION
// register (v[1]) and rm the source (v[0]). Panics if no form matches.
func (self *Program) VPMOVSXDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVSXDQ", 2, Operands { v0, v1 })
    // VPMOVSXDQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix, then opcode and ModRM.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))  // VEX byte 1: register-extension bits + opcode map
            m.emit(0x79)  // VEX byte 2: pp/L selector (128-bit form)
            m.emit(0x25)  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: mod=11, reg=dst, rm=src
        })
    }
    // VPMOVSXDQ m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)  // 3-byte VEX prefix for a memory source
            m.emit(0x25)  // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1)  // scale 1: no disp8 compression under VEX
        })
    }
    // VPMOVSXDQ xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))  // VEX byte 1
            m.emit(0x7d)  // VEX byte 2: 256-bit form
            m.emit(0x25)  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: reg=dst, rm=src
        })
    }
    // VPMOVSXDQ m128, ymm
    if isM128(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)  // VEX prefix, 256-bit form
            m.emit(0x25)  // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPMOVSXDQ ymm, zmm{k}{z}
    if isEVEXYMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 4-byte EVEX prefix, then opcode and ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))  // P0: register-extension bits (R/X/B/R')
            m.emit(0x7d)  // P1: opcode-map / pp selector
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)  // P2: z-bit, mask register; 0x48 = 512-bit vector length
            m.emit(0x25)  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: reg=dst, rm=src
        })
    }
    // VPMOVSXDQ m256, zmm{k}{z}
    if isM256(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)  // EVEX prefix for memory source; L'L=0b10 (512-bit)
            m.emit(0x25)  // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 32)  // disp8 compressed with a 32-byte scale
        })
    }
    // VPMOVSXDQ xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))  // P0
            m.emit(0x7d)  // P1
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)  // P2: 0x08 = 128-bit vector length
            m.emit(0x25)  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: reg=dst, rm=src
        })
    }
    // VPMOVSXDQ xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))  // P0
            m.emit(0x7d)  // P1
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)  // P2: 0x28 = 256-bit vector length
            m.emit(0x25)  // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: reg=dst, rm=src
        })
    }
    // VPMOVSXDQ m64, xmm{k}{z}
    if isM64(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)  // L'L=0b00 (128-bit)
            m.emit(0x25)  // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 8)  // disp8 scale: 8 bytes
        })
    }
    // VPMOVSXDQ m128, ymm{k}{z}
    if isM128(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)  // L'L=0b01 (256-bit)
            m.emit(0x25)  // opcode
            m.mrsd(lcode(v[1]), addr(v[0]), 16)  // disp8 scale: 16 bytes
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMOVSXDQ")
    }
    return p
}
 78053  
// VPMOVSXWD performs "Move Packed Word Integers to Doubleword Integers with Sign Extension".
//
// Mnemonic        : VPMOVSXWD
// Supported forms : (10 forms)
//
//    * VPMOVSXWD xmm, xmm           [AVX]
//    * VPMOVSXWD m64, xmm           [AVX]
//    * VPMOVSXWD xmm, ymm           [AVX2]
//    * VPMOVSXWD m128, ymm          [AVX2]
//    * VPMOVSXWD ymm, zmm{k}{z}     [AVX512F]
//    * VPMOVSXWD m256, zmm{k}{z}    [AVX512F]
//    * VPMOVSXWD xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVSXWD xmm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVSXWD m64, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVSXWD m128, ymm{k}{z}    [AVX512F,AVX512VL]
//
// v0 is the source operand, v1 the destination; every operand-pattern test
// below that matches registers a candidate encoder (opcode 0x23) for that form.
func (self *Program) VPMOVSXWD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVSXWD", 2, Operands { v0, v1 })
    // VPMOVSXWD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // Register form: hand-rolled 3-byte VEX prefix (0xC4), opcode,
        // then a mod=11 ModRM byte with the destination in reg and the source in r/m.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0x23)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXWD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // Memory form: VEX prefix via helper; mrsd scale 1 means the
        // displacement is emitted as-is (no disp8 compression under VEX).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x23)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPMOVSXWD xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        // Same register encoding as the xmm,xmm form but with the 256-bit
        // VEX.L variant of the third prefix byte (0x7d vs 0x79).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)
            m.emit(0x23)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXWD m128, ymm
    if isM128(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x23)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPMOVSXWD ymm, zmm{k}{z}
    if isEVEXYMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX register form: prefix (0x62) emitted byte-by-byte; the fourth
        // byte packs the zeroing bit, mask register and vector length
        // (0x48 selects the 512-bit length here).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x23)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXWD m256, zmm{k}{z}
    if isM256(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX memory form: the final mrsd argument (32) is the disp8*N
        // compression scale matching the 256-bit memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x23)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VPMOVSXWD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // As the zmm form above, but 0x08 selects the 128-bit vector length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x23)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXWD xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // 0x28 selects the 256-bit vector length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x23)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXWD m64, xmm{k}{z}
    if isM64(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // disp8*N scale 8 for the 64-bit memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x23)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VPMOVSXWD m128, ymm{k}{z}
    if isM128(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // disp8*N scale 16 for the 128-bit memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x23)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPMOVSXWD")
    }
    return p
}
 78190  
// VPMOVSXWQ performs "Move Packed Word Integers to Quadword Integers with Sign Extension".
//
// Mnemonic        : VPMOVSXWQ
// Supported forms : (10 forms)
//
//    * VPMOVSXWQ xmm, xmm           [AVX]
//    * VPMOVSXWQ m32, xmm           [AVX]
//    * VPMOVSXWQ xmm, ymm           [AVX2]
//    * VPMOVSXWQ m64, ymm           [AVX2]
//    * VPMOVSXWQ xmm, zmm{k}{z}     [AVX512F]
//    * VPMOVSXWQ m128, zmm{k}{z}    [AVX512F]
//    * VPMOVSXWQ xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVSXWQ xmm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVSXWQ m32, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVSXWQ m64, ymm{k}{z}     [AVX512F,AVX512VL]
//
// v0 is the source operand, v1 the destination; each matching operand pattern
// below registers a candidate encoder (opcode 0x24) for that form.
func (self *Program) VPMOVSXWQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVSXWQ", 2, Operands { v0, v1 })
    // VPMOVSXWQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // Register form: hand-rolled 3-byte VEX prefix (0xC4), opcode,
        // then a mod=11 ModRM byte (destination in reg, source in r/m).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0x24)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXWQ m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // Memory form: VEX prefix via helper; scale 1 means no disp8 compression.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x24)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPMOVSXWQ xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        // Same register encoding, 256-bit VEX.L variant of the prefix (0x7d).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)
            m.emit(0x24)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXWQ m64, ymm
    if isM64(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x24)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPMOVSXWQ xmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX register form: prefix (0x62) emitted byte-by-byte; the fourth
        // byte packs zeroing bit, mask register and vector length (0x48 = 512-bit).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x24)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXWQ m128, zmm{k}{z}
    if isM128(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX memory form: disp8*N compression scale 16 for the 128-bit operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x24)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VPMOVSXWQ xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // 0x08 selects the 128-bit vector length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x24)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXWQ xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // 0x28 selects the 256-bit vector length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x24)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVSXWQ m32, xmm{k}{z}
    if isM32(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // disp8*N scale 4 for the 32-bit memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x24)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VPMOVSXWQ m64, ymm{k}{z}
    if isM64(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // disp8*N scale 8 for the 64-bit memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x24)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPMOVSXWQ")
    }
    return p
}
 78327  
// VPMOVUSDB performs "Down Convert Packed Doubleword Values to Byte Values with Unsigned Saturation".
//
// Mnemonic        : VPMOVUSDB
// Supported forms : (6 forms)
//
//    * VPMOVUSDB zmm, xmm{k}{z}     [AVX512F]
//    * VPMOVUSDB zmm, m128{k}{z}    [AVX512F]
//    * VPMOVUSDB xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVUSDB xmm, m32{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVUSDB ymm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVUSDB ymm, m64{k}{z}     [AVX512F,AVX512VL]
//
// Down-convert direction: the wider source register is v0 and the narrower
// destination (register or memory) is v1, so — unlike the VPMOVSX* encoders —
// v[0] supplies the ModRM reg field and v[1] the r/m field. Opcode is 0x11.
func (self *Program) VPMOVUSDB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVUSDB", 2, Operands { v0, v1 })
    // VPMOVUSDB zmm, xmm{k}{z}
    if isZMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX register form: prefix (0x62) emitted byte-by-byte; the fourth
        // byte packs zeroing bit, mask register and vector length (0x48 = 512-bit).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVUSDB zmm, m128{k}{z}
    if isZMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX store form: disp8*N compression scale 16 for the 128-bit destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)
        })
    }
    // VPMOVUSDB xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // 0x08 selects the 128-bit vector length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVUSDB xmm, m32{k}{z}
    if isEVEXXMM(v0) && isM32kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // disp8*N scale 4 for the 32-bit memory destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // VPMOVUSDB ymm, xmm{k}{z}
    if isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // 0x28 selects the 256-bit vector length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVUSDB ymm, m64{k}{z}
    if isEVEXYMM(v0) && isM64kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // disp8*N scale 8 for the 64-bit memory destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x11)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPMOVUSDB")
    }
    return p
}
 78416  
// VPMOVUSDW performs "Down Convert Packed Doubleword Values to Word Values with Unsigned Saturation".
//
// Mnemonic        : VPMOVUSDW
// Supported forms : (6 forms)
//
//    * VPMOVUSDW zmm, ymm{k}{z}     [AVX512F]
//    * VPMOVUSDW zmm, m256{k}{z}    [AVX512F]
//    * VPMOVUSDW xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVUSDW xmm, m64{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVUSDW ymm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVUSDW ymm, m128{k}{z}    [AVX512F,AVX512VL]
//
// Down-convert direction: source register is v0, narrower destination
// (register or memory) is v1 — v[0] goes in ModRM reg, v[1] in r/m.
// Opcode is 0x13.
func (self *Program) VPMOVUSDW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVUSDW", 2, Operands { v0, v1 })
    // VPMOVUSDW zmm, ymm{k}{z}
    if isZMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX register form: prefix (0x62) emitted byte-by-byte; the fourth
        // byte packs zeroing bit, mask register and vector length (0x48 = 512-bit).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x13)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVUSDW zmm, m256{k}{z}
    if isZMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX store form: disp8*N compression scale 32 for the 256-bit destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x13)
            m.mrsd(lcode(v[0]), addr(v[1]), 32)
        })
    }
    // VPMOVUSDW xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // 0x08 selects the 128-bit vector length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x13)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVUSDW xmm, m64{k}{z}
    if isEVEXXMM(v0) && isM64kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // disp8*N scale 8 for the 64-bit memory destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x13)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VPMOVUSDW ymm, xmm{k}{z}
    if isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // 0x28 selects the 256-bit vector length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x13)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVUSDW ymm, m128{k}{z}
    if isEVEXYMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // disp8*N scale 16 for the 128-bit memory destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x13)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPMOVUSDW")
    }
    return p
}
 78505  
// VPMOVUSQB performs "Down Convert Packed Quadword Values to Byte Values with Unsigned Saturation".
//
// Mnemonic        : VPMOVUSQB
// Supported forms : (6 forms)
//
//    * VPMOVUSQB zmm, xmm{k}{z}    [AVX512F]
//    * VPMOVUSQB zmm, m64{k}{z}    [AVX512F]
//    * VPMOVUSQB xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPMOVUSQB xmm, m16{k}{z}    [AVX512F,AVX512VL]
//    * VPMOVUSQB ymm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPMOVUSQB ymm, m32{k}{z}    [AVX512F,AVX512VL]
//
// Down-convert direction: source register is v0, narrower destination
// (register or memory) is v1 — v[0] goes in ModRM reg, v[1] in r/m.
// Opcode is 0x12.
func (self *Program) VPMOVUSQB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVUSQB", 2, Operands { v0, v1 })
    // VPMOVUSQB zmm, xmm{k}{z}
    if isZMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX register form: prefix (0x62) emitted byte-by-byte; the fourth
        // byte packs zeroing bit, mask register and vector length (0x48 = 512-bit).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x12)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVUSQB zmm, m64{k}{z}
    if isZMM(v0) && isM64kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX store form: disp8*N compression scale 8 for the 64-bit destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x12)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VPMOVUSQB xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // 0x08 selects the 128-bit vector length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x12)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVUSQB xmm, m16{k}{z}
    if isEVEXXMM(v0) && isM16kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // disp8*N scale 2 for the 16-bit memory destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x12)
            m.mrsd(lcode(v[0]), addr(v[1]), 2)
        })
    }
    // VPMOVUSQB ymm, xmm{k}{z}
    if isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // 0x28 selects the 256-bit vector length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x12)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVUSQB ymm, m32{k}{z}
    if isEVEXYMM(v0) && isM32kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // disp8*N scale 4 for the 32-bit memory destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x12)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPMOVUSQB")
    }
    return p
}
 78594  
// VPMOVUSQD performs "Down Convert Packed Quadword Values to Doubleword Values with Unsigned Saturation".
//
// Mnemonic        : VPMOVUSQD
// Supported forms : (6 forms)
//
//    * VPMOVUSQD zmm, ymm{k}{z}     [AVX512F]
//    * VPMOVUSQD zmm, m256{k}{z}    [AVX512F]
//    * VPMOVUSQD xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVUSQD xmm, m64{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVUSQD ymm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVUSQD ymm, m128{k}{z}    [AVX512F,AVX512VL]
//
// Down-convert direction: source register is v0, narrower destination
// (register or memory) is v1 — v[0] goes in ModRM reg, v[1] in r/m.
// Opcode is 0x15.
func (self *Program) VPMOVUSQD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVUSQD", 2, Operands { v0, v1 })
    // VPMOVUSQD zmm, ymm{k}{z}
    if isZMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX register form: prefix (0x62) emitted byte-by-byte; the fourth
        // byte packs zeroing bit, mask register and vector length (0x48 = 512-bit).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVUSQD zmm, m256{k}{z}
    if isZMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX store form: disp8*N compression scale 32 for the 256-bit destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x15)
            m.mrsd(lcode(v[0]), addr(v[1]), 32)
        })
    }
    // VPMOVUSQD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // 0x08 selects the 128-bit vector length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVUSQD xmm, m64{k}{z}
    if isEVEXXMM(v0) && isM64kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // disp8*N scale 8 for the 64-bit memory destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x15)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VPMOVUSQD ymm, xmm{k}{z}
    if isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // 0x28 selects the 256-bit vector length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVUSQD ymm, m128{k}{z}
    if isEVEXYMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // disp8*N scale 16 for the 128-bit memory destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x15)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPMOVUSQD")
    }
    return p
}
 78683  
// VPMOVUSQW performs "Down Convert Packed Quadword Values to Word Values with Unsigned Saturation".
//
// Mnemonic        : VPMOVUSQW
// Supported forms : (6 forms)
//
//    * VPMOVUSQW zmm, xmm{k}{z}     [AVX512F]
//    * VPMOVUSQW zmm, m128{k}{z}    [AVX512F]
//    * VPMOVUSQW xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVUSQW xmm, m32{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVUSQW ymm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVUSQW ymm, m64{k}{z}     [AVX512F,AVX512VL]
//
// Down-convert direction: source register is v0, narrower destination
// (register or memory) is v1 — v[0] goes in ModRM reg, v[1] in r/m.
// Opcode is 0x14.
func (self *Program) VPMOVUSQW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVUSQW", 2, Operands { v0, v1 })
    // VPMOVUSQW zmm, xmm{k}{z}
    if isZMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX register form: prefix (0x62) emitted byte-by-byte; the fourth
        // byte packs zeroing bit, mask register and vector length (0x48 = 512-bit).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVUSQW zmm, m128{k}{z}
    if isZMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX store form: disp8*N compression scale 16 for the 128-bit destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x14)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)
        })
    }
    // VPMOVUSQW xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // 0x08 selects the 128-bit vector length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVUSQW xmm, m32{k}{z}
    if isEVEXXMM(v0) && isM32kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // disp8*N scale 4 for the 32-bit memory destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x14)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // VPMOVUSQW ymm, xmm{k}{z}
    if isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // 0x28 selects the 256-bit vector length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVUSQW ymm, m64{k}{z}
    if isEVEXYMM(v0) && isM64kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // disp8*N scale 8 for the 64-bit memory destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x14)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPMOVUSQW")
    }
    return p
}
 78772  
// VPMOVUSWB performs "Down Convert Packed Word Values to Byte Values with Unsigned Saturation".
//
// Mnemonic        : VPMOVUSWB
// Supported forms : (6 forms)
//
//    * VPMOVUSWB zmm, ymm{k}{z}     [AVX512BW]
//    * VPMOVUSWB zmm, m256{k}{z}    [AVX512BW]
//    * VPMOVUSWB xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMOVUSWB xmm, m64{k}{z}     [AVX512BW,AVX512VL]
//    * VPMOVUSWB ymm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMOVUSWB ymm, m128{k}{z}    [AVX512BW,AVX512VL]
//
func (self *Program) VPMOVUSWB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVUSWB", 2, Operands { v0, v1 })
    // Each operand form below that matches (v0, v1) registers one candidate
    // encoder on p; if none match, the operands are invalid (see panic at end).
    // VPMOVUSWB zmm, ymm{k}{z}
    if isZMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        // Register form: hand-rolled 4-byte EVEX prefix (0x62 marker + P0/P1/P2),
        // opcode 0x10, then ModRM with mod=11, reg = source v[0], rm = dest v[1].
        // In P2, 0x48 selects the 512-bit source length; zcode/kcode encode the
        // destination's {z}/{k} decorations.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVUSWB zmm, m256{k}{z}
    if isZMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        // Memory form: EVEX prefix via m.evex, opcode 0x10, then ModRM/SIB/disp
        // with disp8 compressed by a 32-byte scale (m256 operand).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[0]), addr(v[1]), 32)
        })
    }
    // VPMOVUSWB xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        // 128-bit (xmm) variant of the register form; 0x08 in P2 selects
        // the 128-bit vector length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVUSWB xmm, m64{k}{z}
    if isEVEXXMM(v0) && isM64kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        // Memory form, disp8 scaled by 8 bytes (m64 operand).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VPMOVUSWB ymm, xmm{k}{z}
    if isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        // 256-bit (ymm source) variant of the register form; 0x28 in P2
        // selects the 256-bit vector length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVUSWB ymm, m128{k}{z}
    if isEVEXYMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        // Memory form, disp8 scaled by 16 bytes (m128 operand).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMOVUSWB")
    }
    return p
}
 78861  
// VPMOVW2M performs "Move Signs of Packed Word Integers to Mask Register".
//
// Mnemonic        : VPMOVW2M
// Supported forms : (3 forms)
//
//    * VPMOVW2M zmm, k    [AVX512BW]
//    * VPMOVW2M xmm, k    [AVX512BW,AVX512VL]
//    * VPMOVW2M ymm, k    [AVX512BW,AVX512VL]
//
func (self *Program) VPMOVW2M(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVW2M", 2, Operands { v0, v1 })
    // All three forms are register-to-mask-register, so each uses a
    // hand-rolled 4-byte EVEX prefix, opcode 0x29, and a mod=11 ModRM
    // with reg = destination mask v[1] and rm = source vector v[0].
    // Only the P2 byte differs, selecting the source vector length.
    // VPMOVW2M zmm, k
    if isZMM(v0) && isK(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        // P2 = 0x48: 512-bit (zmm) source; no masking bits — the destination
        // is a k register, so there are no {k}{z} decorations here.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit(0x48)
            m.emit(0x29)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVW2M xmm, k
    if isEVEXXMM(v0) && isK(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        // P2 = 0x08: 128-bit (xmm) source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit(0x08)
            m.emit(0x29)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVW2M ymm, k
    if isEVEXYMM(v0) && isK(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        // P2 = 0x28: 256-bit (ymm) source.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfe)
            m.emit(0x28)
            m.emit(0x29)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMOVW2M")
    }
    return p
}
 78917  
// VPMOVWB performs "Down Convert Packed Word Values to Byte Values with Truncation".
//
// Mnemonic        : VPMOVWB
// Supported forms : (6 forms)
//
//    * VPMOVWB zmm, ymm{k}{z}     [AVX512BW]
//    * VPMOVWB zmm, m256{k}{z}    [AVX512BW]
//    * VPMOVWB xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMOVWB xmm, m64{k}{z}     [AVX512BW,AVX512VL]
//    * VPMOVWB ymm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMOVWB ymm, m128{k}{z}    [AVX512BW,AVX512VL]
//
func (self *Program) VPMOVWB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVWB", 2, Operands { v0, v1 })
    // Same encoding layout as the other VPMOV* down-converts, with opcode
    // 0x30: reg field carries the source v[0], rm carries the destination v[1].
    // VPMOVWB zmm, ymm{k}{z}
    if isZMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        // Register form; P2's 0x48 selects the 512-bit source length, and
        // zcode/kcode encode the destination's {z}/{k} decorations.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x30)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVWB zmm, m256{k}{z}
    if isZMM(v0) && isM256kz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        // Memory form; disp8 compressed by a 32-byte scale (m256 operand).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x30)
            m.mrsd(lcode(v[0]), addr(v[1]), 32)
        })
    }
    // VPMOVWB xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        // Register form, 128-bit source (0x08 in P2).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x30)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVWB xmm, m64{k}{z}
    if isEVEXXMM(v0) && isM64kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        // Memory form; disp8 scaled by 8 bytes (m64 operand).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x30)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VPMOVWB ymm, xmm{k}{z}
    if isEVEXYMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        // Register form, 256-bit source (0x28 in P2).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[0]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[0]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x30)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // VPMOVWB ymm, m128{k}{z}
    if isEVEXYMM(v0) && isM128kz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        // Memory form; disp8 scaled by 16 bytes (m128 operand).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x30)
            m.mrsd(lcode(v[0]), addr(v[1]), 16)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMOVWB")
    }
    return p
}
 79006  
// VPMOVZXBD performs "Move Packed Byte Integers to Doubleword Integers with Zero Extension".
//
// Mnemonic        : VPMOVZXBD
// Supported forms : (10 forms)
//
//    * VPMOVZXBD xmm, xmm           [AVX]
//    * VPMOVZXBD m32, xmm           [AVX]
//    * VPMOVZXBD xmm, ymm           [AVX2]
//    * VPMOVZXBD m64, ymm           [AVX2]
//    * VPMOVZXBD xmm, zmm{k}{z}     [AVX512F]
//    * VPMOVZXBD m128, zmm{k}{z}    [AVX512F]
//    * VPMOVZXBD xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVZXBD xmm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVZXBD m32, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVZXBD m64, ymm{k}{z}     [AVX512F,AVX512VL]
//
func (self *Program) VPMOVZXBD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVZXBD", 2, Operands { v0, v1 })
    // Unlike the down-convert VPMOV* family, the extend family puts the
    // DESTINATION v[1] in the ModRM reg field and the source v[0] in rm.
    // AVX/AVX2 forms use a VEX prefix; AVX-512 forms use EVEX. Note that
    // both a VEX and an EVEX candidate can be registered for overlapping
    // register operands — registration order is significant.
    // VPMOVZXBD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // 3-byte VEX prefix (0xc4), opcode 0x31, mod=11 ModRM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0x31)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXBD m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX memory form; scale 1 — no disp8 compression under VEX.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x31)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPMOVZXBD xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        // VEX register form with the 256-bit destination (0x7d vs 0x79 byte).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)
            m.emit(0x31)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXBD m64, ymm
    if isM64(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        // VEX memory form for the ymm destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x31)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPMOVZXBD xmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX register form; 0x48 in P2 selects the 512-bit destination length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x31)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXBD m128, zmm{k}{z}
    if isM128(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX memory form; disp8 compressed by a 16-byte scale (m128 operand).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x31)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VPMOVZXBD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX register form, 128-bit destination (0x08 in P2).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x31)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXBD xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX register form, 256-bit destination (0x28 in P2).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x31)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXBD m32, xmm{k}{z}
    if isM32(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX memory form; disp8 scaled by 4 bytes (m32 operand).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x31)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VPMOVZXBD m64, ymm{k}{z}
    if isM64(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX memory form; disp8 scaled by 8 bytes (m64 operand).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x31)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMOVZXBD")
    }
    return p
}
 79143  
// VPMOVZXBQ performs "Move Packed Byte Integers to Quadword Integers with Zero Extension".
//
// Mnemonic        : VPMOVZXBQ
// Supported forms : (10 forms)
//
//    * VPMOVZXBQ xmm, xmm          [AVX]
//    * VPMOVZXBQ m16, xmm          [AVX]
//    * VPMOVZXBQ xmm, ymm          [AVX2]
//    * VPMOVZXBQ m32, ymm          [AVX2]
//    * VPMOVZXBQ xmm, zmm{k}{z}    [AVX512F]
//    * VPMOVZXBQ m64, zmm{k}{z}    [AVX512F]
//    * VPMOVZXBQ xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPMOVZXBQ xmm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPMOVZXBQ m16, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPMOVZXBQ m32, ymm{k}{z}    [AVX512F,AVX512VL]
//
func (self *Program) VPMOVZXBQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVZXBQ", 2, Operands { v0, v1 })
    // Extend family layout: ModRM reg = destination v[1], rm = source v[0].
    // Opcode 0x32 throughout; AVX/AVX2 forms use VEX, AVX-512 forms use EVEX.
    // VPMOVZXBQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // 3-byte VEX prefix, register-to-register ModRM (mod=11).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0x32)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXBQ m16, xmm
    if isM16(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX memory form; scale 1 — no disp8 compression under VEX.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x32)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPMOVZXBQ xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        // VEX register form with the 256-bit destination (0x7d vs 0x79 byte).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)
            m.emit(0x32)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXBQ m32, ymm
    if isM32(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        // VEX memory form for the ymm destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x32)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPMOVZXBQ xmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX register form; 0x48 in P2 selects the 512-bit destination length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x32)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXBQ m64, zmm{k}{z}
    if isM64(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX memory form; disp8 compressed by an 8-byte scale (m64 operand).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x32)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VPMOVZXBQ xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX register form, 128-bit destination (0x08 in P2).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x32)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXBQ xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX register form, 256-bit destination (0x28 in P2).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x32)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXBQ m16, xmm{k}{z}
    if isM16(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX memory form; disp8 scaled by 2 bytes (m16 operand).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x32)
            m.mrsd(lcode(v[1]), addr(v[0]), 2)
        })
    }
    // VPMOVZXBQ m32, ymm{k}{z}
    if isM32(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX memory form; disp8 scaled by 4 bytes (m32 operand).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x32)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMOVZXBQ")
    }
    return p
}
 79280  
// VPMOVZXBW performs "Move Packed Byte Integers to Word Integers with Zero Extension".
//
// Mnemonic        : VPMOVZXBW
// Supported forms : (10 forms)
//
//    * VPMOVZXBW xmm, xmm           [AVX]
//    * VPMOVZXBW m64, xmm           [AVX]
//    * VPMOVZXBW xmm, ymm           [AVX2]
//    * VPMOVZXBW m128, ymm          [AVX2]
//    * VPMOVZXBW ymm, zmm{k}{z}     [AVX512BW]
//    * VPMOVZXBW m256, zmm{k}{z}    [AVX512BW]
//    * VPMOVZXBW xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMOVZXBW xmm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMOVZXBW m64, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMOVZXBW m128, ymm{k}{z}    [AVX512BW,AVX512VL]
//
func (self *Program) VPMOVZXBW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVZXBW", 2, Operands { v0, v1 })
    // Extend family layout: ModRM reg = destination v[1], rm = source v[0].
    // Opcode 0x30 throughout; AVX/AVX2 forms use VEX, AVX-512 forms use EVEX.
    // VPMOVZXBW xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // 3-byte VEX prefix, register-to-register ModRM (mod=11).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0x30)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXBW m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX memory form; scale 1 — no disp8 compression under VEX.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x30)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPMOVZXBW xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        // VEX register form with the 256-bit destination (0x7d vs 0x79 byte).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)
            m.emit(0x30)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXBW m128, ymm
    if isM128(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        // VEX memory form for the ymm destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x30)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPMOVZXBW ymm, zmm{k}{z}
    if isEVEXYMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        // EVEX register form; 0x48 in P2 selects the 512-bit destination length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x30)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXBW m256, zmm{k}{z}
    if isM256(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        // EVEX memory form; disp8 compressed by a 32-byte scale (m256 operand).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x30)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VPMOVZXBW xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        // EVEX register form, 128-bit destination (0x08 in P2).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x30)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXBW xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        // EVEX register form, 256-bit destination (0x28 in P2).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x30)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXBW m64, xmm{k}{z}
    if isM64(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        // EVEX memory form; disp8 scaled by 8 bytes (m64 operand).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x30)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VPMOVZXBW m128, ymm{k}{z}
    if isM128(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        // EVEX memory form; disp8 scaled by 16 bytes (m128 operand).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x30)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMOVZXBW")
    }
    return p
}
 79417  
// VPMOVZXDQ performs "Move Packed Doubleword Integers to Quadword Integers with Zero Extension".
//
// Mnemonic        : VPMOVZXDQ
// Supported forms : (10 forms)
//
//    * VPMOVZXDQ xmm, xmm           [AVX]
//    * VPMOVZXDQ m64, xmm           [AVX]
//    * VPMOVZXDQ xmm, ymm           [AVX2]
//    * VPMOVZXDQ m128, ymm          [AVX2]
//    * VPMOVZXDQ ymm, zmm{k}{z}     [AVX512F]
//    * VPMOVZXDQ m256, zmm{k}{z}    [AVX512F]
//    * VPMOVZXDQ xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVZXDQ xmm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVZXDQ m64, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVZXDQ m128, ymm{k}{z}    [AVX512F,AVX512VL]
//
func (self *Program) VPMOVZXDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVZXDQ", 2, Operands { v0, v1 })
    // Extend family layout: ModRM reg = destination v[1], rm = source v[0].
    // Opcode 0x35 throughout; AVX/AVX2 forms use VEX, AVX-512 forms use EVEX.
    // VPMOVZXDQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // 3-byte VEX prefix, register-to-register ModRM (mod=11).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0x35)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXDQ m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX memory form; scale 1 — no disp8 compression under VEX.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x35)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPMOVZXDQ xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        // VEX register form with the 256-bit destination (0x7d vs 0x79 byte).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)
            m.emit(0x35)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXDQ m128, ymm
    if isM128(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        // VEX memory form for the ymm destination.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x35)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPMOVZXDQ ymm, zmm{k}{z}
    if isEVEXYMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX register form; 0x48 in P2 selects the 512-bit destination length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x35)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXDQ m256, zmm{k}{z}
    if isM256(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX memory form; disp8 compressed by a 32-byte scale (m256 operand).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x35)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VPMOVZXDQ xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX register form, 128-bit destination (0x08 in P2).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x35)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXDQ xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX register form, 256-bit destination (0x28 in P2).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x35)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXDQ m64, xmm{k}{z}
    if isM64(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX memory form; disp8 scaled by 8 bytes (m64 operand).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x35)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VPMOVZXDQ m128, ymm{k}{z}
    if isM128(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX memory form; disp8 scaled by 16 bytes (m128 operand).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x35)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMOVZXDQ")
    }
    return p
}
 79554  
// VPMOVZXWD performs "Move Packed Word Integers to Doubleword Integers with Zero Extension".
//
// Mnemonic        : VPMOVZXWD
// Supported forms : (10 forms)
//
//    * VPMOVZXWD xmm, xmm           [AVX]
//    * VPMOVZXWD m64, xmm           [AVX]
//    * VPMOVZXWD xmm, ymm           [AVX2]
//    * VPMOVZXWD m128, ymm          [AVX2]
//    * VPMOVZXWD ymm, zmm{k}{z}     [AVX512F]
//    * VPMOVZXWD m256, zmm{k}{z}    [AVX512F]
//    * VPMOVZXWD xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVZXWD xmm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVZXWD m64, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVZXWD m128, ymm{k}{z}    [AVX512F,AVX512VL]
//
// Each guarded branch below matches one operand form and registers an encoder
// closure emitting that form's exact byte sequence (opcode 0x33 in every form).
// Several branches may match; instruction selection happens later via p.add.
func (self *Program) VPMOVZXWD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVZXWD", 2, Operands { v0, v1 })
    // VPMOVZXWD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                           // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5)) // VEX byte 1: R/B from high register bits
            m.emit(0x79)                                           // VEX byte 2: 128-bit, 0x66 prefix
            m.emit(0x33)                                           // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))          // ModRM: reg = v1 (dst), rm = v0 (src)
        })
    }
    // VPMOVZXWD m64, xmm
    if isM64(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x33)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                     // ModRM + SIB/disp; scale 1 (no disp8 compression for VEX)
        })
    }
    // VPMOVZXWD xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)                                           // VEX byte 2: 256-bit, 0x66 prefix
            m.emit(0x33)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXWD m128, ymm
    if isM128(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x33)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPMOVZXWD ymm, zmm{k}{z}
    if isEVEXYMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // EVEX P0: R/X/B/R' bits
            m.emit(0x7d)                                                                // EVEX P1: 0x66 prefix, vvvv unused
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                             // EVEX P2: z, mask reg; 0x48 = 512-bit length
            m.emit(0x33)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXWD m256, zmm{k}{z}
    if isM256(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x33)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)                    // 32 = disp8 compression scale (256-bit memory operand)
        })
    }
    // VPMOVZXWD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)        // 0x08 = 128-bit length
            m.emit(0x33)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXWD xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)        // 0x28 = 256-bit length
            m.emit(0x33)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXWD m64, xmm{k}{z}
    if isM64(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x33)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)                     // scale 8: half-width (64-bit) memory operand
        })
    }
    // VPMOVZXWD m128, ymm{k}{z}
    if isM128(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x33)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)                    // scale 16: half-width (128-bit) memory operand
        })
    }
    // No branch matched: the operand combination is not a legal VPMOVZXWD form.
    if p.len == 0 {
        panic("invalid operands for VPMOVZXWD")
    }
    return p
}
 79691  
// VPMOVZXWQ performs "Move Packed Word Integers to Quadword Integers with Zero Extension".
//
// Mnemonic        : VPMOVZXWQ
// Supported forms : (10 forms)
//
//    * VPMOVZXWQ xmm, xmm           [AVX]
//    * VPMOVZXWQ m32, xmm           [AVX]
//    * VPMOVZXWQ xmm, ymm           [AVX2]
//    * VPMOVZXWQ m64, ymm           [AVX2]
//    * VPMOVZXWQ xmm, zmm{k}{z}     [AVX512F]
//    * VPMOVZXWQ m128, zmm{k}{z}    [AVX512F]
//    * VPMOVZXWQ xmm, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVZXWQ xmm, ymm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVZXWQ m32, xmm{k}{z}     [AVX512F,AVX512VL]
//    * VPMOVZXWQ m64, ymm{k}{z}     [AVX512F,AVX512VL]
//
// Each guarded branch below matches one operand form and registers an encoder
// closure emitting that form's exact byte sequence (opcode 0x34 in every form).
// Word→quadword widening means memory operands are a quarter of the vector width.
func (self *Program) VPMOVZXWQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPMOVZXWQ", 2, Operands { v0, v1 })
    // VPMOVZXWQ xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                           // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5)) // VEX byte 1: R/B from high register bits
            m.emit(0x79)                                           // VEX byte 2: 128-bit, 0x66 prefix
            m.emit(0x34)                                           // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))          // ModRM: reg = v1 (dst), rm = v0 (src)
        })
    }
    // VPMOVZXWQ m32, xmm
    if isM32(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x34)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)                     // scale 1: no disp8 compression for VEX
        })
    }
    // VPMOVZXWQ xmm, ymm
    if isXMM(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)                                           // VEX byte 2: 256-bit, 0x66 prefix
            m.emit(0x34)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXWQ m64, ymm
    if isM64(v0) && isYMM(v1) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x34)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPMOVZXWQ xmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // EVEX P0: R/X/B/R' bits
            m.emit(0x7d)                                                                // EVEX P1: 0x66 prefix, vvvv unused
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                             // EVEX P2: z, mask reg; 0x48 = 512-bit length
            m.emit(0x34)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXWQ m128, zmm{k}{z}
    if isM128(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x34)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)                    // scale 16: quarter-width (128-bit) memory operand
        })
    }
    // VPMOVZXWQ xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)        // 0x08 = 128-bit length
            m.emit(0x34)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXWQ xmm, ymm{k}{z}
    if isEVEXXMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)        // 0x28 = 256-bit length
            m.emit(0x34)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPMOVZXWQ m32, xmm{k}{z}
    if isM32(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x34)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)                     // scale 4: quarter-width (32-bit) memory operand
        })
    }
    // VPMOVZXWQ m64, ymm{k}{z}
    if isM64(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), 0)
            m.emit(0x34)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)                     // scale 8: quarter-width (64-bit) memory operand
        })
    }
    // No branch matched: the operand combination is not a legal VPMOVZXWQ form.
    if p.len == 0 {
        panic("invalid operands for VPMOVZXWQ")
    }
    return p
}
 79828  
// VPMULDQ performs "Multiply Packed Signed Doubleword Integers and Store Quadword Result".
//
// Mnemonic        : VPMULDQ
// Supported forms : (10 forms)
//
//    * VPMULDQ xmm, xmm, xmm                   [AVX]
//    * VPMULDQ m128, xmm, xmm                  [AVX]
//    * VPMULDQ ymm, ymm, ymm                   [AVX2]
//    * VPMULDQ m256, ymm, ymm                  [AVX2]
//    * VPMULDQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPMULDQ zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPMULDQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPMULDQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPMULDQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPMULDQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Each guarded branch below matches one operand form and registers an encoder
// closure emitting that form's exact byte sequence (opcode 0x28 in every form).
// The EVEX forms allow a 64-bit broadcast memory operand (bcode selects it).
func (self *Program) VPMULDQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPMULDQ", 3, Operands { v0, v1, v2 })
    // VPMULDQ xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                           // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5)) // VEX byte 1: R/B from high register bits
            m.emit(0x79 ^ (hlcode(v[1]) << 3))                     // VEX byte 2: vvvv = second source (v1)
            m.emit(0x28)                                           // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))          // ModRM: reg = v2 (dst), rm = v0 (src)
        })
    }
    // VPMULDQ m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x28)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                     // scale 1: no disp8 compression for VEX
        })
    }
    // VPMULDQ ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                     // VEX byte 2: 256-bit, vvvv = v1
            m.emit(0x28)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULDQ m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x28)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMULDQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0x85 flags: 0x66 prefix with W=1 (64-bit elements); bcode(v[0]) sets the broadcast bit.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x28)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)                    // scale 64: full 512-bit memory operand
        })
    }
    // VPMULDQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // EVEX P0: R/X/B/R' bits
            m.emit(0xfd ^ (hlcode(v[1]) << 3))                                          // EVEX P1: W=1, vvvv = v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40) // EVEX P2: z, V', mask; 0x40 = 512-bit length
            m.emit(0x28)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULDQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x28)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)                    // scale 16: full 128-bit memory operand
        })
    }
    // VPMULDQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00) // 0x00 = 128-bit length
            m.emit(0x28)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULDQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x28)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)                    // scale 32: full 256-bit memory operand
        })
    }
    // VPMULDQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20) // 0x20 = 256-bit length
            m.emit(0x28)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No branch matched: the operand combination is not a legal VPMULDQ form.
    if p.len == 0 {
        panic("invalid operands for VPMULDQ")
    }
    return p
}
 79965  
// VPMULHRSW performs "Packed Multiply Signed Word Integers and Store High Result with Round and Scale".
//
// Mnemonic        : VPMULHRSW
// Supported forms : (10 forms)
//
//    * VPMULHRSW xmm, xmm, xmm           [AVX]
//    * VPMULHRSW m128, xmm, xmm          [AVX]
//    * VPMULHRSW ymm, ymm, ymm           [AVX2]
//    * VPMULHRSW m256, ymm, ymm          [AVX2]
//    * VPMULHRSW zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPMULHRSW m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPMULHRSW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMULHRSW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPMULHRSW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMULHRSW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Each guarded branch below matches one operand form and registers an encoder
// closure emitting that form's exact byte sequence (opcode 0x0b in every form).
// Word-granular instruction, so no broadcast memory forms exist.
func (self *Program) VPMULHRSW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPMULHRSW", 3, Operands { v0, v1, v2 })
    // VPMULHRSW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                           // 3-byte VEX escape
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5)) // VEX byte 1: R/B from high register bits
            m.emit(0x79 ^ (hlcode(v[1]) << 3))                     // VEX byte 2: 128-bit, vvvv = v1
            m.emit(0x0b)                                           // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))          // ModRM: reg = v2 (dst), rm = v0 (src)
        })
    }
    // VPMULHRSW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x0b)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                     // scale 1: no disp8 compression for VEX
        })
    }
    // VPMULHRSW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                     // VEX byte 2: 256-bit, vvvv = v1
            m.emit(0x0b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULHRSW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x0b)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMULHRSW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                // EVEX escape byte
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // EVEX P0: R/X/B/R' bits
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                          // EVEX P1: vvvv = v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40) // EVEX P2: z, V', mask; 0x40 = 512-bit length
            m.emit(0x0b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULHRSW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x0b)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)                    // scale 64: full 512-bit memory operand
        })
    }
    // VPMULHRSW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00) // 0x00 = 128-bit length
            m.emit(0x0b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULHRSW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x0b)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)                    // scale 16: full 128-bit memory operand
        })
    }
    // VPMULHRSW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20) // 0x20 = 256-bit length
            m.emit(0x0b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULHRSW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x0b)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)                    // scale 32: full 256-bit memory operand
        })
    }
    // No branch matched: the operand combination is not a legal VPMULHRSW form.
    if p.len == 0 {
        panic("invalid operands for VPMULHRSW")
    }
    return p
}
 80102  
// VPMULHUW performs "Multiply Packed Unsigned Word Integers and Store High Result".
//
// Mnemonic        : VPMULHUW
// Supported forms : (10 forms)
//
//    * VPMULHUW xmm, xmm, xmm           [AVX]
//    * VPMULHUW m128, xmm, xmm          [AVX]
//    * VPMULHUW ymm, ymm, ymm           [AVX2]
//    * VPMULHUW m256, ymm, ymm          [AVX2]
//    * VPMULHUW zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPMULHUW m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPMULHUW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMULHUW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPMULHUW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPMULHUW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Each guarded branch below matches one operand form and registers an encoder
// closure emitting that form's exact byte sequence (opcode 0xe4 in every form).
// Unlike the 0F38-map instructions above, the VEX forms here can use the
// shorter 2-byte VEX prefix (m.vex2).
func (self *Program) VPMULHUW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPMULHUW", 3, Operands { v0, v1, v2 })
    // VPMULHUW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))             // 2-byte VEX: 128-bit, vvvv = v1
            m.emit(0xe4)                                           // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))          // ModRM: reg = v2 (dst), rm = v0 (src)
        })
    }
    // VPMULHUW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xe4)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)                     // scale 1: no disp8 compression for VEX
        })
    }
    // VPMULHUW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))             // 2-byte VEX: 256-bit, vvvv = v1
            m.emit(0xe4)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULHUW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xe4)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMULHUW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4))) // EVEX P0: R/X/B/R' bits (0xf1: legacy 0F map)
            m.emit(0x7d ^ (hlcode(v[1]) << 3))                                          // EVEX P1: vvvv = v1
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40) // EVEX P2: z, V', mask; 0x40 = 512-bit length
            m.emit(0xe4)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULHUW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe4)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)                    // scale 64: full 512-bit memory operand
        })
    }
    // VPMULHUW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00) // 0x00 = 128-bit length
            m.emit(0xe4)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULHUW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe4)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)                    // scale 16: full 128-bit memory operand
        })
    }
    // VPMULHUW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20) // 0x20 = 256-bit length
            m.emit(0xe4)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULHUW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe4)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)                    // scale 32: full 256-bit memory operand
        })
    }
    // No branch matched: the operand combination is not a legal VPMULHUW form.
    if p.len == 0 {
        panic("invalid operands for VPMULHUW")
    }
    return p
}
 80235  
 80236  // VPMULHW performs "Multiply Packed Signed Word Integers and Store High Result".
 80237  //
 80238  // Mnemonic        : VPMULHW
 80239  // Supported forms : (10 forms)
 80240  //
 80241  //    * VPMULHW xmm, xmm, xmm           [AVX]
 80242  //    * VPMULHW m128, xmm, xmm          [AVX]
 80243  //    * VPMULHW ymm, ymm, ymm           [AVX2]
 80244  //    * VPMULHW m256, ymm, ymm          [AVX2]
 80245  //    * VPMULHW zmm, zmm, zmm{k}{z}     [AVX512BW]
 80246  //    * VPMULHW m512, zmm, zmm{k}{z}    [AVX512BW]
 80247  //    * VPMULHW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
 80248  //    * VPMULHW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
 80249  //    * VPMULHW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
 80250  //    * VPMULHW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
 80251  //
func (self *Program) VPMULHW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Allocate the instruction, then match the operands against every
    // supported form; each match registers an encoder closure that emits the
    // machine code at assembly time. Several forms may match simultaneously
    // (e.g. the legacy-AVX and EVEX register forms), in which case multiple
    // candidate encodings are recorded on the instruction.
    p := self.alloc("VPMULHW", 3, Operands { v0, v1, v2 })
    // VPMULHW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xe5)
            // ModRM byte: mod=11 (register-direct), reg=dst (v[2]), rm=src (v[0]).
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULHW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xe5)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMULHW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xe5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULHW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xe5)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMULHW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix for the register-register form: the
            // 0x62 marker followed by three payload bytes carrying the
            // register-extension bits (R/X/B/R'), the inverted vvvv field,
            // and the z / L'L / aaa (opmask) fields. Memory forms use the
            // m.evex() helper instead.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xe5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULHW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe5)
            // The final argument is the disp8 compression scale; it tracks
            // the vector width in bytes (64 for zmm, 32 ymm, 16 xmm).
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPMULHW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xe5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULHW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe5)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPMULHW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xe5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULHW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe5)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No supported form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPMULHW")
    }
    return p
}
 80368  
 80369  // VPMULLD performs "Multiply Packed Signed Doubleword Integers and Store Low Result".
 80370  //
 80371  // Mnemonic        : VPMULLD
 80372  // Supported forms : (10 forms)
 80373  //
 80374  //    * VPMULLD xmm, xmm, xmm                   [AVX]
 80375  //    * VPMULLD m128, xmm, xmm                  [AVX]
 80376  //    * VPMULLD ymm, ymm, ymm                   [AVX2]
 80377  //    * VPMULLD m256, ymm, ymm                  [AVX2]
 80378  //    * VPMULLD m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
 80379  //    * VPMULLD zmm, zmm, zmm{k}{z}             [AVX512F]
 80380  //    * VPMULLD m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 80381  //    * VPMULLD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 80382  //    * VPMULLD m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 80383  //    * VPMULLD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 80384  //
func (self *Program) VPMULLD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Allocate the instruction, then match the operands against every
    // supported form; each match registers an encoder closure that emits the
    // machine code at assembly time. Several forms may match simultaneously
    // (e.g. the legacy-AVX and EVEX register forms), in which case multiple
    // candidate encodings are recorded on the instruction.
    p := self.alloc("VPMULLD", 3, Operands { v0, v1, v2 })
    // VPMULLD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX prefix (0xC4 marker); memory forms
            // use the m.vex3() helper instead.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x40)
            // ModRM byte: mod=11 (register-direct), reg=dst (v[2]), rm=src (v[0]).
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULLD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x40)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMULLD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x40)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULLD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x40)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMULLD m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // bcode(v[0]) sets the EVEX broadcast bit when the memory
            // operand is the m32bcst (broadcast) form.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x40)
            // The final argument is the disp8 compression scale; it tracks
            // the vector width in bytes (64 for zmm, 32 ymm, 16 xmm).
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPMULLD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix (0x62 marker) for the
            // register-register form; memory forms use m.evex() instead.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x40)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULLD m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x40)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPMULLD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x40)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULLD m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x40)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPMULLD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x40)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No supported form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPMULLD")
    }
    return p
}
 80505  
 80506  // VPMULLQ performs "Multiply Packed Signed Quadword Integers and Store Low Result".
 80507  //
 80508  // Mnemonic        : VPMULLQ
 80509  // Supported forms : (6 forms)
 80510  //
 80511  //    * VPMULLQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512DQ]
 80512  //    * VPMULLQ zmm, zmm, zmm{k}{z}             [AVX512DQ]
 80513  //    * VPMULLQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512DQ,AVX512VL]
 80514  //    * VPMULLQ xmm, xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
 80515  //    * VPMULLQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512DQ,AVX512VL]
 80516  //    * VPMULLQ ymm, ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
 80517  //
func (self *Program) VPMULLQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Allocate the instruction, then match the operands against every
    // supported form; each match registers an encoder closure that emits the
    // machine code at assembly time. All forms of VPMULLQ are EVEX-encoded
    // (AVX-512DQ only — there is no legacy VEX form).
    p := self.alloc("VPMULLQ", 3, Operands { v0, v1, v2 })
    // VPMULLQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // bcode(v[0]) sets the EVEX broadcast bit when the memory
            // operand is the m64bcst (broadcast) form.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x40)
            // The final argument is the disp8 compression scale; it tracks
            // the vector width in bytes (64 for zmm, 32 ymm, 16 xmm).
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPMULLQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix (0x62 marker) for the
            // register-register form; memory forms use m.evex() instead.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x40)
            // ModRM byte: mod=11 (register-direct), reg=dst (v[2]), rm=src (v[0]).
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULLQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x40)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPMULLQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x40)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULLQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x40)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPMULLQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x40)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No supported form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPMULLQ")
    }
    return p
}
 80594  
 80595  // VPMULLW performs "Multiply Packed Signed Word Integers and Store Low Result".
 80596  //
 80597  // Mnemonic        : VPMULLW
 80598  // Supported forms : (10 forms)
 80599  //
 80600  //    * VPMULLW xmm, xmm, xmm           [AVX]
 80601  //    * VPMULLW m128, xmm, xmm          [AVX]
 80602  //    * VPMULLW ymm, ymm, ymm           [AVX2]
 80603  //    * VPMULLW m256, ymm, ymm          [AVX2]
 80604  //    * VPMULLW zmm, zmm, zmm{k}{z}     [AVX512BW]
 80605  //    * VPMULLW m512, zmm, zmm{k}{z}    [AVX512BW]
 80606  //    * VPMULLW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
 80607  //    * VPMULLW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
 80608  //    * VPMULLW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
 80609  //    * VPMULLW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
 80610  //
func (self *Program) VPMULLW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Allocate the instruction, then match the operands against every
    // supported form; each match registers an encoder closure that emits the
    // machine code at assembly time. Several forms may match simultaneously
    // (e.g. the legacy-AVX and EVEX register forms), in which case multiple
    // candidate encodings are recorded on the instruction.
    p := self.alloc("VPMULLW", 3, Operands { v0, v1, v2 })
    // VPMULLW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xd5)
            // ModRM byte: mod=11 (register-direct), reg=dst (v[2]), rm=src (v[0]).
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULLW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xd5)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMULLW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xd5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULLW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xd5)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMULLW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix (0x62 marker) for the
            // register-register form; memory forms use m.evex() instead.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xd5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULLW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xd5)
            // The final argument is the disp8 compression scale; it tracks
            // the vector width in bytes (64 for zmm, 32 ymm, 16 xmm).
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPMULLW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xd5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULLW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xd5)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPMULLW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xd5)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULLW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xd5)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No supported form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPMULLW")
    }
    return p
}
 80727  
 80728  // VPMULTISHIFTQB performs "Select Packed Unaligned Bytes from Quadword Sources".
 80729  //
 80730  // Mnemonic        : VPMULTISHIFTQB
 80731  // Supported forms : (6 forms)
 80732  //
 80733  //    * VPMULTISHIFTQB m128/m64bcst, xmm, xmm{k}{z}    [AVX512VBMI,AVX512VL]
 80734  //    * VPMULTISHIFTQB xmm, xmm, xmm{k}{z}             [AVX512VBMI,AVX512VL]
 80735  //    * VPMULTISHIFTQB m256/m64bcst, ymm, ymm{k}{z}    [AVX512VBMI,AVX512VL]
 80736  //    * VPMULTISHIFTQB ymm, ymm, ymm{k}{z}             [AVX512VBMI,AVX512VL]
 80737  //    * VPMULTISHIFTQB m512/m64bcst, zmm, zmm{k}{z}    [AVX512VBMI]
 80738  //    * VPMULTISHIFTQB zmm, zmm, zmm{k}{z}             [AVX512VBMI]
 80739  //
func (self *Program) VPMULTISHIFTQB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Allocate the instruction, then match the operands against every
    // supported form; each match registers an encoder closure that emits the
    // machine code at assembly time. All forms of VPMULTISHIFTQB are
    // EVEX-encoded (AVX-512VBMI only — there is no legacy VEX form).
    p := self.alloc("VPMULTISHIFTQB", 3, Operands { v0, v1, v2 })
    // VPMULTISHIFTQB m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VBMI | ISA_AVX512VL)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // bcode(v[0]) sets the EVEX broadcast bit when the memory
            // operand is the m64bcst (broadcast) form.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x83)
            // The final argument is the disp8 compression scale; it tracks
            // the vector width in bytes (16 for xmm, 32 ymm, 64 zmm).
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPMULTISHIFTQB xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VBMI | ISA_AVX512VL)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix (0x62 marker) for the
            // register-register form; memory forms use m.evex() instead.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x83)
            // ModRM byte: mod=11 (register-direct), reg=dst (v[2]), rm=src (v[0]).
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULTISHIFTQB m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VBMI | ISA_AVX512VL)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x83)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPMULTISHIFTQB ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VBMI | ISA_AVX512VL)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x83)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULTISHIFTQB m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x83)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPMULTISHIFTQB zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512VBMI)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x83)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No supported form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VPMULTISHIFTQB")
    }
    return p
}
 80816  
 80817  // VPMULUDQ performs "Multiply Packed Unsigned Doubleword Integers".
 80818  //
 80819  // Mnemonic        : VPMULUDQ
 80820  // Supported forms : (10 forms)
 80821  //
 80822  //    * VPMULUDQ xmm, xmm, xmm                   [AVX]
 80823  //    * VPMULUDQ m128, xmm, xmm                  [AVX]
 80824  //    * VPMULUDQ ymm, ymm, ymm                   [AVX2]
 80825  //    * VPMULUDQ m256, ymm, ymm                  [AVX2]
 80826  //    * VPMULUDQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
 80827  //    * VPMULUDQ zmm, zmm, zmm{k}{z}             [AVX512F]
 80828  //    * VPMULUDQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 80829  //    * VPMULUDQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 80830  //    * VPMULUDQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 80831  //    * VPMULUDQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 80832  //
func (self *Program) VPMULUDQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPMULUDQ", 3, Operands { v0, v1, v2 })
    // Each operand-pattern match below registers one candidate encoder via
    // p.add; an encoder emits the prefix, opcode, and ModRM (plus SIB and
    // displacement for memory forms) when the program is assembled. If no
    // pattern matched, p.len stays 0 and we panic at the bottom.
    // VPMULUDQ xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xf4)
            // ModRM: mod=11 (register direct), reg=dst, rm=src.
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULUDQ m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xf4)
            // ModRM/SIB/displacement; last arg is the displacement scale
            // (1 for VEX-encoded forms, which have no disp8 compression).
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMULUDQ ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xf4)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULUDQ m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xf4)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPMULUDQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xf4)
            // EVEX memory forms scale disp8 by the vector width (64 here).
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPMULUDQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62 marker plus three payload bytes
            // carrying the register-extension, vvvv, mask, and z/L'L fields.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xf4)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULUDQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xf4)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPMULUDQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xf4)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPMULUDQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xf4)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPMULUDQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xf4)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPMULUDQ")
    }
    return p
}
 80949  
 80950  // VPOPCNTD performs "Packed Population Count for Doubleword Integers".
 80951  //
 80952  // Mnemonic        : VPOPCNTD
 80953  // Supported forms : (2 forms)
 80954  //
 80955  //    * VPOPCNTD m512/m32bcst, zmm{k}{z}    [AVX512VPOPCNTDQ]
 80956  //    * VPOPCNTD zmm, zmm{k}{z}             [AVX512VPOPCNTDQ]
 80957  //
func (self *Program) VPOPCNTD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPOPCNTD", 2, Operands { v0, v1 })
    // Each matching operand pattern registers one encoder; if neither form
    // matches, p.len stays 0 and we panic below.
    // VPOPCNTD m512/m32bcst, zmm{k}{z}
    if isM512M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512VPOPCNTDQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x55)
            // ModRM/SIB/displacement; disp8 is scaled by 64 (full ZMM width).
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VPOPCNTD zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512VPOPCNTDQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix (0x62 marker + three payload bytes),
            // then the opcode and a register-direct ModRM byte.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x55)
            // ModRM: mod=11, reg=dst, rm=src.
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPOPCNTD")
    }
    return p
}
 80988  
 80989  // VPOPCNTQ performs "Packed Population Count for Quadword Integers".
 80990  //
 80991  // Mnemonic        : VPOPCNTQ
 80992  // Supported forms : (2 forms)
 80993  //
 80994  //    * VPOPCNTQ m512/m64bcst, zmm{k}{z}    [AVX512VPOPCNTDQ]
 80995  //    * VPOPCNTQ zmm, zmm{k}{z}             [AVX512VPOPCNTDQ]
 80996  //
func (self *Program) VPOPCNTQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPOPCNTQ", 2, Operands { v0, v1 })
    // Quadword variant of VPOPCNTD: same opcode (0x55), but the EVEX
    // prefix carries W=1 (note 0x85/0xfd vs VPOPCNTD's 0x05/0x7d).
    // VPOPCNTQ m512/m64bcst, zmm{k}{z}
    if isM512M64bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512VPOPCNTDQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x55)
            // ModRM/SIB/displacement; disp8 is scaled by 64 (full ZMM width).
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VPOPCNTQ zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512VPOPCNTDQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix (0x62 marker + three payload bytes),
            // then the opcode and a register-direct ModRM byte.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x55)
            // ModRM: mod=11, reg=dst, rm=src.
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPOPCNTQ")
    }
    return p
}
 81027  
 81028  // VPOR performs "Packed Bitwise Logical OR".
 81029  //
 81030  // Mnemonic        : VPOR
 81031  // Supported forms : (4 forms)
 81032  //
 81033  //    * VPOR xmm, xmm, xmm     [AVX]
 81034  //    * VPOR m128, xmm, xmm    [AVX]
 81035  //    * VPOR ymm, ymm, ymm     [AVX2]
 81036  //    * VPOR m256, ymm, ymm    [AVX2]
 81037  //
func (self *Program) VPOR(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPOR", 3, Operands { v0, v1, v2 })
    // VEX-only forms (opcode 0xeb); the EVEX/AVX-512 equivalents live in
    // VPORD/VPORQ. If no pattern matches, p.len stays 0 and we panic below.
    // VPOR xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xeb)
            // ModRM: mod=11 (register direct), reg=dst, rm=src.
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPOR m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xeb)
            // ModRM/SIB/displacement (scale 1: no disp8 compression in VEX).
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPOR ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xeb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPOR m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xeb)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPOR")
    }
    return p
}
 81085  
 81086  // VPORD performs "Bitwise Logical OR of Packed Doubleword Integers".
 81087  //
 81088  // Mnemonic        : VPORD
 81089  // Supported forms : (6 forms)
 81090  //
 81091  //    * VPORD m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
 81092  //    * VPORD zmm, zmm, zmm{k}{z}             [AVX512F]
 81093  //    * VPORD m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 81094  //    * VPORD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 81095  //    * VPORD m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 81096  //    * VPORD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 81097  //
func (self *Program) VPORD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPORD", 3, Operands { v0, v1, v2 })
    // AVX-512 (EVEX) doubleword OR, opcode 0xeb with W=0 (see the 0x05/0x7d
    // prefix bytes; VPORQ uses 0x85/0xfd for W=1). Each matching pattern
    // registers one encoder; p.len == 0 afterwards means invalid operands.
    // VPORD m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xeb)
            // disp8 compression scale tracks the vector width: 64/16/32
            // bytes for the ZMM/XMM/YMM memory forms respectively.
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPORD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix (0x62 marker + three payload bytes),
            // then the opcode and a register-direct ModRM byte.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xeb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPORD m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xeb)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPORD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xeb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPORD m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xeb)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPORD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xeb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPORD")
    }
    return p
}
 81174  
 81175  // VPORQ performs "Bitwise Logical OR of Packed Quadword Integers".
 81176  //
 81177  // Mnemonic        : VPORQ
 81178  // Supported forms : (6 forms)
 81179  //
 81180  //    * VPORQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
 81181  //    * VPORQ zmm, zmm, zmm{k}{z}             [AVX512F]
 81182  //    * VPORQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 81183  //    * VPORQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 81184  //    * VPORQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 81185  //    * VPORQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 81186  //
func (self *Program) VPORQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPORQ", 3, Operands { v0, v1, v2 })
    // Quadword counterpart of VPORD: same opcode (0xeb) but W=1 in the EVEX
    // prefix (0x85/0xfd here vs VPORD's 0x05/0x7d). Each matching pattern
    // registers one encoder; p.len == 0 afterwards means invalid operands.
    // VPORQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xeb)
            // disp8 compression scale tracks the vector width (64/16/32).
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPORQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix (0x62 marker + three payload bytes),
            // then the opcode and a register-direct ModRM byte.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xeb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPORQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xeb)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPORQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xeb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPORQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xeb)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPORQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xeb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPORQ")
    }
    return p
}
 81263  
 81264  // VPPERM performs "Packed Permute Bytes".
 81265  //
 81266  // Mnemonic        : VPPERM
 81267  // Supported forms : (3 forms)
 81268  //
 81269  //    * VPPERM xmm, xmm, xmm, xmm     [XOP]
 81270  //    * VPPERM m128, xmm, xmm, xmm    [XOP]
 81271  //    * VPPERM xmm, m128, xmm, xmm    [XOP]
 81272  //
func (self *Program) VPPERM(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPPERM", 4, Operands { v0, v1, v2, v3 })
    // AMD XOP instruction: the prefix is the three-byte 0x8f escape emitted
    // inline, opcode 0xa3, with the fourth register encoded in the high
    // nibble of a trailing imm8-style byte.
    // VPPERM xmm, xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // Two alternative encodings are registered for the all-register
        // form (they differ in which source sits in ModRM.rm vs the
        // trailing register nibble); the assembler may use either.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[2]) << 3))
            m.emit(0xa3)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.emit(hlcode(v[0]) << 4)
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[3]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[2]) << 3))
            m.emit(0xa3)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[0]))
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VPPERM m128, xmm, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1000, 0x80, hcode(v[3]), addr(v[0]), hlcode(v[2]))
            m.emit(0xa3)
            m.mrsd(lcode(v[3]), addr(v[0]), 1)
            m.emit(hlcode(v[1]) << 4)
        })
    }
    // VPPERM xmm, m128, xmm, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xa3)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.emit(hlcode(v[0]) << 4)
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPPERM")
    }
    return p
}
 81323  
 81324  // VPROLD performs "Rotate Packed Doubleword Left".
 81325  //
 81326  // Mnemonic        : VPROLD
 81327  // Supported forms : (6 forms)
 81328  //
 81329  //    * VPROLD imm8, m512/m32bcst, zmm{k}{z}    [AVX512F]
 81330  //    * VPROLD imm8, zmm, zmm{k}{z}             [AVX512F]
 81331  //    * VPROLD imm8, m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
 81332  //    * VPROLD imm8, m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
 81333  //    * VPROLD imm8, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 81334  //    * VPROLD imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 81335  //
func (self *Program) VPROLD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPROLD", 3, Operands { v0, v1, v2 })
    // Immediate-count rotate. Opcode 0x72 is a group opcode: the ModRM reg
    // field carries the /1 opcode extension (hence reg=1 in mrsd and the
    // 0xc8 base, i.e. 0xc0 | 1<<3, in the register forms), and the rotated
    // operand sits in ModRM.rm with the destination in EVEX.vvvv.
    // VPROLD imm8, m512/m32bcst, zmm{k}{z}
    if isImm8(v0) && isM512M32bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(1, addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPROLD imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix, then opcode, ModRM (/1), and imm8.
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x72)
            m.emit(0xc8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPROLD imm8, m128/m32bcst, xmm{k}{z}
    if isImm8(v0) && isM128M32bcst(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(1, addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPROLD imm8, m256/m32bcst, ymm{k}{z}
    if isImm8(v0) && isM256M32bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(1, addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPROLD imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x72)
            m.emit(0xc8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPROLD imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x72)
            m.emit(0xc8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPROLD")
    }
    return p
}
 81418  
 81419  // VPROLQ performs "Rotate Packed Quadword Left".
 81420  //
 81421  // Mnemonic        : VPROLQ
 81422  // Supported forms : (6 forms)
 81423  //
 81424  //    * VPROLQ imm8, m512/m64bcst, zmm{k}{z}    [AVX512F]
 81425  //    * VPROLQ imm8, zmm, zmm{k}{z}             [AVX512F]
 81426  //    * VPROLQ imm8, m128/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
 81427  //    * VPROLQ imm8, m256/m64bcst, ymm{k}{z}    [AVX512F,AVX512VL]
 81428  //    * VPROLQ imm8, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 81429  //    * VPROLQ imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 81430  //
func (self *Program) VPROLQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPROLQ", 3, Operands { v0, v1, v2 })
    // Quadword counterpart of VPROLD: same group opcode 0x72 with the /1
    // ModRM extension (reg=1 in mrsd; 0xc8 = 0xc0 | 1<<3 in register
    // forms), but W=1 in the EVEX prefix (0x85/0xfd vs VPROLD's 0x05/0x7d).
    // VPROLQ imm8, m512/m64bcst, zmm{k}{z}
    if isImm8(v0) && isM512M64bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b10, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(1, addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPROLQ imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix, then opcode, ModRM (/1), and imm8.
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x72)
            m.emit(0xc8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPROLQ imm8, m128/m64bcst, xmm{k}{z}
    if isImm8(v0) && isM128M64bcst(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(1, addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPROLQ imm8, m256/m64bcst, ymm{k}{z}
    if isImm8(v0) && isM256M64bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(1, addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPROLQ imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x72)
            m.emit(0xc8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPROLQ imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x72)
            m.emit(0xc8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    if p.len == 0 {
        panic("invalid operands for VPROLQ")
    }
    return p
}
 81513  
 81514  // VPROLVD performs "Variable Rotate Packed Doubleword Left".
 81515  //
 81516  // Mnemonic        : VPROLVD
 81517  // Supported forms : (6 forms)
 81518  //
 81519  //    * VPROLVD m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
 81520  //    * VPROLVD zmm, zmm, zmm{k}{z}             [AVX512F]
 81521  //    * VPROLVD m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 81522  //    * VPROLVD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 81523  //    * VPROLVD m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 81524  //    * VPROLVD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 81525  //
func (self *Program) VPROLVD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPROLVD", 3, Operands { v0, v1, v2 })
    // VPROLVD m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-prefixed memory form; opcode 0x15. The disp8 scale of 64
            // matches the 512-bit memory operand width.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x15)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPROLVD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-assembled EVEX prefix (0x62 escape plus
            // three payload bytes), then the opcode and a ModRM byte.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPROLVD m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // disp8 scale 16 for the 128-bit memory operand.
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x15)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPROLVD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPROLVD m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // disp8 scale 32 for the 256-bit memory operand.
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x15)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPROLVD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No candidate encoding was registered: the operand combination is invalid.
    if p.len == 0 {
        panic("invalid operands for VPROLVD")
    }
    return p
}
 81602  
// VPROLVQ performs "Variable Rotate Packed Quadword Left".
//
// Mnemonic        : VPROLVQ
// Supported forms : (6 forms)
//
//    * VPROLVQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPROLVQ zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPROLVQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPROLVQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPROLVQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPROLVQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Each matching if-block below registers one candidate encoding for the
// operand form named in its comment; if none match, the call panics.
func (self *Program) VPROLVQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPROLVQ", 3, Operands { v0, v1, v2 })
    // VPROLVQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; opcode 0x15, disp8 scale 64 for the 512-bit operand.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x15)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPROLVQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-assembled EVEX prefix (0x62 escape + 3 bytes),
            // opcode, then ModRM. 0xfd here carries the quadword (W=1) variant.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPROLVQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // disp8 scale 16 for the 128-bit memory operand.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x15)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPROLVQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPROLVQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // disp8 scale 32 for the 256-bit memory operand.
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x15)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPROLVQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No candidate encoding was registered: the operand combination is invalid.
    if p.len == 0 {
        panic("invalid operands for VPROLVQ")
    }
    return p
}
 81691  
// VPRORD performs "Rotate Packed Doubleword Right".
//
// Mnemonic        : VPRORD
// Supported forms : (6 forms)
//
//    * VPRORD imm8, m512/m32bcst, zmm{k}{z}    [AVX512F]
//    * VPRORD imm8, zmm, zmm{k}{z}             [AVX512F]
//    * VPRORD imm8, m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPRORD imm8, m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPRORD imm8, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPRORD imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Each matching if-block below registers one candidate encoding for the
// operand form named in its comment; if none match, the call panics.
func (self *Program) VPRORD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPRORD", 3, Operands { v0, v1, v2 })
    // VPRORD imm8, m512/m32bcst, zmm{k}{z}
    if isImm8(v0) && isM512M32bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; opcode 0x72 with ModRM reg field 0 as the
            // opcode extension, trailing imm8 rotate count.
            m.evex(0b01, 0x05, 0b10, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(0, addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPRORD imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-assembled EVEX prefix (0x62 escape + 3 bytes),
            // opcode, ModRM (reg field 0), then the imm8 count.
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x72)
            m.emit(0xc0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPRORD imm8, m128/m32bcst, xmm{k}{z}
    if isImm8(v0) && isM128M32bcst(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // disp8 scale 16 for the 128-bit memory operand.
            m.evex(0b01, 0x05, 0b00, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(0, addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPRORD imm8, m256/m32bcst, ymm{k}{z}
    if isImm8(v0) && isM256M32bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // disp8 scale 32 for the 256-bit memory operand.
            m.evex(0b01, 0x05, 0b01, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(0, addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPRORD imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x72)
            m.emit(0xc0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPRORD imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x72)
            m.emit(0xc0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No candidate encoding was registered: the operand combination is invalid.
    if p.len == 0 {
        panic("invalid operands for VPRORD")
    }
    return p
}
 81786  
// VPRORQ performs "Rotate Packed Quadword Right".
//
// Mnemonic        : VPRORQ
// Supported forms : (6 forms)
//
//    * VPRORQ imm8, m512/m64bcst, zmm{k}{z}    [AVX512F]
//    * VPRORQ imm8, zmm, zmm{k}{z}             [AVX512F]
//    * VPRORQ imm8, m128/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPRORQ imm8, m256/m64bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPRORQ imm8, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPRORQ imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Each matching if-block below registers one candidate encoding for the
// operand form named in its comment; if none match, the call panics.
func (self *Program) VPRORQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPRORQ", 3, Operands { v0, v1, v2 })
    // VPRORQ imm8, m512/m64bcst, zmm{k}{z}
    if isImm8(v0) && isM512M64bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; opcode 0x72 with ModRM reg field 0 as the
            // opcode extension, trailing imm8 rotate count.
            m.evex(0b01, 0x85, 0b10, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(0, addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPRORQ imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-assembled EVEX prefix (0x62 escape + 3 bytes);
            // 0xfd carries the quadword (W=1) variant, vs 0x7d in VPRORD.
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x72)
            m.emit(0xc0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPRORQ imm8, m128/m64bcst, xmm{k}{z}
    if isImm8(v0) && isM128M64bcst(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // disp8 scale 16 for the 128-bit memory operand.
            m.evex(0b01, 0x85, 0b00, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(0, addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPRORQ imm8, m256/m64bcst, ymm{k}{z}
    if isImm8(v0) && isM256M64bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // disp8 scale 32 for the 256-bit memory operand.
            m.evex(0b01, 0x85, 0b01, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(0, addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPRORQ imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x72)
            m.emit(0xc0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPRORQ imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x72)
            m.emit(0xc0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No candidate encoding was registered: the operand combination is invalid.
    if p.len == 0 {
        panic("invalid operands for VPRORQ")
    }
    return p
}
 81881  
// VPRORVD performs "Variable Rotate Packed Doubleword Right".
//
// Mnemonic        : VPRORVD
// Supported forms : (6 forms)
//
//    * VPRORVD m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPRORVD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPRORVD m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPRORVD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPRORVD m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPRORVD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Each matching if-block below registers one candidate encoding for the
// operand form named in its comment; if none match, the call panics.
func (self *Program) VPRORVD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPRORVD", 3, Operands { v0, v1, v2 })
    // VPRORVD m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; opcode 0x14 (vs 0x15 for the left-rotate
            // VPROLVD), disp8 scale 64 for the 512-bit operand.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x14)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPRORVD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-assembled EVEX prefix, opcode, ModRM.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPRORVD m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // disp8 scale 16 for the 128-bit memory operand.
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x14)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPRORVD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPRORVD m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // disp8 scale 32 for the 256-bit memory operand.
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x14)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPRORVD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No candidate encoding was registered: the operand combination is invalid.
    if p.len == 0 {
        panic("invalid operands for VPRORVD")
    }
    return p
}
 81970  
// VPRORVQ performs "Variable Rotate Packed Quadword Right".
//
// Mnemonic        : VPRORVQ
// Supported forms : (6 forms)
//
//    * VPRORVQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPRORVQ zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPRORVQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPRORVQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPRORVQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPRORVQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Each matching if-block below registers one candidate encoding for the
// operand form named in its comment; if none match, the call panics.
func (self *Program) VPRORVQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPRORVQ", 3, Operands { v0, v1, v2 })
    // VPRORVQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; opcode 0x14, disp8 scale 64 for the 512-bit operand.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x14)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPRORVQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: hand-assembled EVEX prefix; 0xfd carries the
            // quadword (W=1) variant, vs 0x7d in VPRORVD.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPRORVQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // disp8 scale 16 for the 128-bit memory operand.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x14)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPRORVQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPRORVQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // disp8 scale 32 for the 256-bit memory operand.
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x14)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPRORVQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No candidate encoding was registered: the operand combination is invalid.
    if p.len == 0 {
        panic("invalid operands for VPRORVQ")
    }
    return p
}
 82059  
// VPROTB performs "Packed Rotate Bytes".
//
// Mnemonic        : VPROTB
// Supported forms : (5 forms)
//
//    * VPROTB imm8, xmm, xmm     [XOP]
//    * VPROTB xmm, xmm, xmm      [XOP]
//    * VPROTB m128, xmm, xmm     [XOP]
//    * VPROTB imm8, m128, xmm    [XOP]
//    * VPROTB xmm, m128, xmm     [XOP]
//
// This is an AMD XOP instruction (0x8f escape byte); each matching if-block
// below registers one candidate encoding, and the call panics if none match.
func (self *Program) VPROTB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPROTB", 3, Operands { v0, v1, v2 })
    // VPROTB imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled XOP prefix (0x8f escape), opcode 0xc0 for the
            // immediate form, ModRM, then the imm8 rotate count.
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78)
            m.emit(0xc0)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPROTB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Two alternative encodings are registered for the all-register
            // form; they swap which source operand is carried in vvvv
            // (0x78 vs 0xf8 differ in the W bit — presumably it selects the
            // operand order; see the XOP reference).
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[0]) << 3))
            m.emit(0x90)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))
            m.emit(0x90)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPROTB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory rotate-count form via the three-byte XOP prefix helper.
            m.vex3(0x8f, 0b1001, 0x80, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x90)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPROTB imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[2]), addr(v[1]), 0)
            m.emit(0xc0)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPROTB xmm, m128, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x90)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // No candidate encoding was registered: the operand combination is invalid.
    if p.len == 0 {
        panic("invalid operands for VPROTB")
    }
    return p
}
 82141  
// VPROTD performs "Packed Rotate Doublewords".
//
// Mnemonic        : VPROTD
// Supported forms : (5 forms)
//
//    * VPROTD imm8, xmm, xmm     [XOP]
//    * VPROTD xmm, xmm, xmm      [XOP]
//    * VPROTD m128, xmm, xmm     [XOP]
//    * VPROTD imm8, m128, xmm    [XOP]
//    * VPROTD xmm, m128, xmm     [XOP]
//
// This is an AMD XOP instruction (0x8f escape byte); each matching if-block
// below registers one candidate encoding, and the call panics if none match.
func (self *Program) VPROTD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPROTD", 3, Operands { v0, v1, v2 })
    // VPROTD imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled XOP prefix (0x8f escape), opcode 0xc2 for the
            // immediate form, ModRM, then the imm8 rotate count.
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78)
            m.emit(0xc2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPROTD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Two alternative encodings for the all-register form; they swap
            // which source operand sits in vvvv (0x78 vs 0xf8 flip the W bit).
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[0]) << 3))
            m.emit(0x92)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))
            m.emit(0x92)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPROTD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory rotate-count form via the three-byte XOP prefix helper.
            m.vex3(0x8f, 0b1001, 0x80, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x92)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPROTD imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[2]), addr(v[1]), 0)
            m.emit(0xc2)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPROTD xmm, m128, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x92)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // No candidate encoding was registered: the operand combination is invalid.
    if p.len == 0 {
        panic("invalid operands for VPROTD")
    }
    return p
}
 82223  
// VPROTQ performs "Packed Rotate Quadwords".
//
// Mnemonic        : VPROTQ
// Supported forms : (5 forms)
//
//    * VPROTQ imm8, xmm, xmm     [XOP]
//    * VPROTQ xmm, xmm, xmm      [XOP]
//    * VPROTQ m128, xmm, xmm     [XOP]
//    * VPROTQ imm8, m128, xmm    [XOP]
//    * VPROTQ xmm, m128, xmm     [XOP]
//
// Operands follow AT&T order: sources (v0, v1) first, destination (v2) last.
// Each matching form registers an encoder closure on the instruction; if no
// form matches the supplied operand types, the function panics.
func (self *Program) VPROTQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPROTQ", 3, Operands { v0, v1, v2 })
    // VPROTQ imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // Hand-emitted 3-byte XOP prefix (0x8f escape, then two bytes folding
        // in the inverted register-extension bits and the opcode map), followed
        // by the opcode (0xc3), the ModRM byte and the trailing 8-bit immediate.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78)
            m.emit(0xc3)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPROTQ xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // Two equivalent encodings are registered: the second swaps the two
        // register sources between the vvvv field and ModRM.rm (note the
        // flipped prefix byte, 0x78 vs 0xf8); the encoder may use either.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[0]) << 3))
            m.emit(0x93)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))
            m.emit(0x93)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPROTQ m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // Memory-source form: vex3 builds the XOP prefix, mrsd emits the
        // ModRM/SIB/displacement bytes (scale 1: no disp8 compression).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x80, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x93)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPROTQ imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[2]), addr(v[1]), 0)
            m.emit(0xc3)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPROTQ xmm, m128, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x93)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // No form matched the operand types: reject at construction time.
    if p.len == 0 {
        panic("invalid operands for VPROTQ")
    }
    return p
}
 82305  
// VPROTW performs "Packed Rotate Words".
//
// Mnemonic        : VPROTW
// Supported forms : (5 forms)
//
//    * VPROTW imm8, xmm, xmm     [XOP]
//    * VPROTW xmm, xmm, xmm      [XOP]
//    * VPROTW m128, xmm, xmm     [XOP]
//    * VPROTW imm8, m128, xmm    [XOP]
//    * VPROTW xmm, m128, xmm     [XOP]
//
// Operands follow AT&T order: sources (v0, v1) first, destination (v2) last.
// Each matching form registers an encoder closure on the instruction; if no
// form matches the supplied operand types, the function panics.
func (self *Program) VPROTW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPROTW", 3, Operands { v0, v1, v2 })
    // VPROTW imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // Hand-emitted 3-byte XOP prefix (0x8f escape, then two bytes folding
        // in the inverted register-extension bits and the opcode map), followed
        // by the opcode (0xc1), the ModRM byte and the trailing 8-bit immediate.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe8 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78)
            m.emit(0xc1)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPROTW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // Two equivalent encodings are registered: the second swaps the two
        // register sources between the vvvv field and ModRM.rm (note the
        // flipped prefix byte, 0x78 vs 0xf8); the encoder may use either.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[0]) << 3))
            m.emit(0x91)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))
            m.emit(0x91)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPROTW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // Memory-source form: vex3 builds the XOP prefix, mrsd emits the
        // ModRM/SIB/displacement bytes (scale 1: no disp8 compression).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x80, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x91)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPROTW imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1000, 0x00, hcode(v[2]), addr(v[1]), 0)
            m.emit(0xc1)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPROTW xmm, m128, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x91)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // No form matched the operand types: reject at construction time.
    if p.len == 0 {
        panic("invalid operands for VPROTW")
    }
    return p
}
 82387  
// VPSADBW performs "Compute Sum of Absolute Differences".
//
// Mnemonic        : VPSADBW
// Supported forms : (10 forms)
//
//    * VPSADBW xmm, xmm, xmm     [AVX]
//    * VPSADBW m128, xmm, xmm    [AVX]
//    * VPSADBW ymm, ymm, ymm     [AVX2]
//    * VPSADBW m256, ymm, ymm    [AVX2]
//    * VPSADBW zmm, zmm, zmm     [AVX512BW]
//    * VPSADBW m512, zmm, zmm    [AVX512BW]
//    * VPSADBW xmm, xmm, xmm     [AVX512BW,AVX512VL]
//    * VPSADBW m128, xmm, xmm    [AVX512BW,AVX512VL]
//    * VPSADBW ymm, ymm, ymm     [AVX512BW,AVX512VL]
//    * VPSADBW m256, ymm, ymm    [AVX512BW,AVX512VL]
//
// Operands follow AT&T order: sources (v0, v1) first, destination (v2) last.
// The VEX-encoded (AVX/AVX2) and EVEX-encoded (AVX-512) variants of each
// register width are matched by separate predicates (isXMM vs isEVEXXMM);
// if no form matches the supplied operand types, the function panics.
func (self *Program) VPSADBW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSADBW", 3, Operands { v0, v1, v2 })
    // VPSADBW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // Two-byte VEX prefix (vex2), opcode 0xf6, register-direct ModRM.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xf6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSADBW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xf6)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSADBW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xf6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSADBW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xf6)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSADBW zmm, zmm, zmm
    if isZMM(v0) && isZMM(v1) && isZMM(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        // Hand-emitted 4-byte EVEX prefix: 0x62 escape, then three bytes
        // folding in the (inverted) register-extension bits, the vvvv
        // specifier and the vector-length bits (| 0x40 here selects 512-bit;
        // compare | 0x20 / | 0x00 in the ymm/xmm EVEX forms below).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | 0x40)
            m.emit(0xf6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSADBW m512, zmm, zmm
    if isM512(v0) && isZMM(v1) && isZMM(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        // evex builds the prefix; the final mrsd argument (64) is the disp8
        // compressed-displacement scale, matching the 64-byte memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), 0, 0, 0)
            m.emit(0xf6)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPSADBW xmm, xmm, xmm
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | 0x00)
            m.emit(0xf6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSADBW m128, xmm, xmm
    if isM128(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), 0, 0, 0)
            m.emit(0xf6)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSADBW ymm, ymm, ymm
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | 0x20)
            m.emit(0xf6)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSADBW m256, ymm, ymm
    if isM256(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), 0, 0, 0)
            m.emit(0xf6)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No form matched the operand types: reject at construction time.
    if p.len == 0 {
        panic("invalid operands for VPSADBW")
    }
    return p
}
 82520  
// VPSCATTERDD performs "Scatter Packed Doubleword Values with Signed Doubleword Indices".
//
// Mnemonic        : VPSCATTERDD
// Supported forms : (3 forms)
//
//    * VPSCATTERDD zmm, vm32z{k}    [AVX512F]
//    * VPSCATTERDD xmm, vm32x{k}    [AVX512F,AVX512VL]
//    * VPSCATTERDD ymm, vm32y{k}    [AVX512F,AVX512VL]
//
// v0 is the data source and v1 the vector-memory (VSIB) destination, which
// must carry an opmask register ({k}); the mask is folded into the EVEX
// prefix via kcode. Panics if no form matches the operand types.
func (self *Program) VPSCATTERDD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPSCATTERDD", 2, Operands { v0, v1 })
    // VPSCATTERDD zmm, vm32z{k}
    if isZMM(v0) && isVMZk(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // evex builds the 4-byte EVEX prefix; mrsd emits ModRM/SIB with a
        // disp8 compression scale of 4 (the doubleword element size).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa0)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // VPSCATTERDD xmm, vm32x{k}
    if isEVEXXMM(v0) && isVMXk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa0)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // VPSCATTERDD ymm, vm32y{k}
    if isEVEXYMM(v0) && isVMYk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa0)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // No form matched the operand types: reject at construction time.
    if p.len == 0 {
        panic("invalid operands for VPSCATTERDD")
    }
    return p
}
 82567  
// VPSCATTERDQ performs "Scatter Packed Quadword Values with Signed Doubleword Indices".
//
// Mnemonic        : VPSCATTERDQ
// Supported forms : (3 forms)
//
//    * VPSCATTERDQ zmm, vm32y{k}    [AVX512F]
//    * VPSCATTERDQ xmm, vm32x{k}    [AVX512F,AVX512VL]
//    * VPSCATTERDQ ymm, vm32x{k}    [AVX512F,AVX512VL]
//
// v0 is the data source and v1 the vector-memory (VSIB) destination, which
// must carry an opmask register ({k}). Because the indices are doublewords
// but the data quadwords, the index vector is half the width of the data
// vector (zmm data scatters through a ymm index, etc.). Panics if no form
// matches the operand types.
func (self *Program) VPSCATTERDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPSCATTERDQ", 2, Operands { v0, v1 })
    // VPSCATTERDQ zmm, vm32y{k}
    if isZMM(v0) && isVMYk(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // evex builds the prefix (0x85 carries the W bit for quadword data);
        // mrsd uses a disp8 compression scale of 8 (the qword element size).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa0)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VPSCATTERDQ xmm, vm32x{k}
    if isEVEXXMM(v0) && isVMXk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa0)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VPSCATTERDQ ymm, vm32x{k}
    if isEVEXYMM(v0) && isVMXk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa0)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // No form matched the operand types: reject at construction time.
    if p.len == 0 {
        panic("invalid operands for VPSCATTERDQ")
    }
    return p
}
 82614  
// VPSCATTERQD performs "Scatter Packed Doubleword Values with Signed Quadword Indices".
//
// Mnemonic        : VPSCATTERQD
// Supported forms : (3 forms)
//
//    * VPSCATTERQD ymm, vm64z{k}    [AVX512F]
//    * VPSCATTERQD xmm, vm64x{k}    [AVX512F,AVX512VL]
//    * VPSCATTERQD xmm, vm64y{k}    [AVX512F,AVX512VL]
//
// v0 is the data source and v1 the vector-memory (VSIB) destination, which
// must carry an opmask register ({k}). Because the indices are quadwords but
// the data doublewords, the index vector is twice the width of the data
// vector (ymm data scatters through a zmm index, etc.). Panics if no form
// matches the operand types.
func (self *Program) VPSCATTERQD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPSCATTERQD", 2, Operands { v0, v1 })
    // VPSCATTERQD ymm, vm64z{k}
    if isEVEXYMM(v0) && isVMZk(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // evex builds the 4-byte EVEX prefix; mrsd emits ModRM/SIB with a
        // disp8 compression scale of 4 (the doubleword element size).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa1)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // VPSCATTERQD xmm, vm64x{k}
    if isEVEXXMM(v0) && isVMXk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa1)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // VPSCATTERQD xmm, vm64y{k}
    if isEVEXXMM(v0) && isVMYk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa1)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // No form matched the operand types: reject at construction time.
    if p.len == 0 {
        panic("invalid operands for VPSCATTERQD")
    }
    return p
}
 82661  
// VPSCATTERQQ performs "Scatter Packed Quadword Values with Signed Quadword Indices".
//
// Mnemonic        : VPSCATTERQQ
// Supported forms : (3 forms)
//
//    * VPSCATTERQQ zmm, vm64z{k}    [AVX512F]
//    * VPSCATTERQQ xmm, vm64x{k}    [AVX512F,AVX512VL]
//    * VPSCATTERQQ ymm, vm64y{k}    [AVX512F,AVX512VL]
//
// v0 is the data source and v1 the vector-memory (VSIB) destination, which
// must carry an opmask register ({k}); the mask is folded into the EVEX
// prefix via kcode. Panics if no form matches the operand types.
func (self *Program) VPSCATTERQQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VPSCATTERQQ", 2, Operands { v0, v1 })
    // VPSCATTERQQ zmm, vm64z{k}
    if isZMM(v0) && isVMZk(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // evex builds the prefix (0x85 carries the W bit for quadword data);
        // mrsd uses a disp8 compression scale of 8 (the qword element size).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa1)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VPSCATTERQQ xmm, vm64x{k}
    if isEVEXXMM(v0) && isVMXk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa1)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VPSCATTERQQ ymm, vm64y{k}
    if isEVEXYMM(v0) && isVMYk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa1)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // No form matched the operand types: reject at construction time.
    if p.len == 0 {
        panic("invalid operands for VPSCATTERQQ")
    }
    return p
}
 82708  
// VPSHAB performs "Packed Shift Arithmetic Bytes".
//
// Mnemonic        : VPSHAB
// Supported forms : (3 forms)
//
//    * VPSHAB xmm, xmm, xmm     [XOP]
//    * VPSHAB m128, xmm, xmm    [XOP]
//    * VPSHAB xmm, m128, xmm    [XOP]
//
// Operands follow AT&T order: sources (v0, v1) first, destination (v2) last.
// Each matching form registers an encoder closure on the instruction; if no
// form matches the supplied operand types, the function panics.
func (self *Program) VPSHAB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSHAB", 3, Operands { v0, v1, v2 })
    // VPSHAB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // Hand-emitted 3-byte XOP prefix (0x8f escape), opcode 0x98, then a
        // register-direct ModRM byte. Two equivalent encodings are registered:
        // the second swaps the register sources between the vvvv field and
        // ModRM.rm (note the flipped prefix byte, 0x78 vs 0xf8).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[0]) << 3))
            m.emit(0x98)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))
            m.emit(0x98)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSHAB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // Memory-source form: vex3 builds the XOP prefix, mrsd emits the
        // ModRM/SIB/displacement bytes (scale 1: no disp8 compression).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x80, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x98)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSHAB xmm, m128, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x98)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // No form matched the operand types: reject at construction time.
    if p.len == 0 {
        panic("invalid operands for VPSHAB")
    }
    return p
}
 82764  
// VPSHAD performs "Packed Shift Arithmetic Doublewords".
//
// Mnemonic        : VPSHAD
// Supported forms : (3 forms)
//
//    * VPSHAD xmm, xmm, xmm     [XOP]
//    * VPSHAD m128, xmm, xmm    [XOP]
//    * VPSHAD xmm, m128, xmm    [XOP]
//
// Operands follow AT&T order: sources (v0, v1) first, destination (v2) last.
// Each matching form registers an encoder closure on the instruction; if no
// form matches the supplied operand types, the function panics.
func (self *Program) VPSHAD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSHAD", 3, Operands { v0, v1, v2 })
    // VPSHAD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // Hand-emitted 3-byte XOP prefix (0x8f escape), opcode 0x9a, then a
        // register-direct ModRM byte. Two equivalent encodings are registered:
        // the second swaps the register sources between the vvvv field and
        // ModRM.rm (note the flipped prefix byte, 0x78 vs 0xf8).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[0]) << 3))
            m.emit(0x9a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))
            m.emit(0x9a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSHAD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // Memory-source form: vex3 builds the XOP prefix, mrsd emits the
        // ModRM/SIB/displacement bytes (scale 1: no disp8 compression).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x80, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x9a)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSHAD xmm, m128, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x9a)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // No form matched the operand types: reject at construction time.
    if p.len == 0 {
        panic("invalid operands for VPSHAD")
    }
    return p
}
 82820  
// VPSHAQ performs "Packed Shift Arithmetic Quadwords".
//
// Mnemonic        : VPSHAQ
// Supported forms : (3 forms)
//
//    * VPSHAQ xmm, xmm, xmm     [XOP]
//    * VPSHAQ m128, xmm, xmm    [XOP]
//    * VPSHAQ xmm, m128, xmm    [XOP]
//
// Operands follow AT&T order: sources (v0, v1) first, destination (v2) last.
// Each matching form registers an encoder closure on the instruction; if no
// form matches the supplied operand types, the function panics.
func (self *Program) VPSHAQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSHAQ", 3, Operands { v0, v1, v2 })
    // VPSHAQ xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // Hand-emitted 3-byte XOP prefix (0x8f escape), opcode 0x9b, then a
        // register-direct ModRM byte. Two equivalent encodings are registered:
        // the second swaps the register sources between the vvvv field and
        // ModRM.rm (note the flipped prefix byte, 0x78 vs 0xf8).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[0]) << 3))
            m.emit(0x9b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))
            m.emit(0x9b)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSHAQ m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // Memory-source form: vex3 builds the XOP prefix, mrsd emits the
        // ModRM/SIB/displacement bytes (scale 1: no disp8 compression).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x80, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x9b)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSHAQ xmm, m128, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x9b)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // No form matched the operand types: reject at construction time.
    if p.len == 0 {
        panic("invalid operands for VPSHAQ")
    }
    return p
}
 82876  
// VPSHAW performs "Packed Shift Arithmetic Words".
//
// Mnemonic        : VPSHAW
// Supported forms : (3 forms)
//
//    * VPSHAW xmm, xmm, xmm     [XOP]
//    * VPSHAW m128, xmm, xmm    [XOP]
//    * VPSHAW xmm, m128, xmm    [XOP]
//
// Operands follow AT&T order: sources (v0, v1) first, destination (v2) last.
// Each matching form registers an encoder closure on the instruction; if no
// form matches the supplied operand types, the function panics.
func (self *Program) VPSHAW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSHAW", 3, Operands { v0, v1, v2 })
    // VPSHAW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // Hand-emitted 3-byte XOP prefix (0x8f escape), opcode 0x99, then a
        // register-direct ModRM byte. Two equivalent encodings are registered:
        // the second swaps the register sources between the vvvv field and
        // ModRM.rm (note the flipped prefix byte, 0x78 vs 0xf8).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[0]) << 3))
            m.emit(0x99)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))
            m.emit(0x99)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSHAW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // Memory-source form: vex3 builds the XOP prefix, mrsd emits the
        // ModRM/SIB/displacement bytes (scale 1: no disp8 compression).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x80, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x99)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSHAW xmm, m128, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x99)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // No form matched the operand types: reject at construction time.
    if p.len == 0 {
        panic("invalid operands for VPSHAW")
    }
    return p
}
 82932  
// VPSHLB performs "Packed Shift Logical Bytes".
//
// Mnemonic        : VPSHLB
// Supported forms : (3 forms)
//
//    * VPSHLB xmm, xmm, xmm     [XOP]
//    * VPSHLB m128, xmm, xmm    [XOP]
//    * VPSHLB xmm, m128, xmm    [XOP]
//
// Operands follow AT&T order: sources (v0, v1) first, destination (v2) last.
// Each matching form registers an encoder closure on the instruction; if no
// form matches the supplied operand types, the function panics.
func (self *Program) VPSHLB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSHLB", 3, Operands { v0, v1, v2 })
    // VPSHLB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // Hand-emitted 3-byte XOP prefix (0x8f escape), opcode 0x94, then a
        // register-direct ModRM byte. Two equivalent encodings are registered:
        // the second swaps the register sources between the vvvv field and
        // ModRM.rm (note the flipped prefix byte, 0x78 vs 0xf8).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[0]) << 3))
            m.emit(0x94)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))
            m.emit(0x94)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSHLB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // Memory-source form: vex3 builds the XOP prefix, mrsd emits the
        // ModRM/SIB/displacement bytes (scale 1: no disp8 compression).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x80, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x94)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSHLB xmm, m128, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x94)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // No form matched the operand types: reject at construction time.
    if p.len == 0 {
        panic("invalid operands for VPSHLB")
    }
    return p
}
 82988  
// VPSHLD performs "Packed Shift Logical Doublewords".
//
// Mnemonic        : VPSHLD
// Supported forms : (3 forms)
//
//    * VPSHLD xmm, xmm, xmm     [XOP]
//    * VPSHLD m128, xmm, xmm    [XOP]
//    * VPSHLD xmm, m128, xmm    [XOP]
//
// The operands are matched against each supported form in turn; every form
// that matches registers one or more candidate byte encodings on the
// returned instruction, and the function panics if no form matches.
//
// NOTE(review): AVX-512 VBMI2 also defines a VPSHLD mnemonic (double-shift);
// only the AMD XOP forms listed above are generated here — confirm against
// the generator if the EVEX variant is ever needed.
func (self *Program) VPSHLD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSHLD", 3, Operands { v0, v1, v2 })
    // VPSHLD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // Two equivalent register-register XOP encodings (W bit flipped,
        // vvvv/r-m source roles swapped) — same pattern as VPSHLB, but
        // with opcode byte 0x96.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[0]) << 3))
            m.emit(0x96)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))
            m.emit(0x96)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSHLD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory-source form (W set: 0x80); mrsd encodes the memory
            // operand with displacement scale 1.
            m.vex3(0x8f, 0b1001, 0x80, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x96)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSHLD xmm, m128, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same layout with the source roles swapped (W clear: 0x00).
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x96)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // No supported form matched the given operand types.
    if p.len == 0 {
        panic("invalid operands for VPSHLD")
    }
    return p
}
 83044  
// VPSHLQ performs "Packed Shift Logical Quadwords".
//
// Mnemonic        : VPSHLQ
// Supported forms : (3 forms)
//
//    * VPSHLQ xmm, xmm, xmm     [XOP]
//    * VPSHLQ m128, xmm, xmm    [XOP]
//    * VPSHLQ xmm, m128, xmm    [XOP]
//
// The operands are matched against each supported form in turn; every form
// that matches registers one or more candidate byte encodings on the
// returned instruction, and the function panics if no form matches.
func (self *Program) VPSHLQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSHLQ", 3, Operands { v0, v1, v2 })
    // VPSHLQ xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // Two equivalent register-register XOP encodings (W bit flipped,
        // vvvv/r-m source roles swapped) — same pattern as VPSHLB, but
        // with opcode byte 0x97.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[0]) << 3))
            m.emit(0x97)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))
            m.emit(0x97)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSHLQ m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory-source form (W set: 0x80); mrsd encodes the memory
            // operand with displacement scale 1.
            m.vex3(0x8f, 0b1001, 0x80, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x97)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSHLQ xmm, m128, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same layout with the source roles swapped (W clear: 0x00).
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x97)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // No supported form matched the given operand types.
    if p.len == 0 {
        panic("invalid operands for VPSHLQ")
    }
    return p
}
 83100  
// VPSHLW performs "Packed Shift Logical Words".
//
// Mnemonic        : VPSHLW
// Supported forms : (3 forms)
//
//    * VPSHLW xmm, xmm, xmm     [XOP]
//    * VPSHLW m128, xmm, xmm    [XOP]
//    * VPSHLW xmm, m128, xmm    [XOP]
//
// The operands are matched against each supported form in turn; every form
// that matches registers one or more candidate byte encodings on the
// returned instruction, and the function panics if no form matches.
func (self *Program) VPSHLW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSHLW", 3, Operands { v0, v1, v2 })
    // VPSHLW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        // Two equivalent register-register XOP encodings (W bit flipped,
        // vvvv/r-m source roles swapped) — same pattern as VPSHLB, but
        // with opcode byte 0x95.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x78 ^ (hlcode(v[0]) << 3))
            m.emit(0x95)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x8f)
            m.emit(0xe9 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf8 ^ (hlcode(v[1]) << 3))
            m.emit(0x95)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSHLW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory-source form (W set: 0x80); mrsd encodes the memory
            // operand with displacement scale 1.
            m.vex3(0x8f, 0b1001, 0x80, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x95)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSHLW xmm, m128, xmm
    if isXMM(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_XOP)
        p.domain = DomainAMDSpecific
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same layout with the source roles swapped (W clear: 0x00).
            m.vex3(0x8f, 0b1001, 0x00, hcode(v[2]), addr(v[1]), hlcode(v[0]))
            m.emit(0x95)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
        })
    }
    // No supported form matched the given operand types.
    if p.len == 0 {
        panic("invalid operands for VPSHLW")
    }
    return p
}
 83156  
// VPSHUFB performs "Packed Shuffle Bytes".
//
// Mnemonic        : VPSHUFB
// Supported forms : (10 forms)
//
//    * VPSHUFB xmm, xmm, xmm           [AVX]
//    * VPSHUFB m128, xmm, xmm          [AVX]
//    * VPSHUFB ymm, ymm, ymm           [AVX2]
//    * VPSHUFB m256, ymm, ymm          [AVX2]
//    * VPSHUFB zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPSHUFB m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPSHUFB xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSHUFB m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPSHUFB ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSHUFB m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// The operands are matched against each supported form in turn; every form
// that matches registers a candidate byte encoding on the returned
// instruction, and the function panics if no form matches.
func (self *Program) VPSHUFB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSHUFB", 3, Operands { v0, v1, v2 })
    // VPSHUFB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX prefix emitted inline (0xc4 escape), then the
            // opcode (0x00) and a register-register ModRM byte with
            // reg = destination v[2], r/m = source v[0].
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x00)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSHUFB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory-source form: VEX prefix built by the vex3 helper,
            // memory operand encoded via mrsd with displacement scale 1.
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x00)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSHUFB ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same VEX layout as the xmm form; 0x7d instead of 0x79 sets
            // the 256-bit vector-length bit.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x00)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSHUFB m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x00)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSHUFB zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix emitted inline (0x62 escape). The last
            // prefix byte folds in the zeroing flag (zcode), the opmask
            // register (kcode) and the vector length (0x40: 512-bit).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x00)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSHUFB m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form: prefix built by the evex helper; mrsd uses
            // displacement scale 64 (one full 512-bit vector).
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x00)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPSHUFB xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX layout as the zmm form with 128-bit length (0x00).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x00)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSHUFB m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x00)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSHUFB ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX layout with 256-bit length (0x20).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x00)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSHUFB m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x00)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No supported form matched the given operand types.
    if p.len == 0 {
        panic("invalid operands for VPSHUFB")
    }
    return p
}
 83293  
// VPSHUFD performs "Shuffle Packed Doublewords".
//
// Mnemonic        : VPSHUFD
// Supported forms : (10 forms)
//
//    * VPSHUFD imm8, xmm, xmm                   [AVX]
//    * VPSHUFD imm8, m128, xmm                  [AVX]
//    * VPSHUFD imm8, ymm, ymm                   [AVX2]
//    * VPSHUFD imm8, m256, ymm                  [AVX2]
//    * VPSHUFD imm8, m512/m32bcst, zmm{k}{z}    [AVX512F]
//    * VPSHUFD imm8, zmm, zmm{k}{z}             [AVX512F]
//    * VPSHUFD imm8, m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPSHUFD imm8, m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPSHUFD imm8, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPSHUFD imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// v0 is the imm8 shuffle-control operand, appended after the ModRM bytes
// via imm1. The operands are matched against each supported form in turn;
// every form that matches registers a candidate byte encoding on the
// returned instruction, and the function panics if no form matches.
func (self *Program) VPSHUFD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSHUFD", 3, Operands { v0, v1, v2 })
    // VPSHUFD imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix via helper, opcode 0x70, register-register
            // ModRM, then the trailing shuffle-control immediate.
            m.vex2(1, hcode(v[2]), v[1], 0)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFD imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory-source variant; mrsd encodes the memory operand with
            // displacement scale 1.
            m.vex2(1, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x70)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFD imm8, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // vex2 first argument 5 (vs 1 above) selects the 256-bit form.
            m.vex2(5, hcode(v[2]), v[1], 0)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFD imm8, m256, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x70)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFD imm8, m512/m32bcst, zmm{k}{z}
    if isImm8(v0) && isM512M32bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; bcode(v[1]) carries the 32-bit broadcast
            // flag, mrsd uses displacement scale 64 (full 512-bit vector).
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x70)
            m.mrsd(lcode(v[2]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFD imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix emitted inline (0x62 escape). The last
            // prefix byte folds in the zeroing flag (zcode), the opmask
            // register (kcode) and the 512-bit length constant (0x48).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFD imm8, m128/m32bcst, xmm{k}{z}
    if isImm8(v0) && isM128M32bcst(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x70)
            m.mrsd(lcode(v[2]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFD imm8, m256/m32bcst, ymm{k}{z}
    if isImm8(v0) && isM256M32bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x70)
            m.mrsd(lcode(v[2]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFD imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX layout as the zmm form with the 128-bit length
            // constant (0x08).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x08)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFD imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX layout with the 256-bit length constant (0x28).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x28)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No supported form matched the given operand types.
    if p.len == 0 {
        panic("invalid operands for VPSHUFD")
    }
    return p
}
 83436  
// VPSHUFHW performs "Shuffle Packed High Words".
//
// Mnemonic        : VPSHUFHW
// Supported forms : (10 forms)
//
//    * VPSHUFHW imm8, xmm, xmm           [AVX]
//    * VPSHUFHW imm8, m128, xmm          [AVX]
//    * VPSHUFHW imm8, ymm, ymm           [AVX2]
//    * VPSHUFHW imm8, m256, ymm          [AVX2]
//    * VPSHUFHW imm8, zmm, zmm{k}{z}     [AVX512BW]
//    * VPSHUFHW imm8, m512, zmm{k}{z}    [AVX512BW]
//    * VPSHUFHW imm8, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSHUFHW imm8, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSHUFHW imm8, m128, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPSHUFHW imm8, m256, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// v0 is the imm8 shuffle-control operand, appended after the ModRM bytes
// via imm1. The encoding layout mirrors VPSHUFD but with a different
// mandatory-prefix selector (vex2 arguments 2/6, EVEX byte 0x7e) and no
// broadcast forms. Panics if no form matches.
func (self *Program) VPSHUFHW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSHUFHW", 3, Operands { v0, v1, v2 })
    // VPSHUFHW imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix via helper, opcode 0x70, register-register
            // ModRM, then the trailing immediate.
            m.vex2(2, hcode(v[2]), v[1], 0)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFHW imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x70)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFHW imm8, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // vex2 first argument 6 (vs 2 above) selects the 256-bit form.
            m.vex2(6, hcode(v[2]), v[1], 0)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFHW imm8, m256, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(6, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x70)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFHW imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix emitted inline (0x62 escape). The last
            // prefix byte folds in the zeroing flag (zcode), the opmask
            // register (kcode) and the 512-bit length constant (0x48).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFHW imm8, m512, zmm{k}{z}
    if isImm8(v0) && isM512(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; mrsd uses displacement scale 64 (one full
            // 512-bit vector).
            m.evex(0b01, 0x06, 0b10, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x70)
            m.mrsd(lcode(v[2]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFHW imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX layout as the zmm form with the 128-bit length
            // constant (0x08).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x08)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFHW imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX layout with the 256-bit length constant (0x28).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x28)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFHW imm8, m128, xmm{k}{z}
    if isImm8(v0) && isM128(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b00, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x70)
            m.mrsd(lcode(v[2]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFHW imm8, m256, ymm{k}{z}
    if isImm8(v0) && isM256(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x06, 0b01, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x70)
            m.mrsd(lcode(v[2]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No supported form matched the given operand types.
    if p.len == 0 {
        panic("invalid operands for VPSHUFHW")
    }
    return p
}
 83579  
// VPSHUFLW performs "Shuffle Packed Low Words".
//
// Mnemonic        : VPSHUFLW
// Supported forms : (10 forms)
//
//    * VPSHUFLW imm8, xmm, xmm           [AVX]
//    * VPSHUFLW imm8, m128, xmm          [AVX]
//    * VPSHUFLW imm8, ymm, ymm           [AVX2]
//    * VPSHUFLW imm8, m256, ymm          [AVX2]
//    * VPSHUFLW imm8, zmm, zmm{k}{z}     [AVX512BW]
//    * VPSHUFLW imm8, m512, zmm{k}{z}    [AVX512BW]
//    * VPSHUFLW imm8, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSHUFLW imm8, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSHUFLW imm8, m128, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPSHUFLW imm8, m256, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// v0 is the imm8 shuffle-control operand, appended after the ModRM bytes
// via imm1. The encoding layout mirrors VPSHUFHW but with the alternate
// mandatory-prefix selector (vex2 arguments 3/7, EVEX byte 0x7f). Panics
// if no form matches.
func (self *Program) VPSHUFLW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSHUFLW", 3, Operands { v0, v1, v2 })
    // VPSHUFLW imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix via helper, opcode 0x70, register-register
            // ModRM, then the trailing immediate.
            m.vex2(3, hcode(v[2]), v[1], 0)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFLW imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x70)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFLW imm8, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // vex2 first argument 7 (vs 3 above) selects the 256-bit form.
            m.vex2(7, hcode(v[2]), v[1], 0)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFLW imm8, m256, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(7, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x70)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFLW imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 4-byte EVEX prefix emitted inline (0x62 escape). The last
            // prefix byte folds in the zeroing flag (zcode), the opmask
            // register (kcode) and the 512-bit length constant (0x48).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7f)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFLW imm8, m512, zmm{k}{z}
    if isImm8(v0) && isM512(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; mrsd uses displacement scale 64 (one full
            // 512-bit vector).
            m.evex(0b01, 0x07, 0b10, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x70)
            m.mrsd(lcode(v[2]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFLW imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX layout as the zmm form with the 128-bit length
            // constant (0x08).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7f)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x08)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFLW imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX layout with the 256-bit length constant (0x28).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7f)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x28)
            m.emit(0x70)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFLW imm8, m128, xmm{k}{z}
    if isImm8(v0) && isM128(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x07, 0b00, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x70)
            m.mrsd(lcode(v[2]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSHUFLW imm8, m256, ymm{k}{z}
    if isImm8(v0) && isM256(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x07, 0b01, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x70)
            m.mrsd(lcode(v[2]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No supported form matched the given operand types.
    if p.len == 0 {
        panic("invalid operands for VPSHUFLW")
    }
    return p
}
 83722  
// VPSIGNB performs "Packed Sign of Byte Integers".
//
// Mnemonic        : VPSIGNB
// Supported forms : (4 forms)
//
//    * VPSIGNB xmm, xmm, xmm     [AVX]
//    * VPSIGNB m128, xmm, xmm    [AVX]
//    * VPSIGNB ymm, ymm, ymm     [AVX2]
//    * VPSIGNB m256, ymm, ymm    [AVX2]
//
// Operands follow AT&T order: v0/v1 are sources, v2 is the destination.
// Each form that matches the operand types appends one candidate encoder
// closure to the instruction; encoding happens later when it is assembled.
// NOTE(generated): do not hand-edit — regenerate via mkasm_amd64.py.
func (self *Program) VPSIGNB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSIGNB", 3, Operands { v0, v1, v2 })
    // VPSIGNB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled three-byte VEX prefix (C4): payload bytes carry the
            // inverted high register bits and vvvv, then opcode 0x08 with a
            // register-direct ModRM byte.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x08)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSIGNB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: vex3 helper builds the prefix from the address
            // operand, mrsd emits the memory ModRM/SIB/displacement.
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x08)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSIGNB ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same as the xmm form but with VEX.L set (0x7d vs 0x79) for
            // 256-bit operation; requires AVX2.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x08)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSIGNB m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x08)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No form matched the given operand types: this is a caller error.
    if p.len == 0 {
        panic("invalid operands for VPSIGNB")
    }
    return p
}
 83784  
// VPSIGND performs "Packed Sign of Doubleword Integers".
//
// Mnemonic        : VPSIGND
// Supported forms : (4 forms)
//
//    * VPSIGND xmm, xmm, xmm     [AVX]
//    * VPSIGND m128, xmm, xmm    [AVX]
//    * VPSIGND ymm, ymm, ymm     [AVX2]
//    * VPSIGND m256, ymm, ymm    [AVX2]
//
// Structure is identical to VPSIGNB/VPSIGNW except for the opcode byte
// (0x0a here). Operands are AT&T order: v0/v1 sources, v2 destination.
// NOTE(generated): do not hand-edit — regenerate via mkasm_amd64.py.
func (self *Program) VPSIGND(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSIGND", 3, Operands { v0, v1, v2 })
    // VPSIGND xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled three-byte VEX prefix, opcode 0x0a, register-direct
            // ModRM.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x0a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSIGND m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: prefix via vex3 helper, ModRM/SIB/disp via mrsd.
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x0a)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSIGND ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit variant (0x7d selects VEX.L=1); requires AVX2.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x0a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSIGND m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x0a)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No form matched the given operand types: this is a caller error.
    if p.len == 0 {
        panic("invalid operands for VPSIGND")
    }
    return p
}
 83846  
// VPSIGNW performs "Packed Sign of Word Integers".
//
// Mnemonic        : VPSIGNW
// Supported forms : (4 forms)
//
//    * VPSIGNW xmm, xmm, xmm     [AVX]
//    * VPSIGNW m128, xmm, xmm    [AVX]
//    * VPSIGNW ymm, ymm, ymm     [AVX2]
//    * VPSIGNW m256, ymm, ymm    [AVX2]
//
// Structure is identical to VPSIGNB/VPSIGND except for the opcode byte
// (0x09 here). Operands are AT&T order: v0/v1 sources, v2 destination.
// NOTE(generated): do not hand-edit — regenerate via mkasm_amd64.py.
func (self *Program) VPSIGNW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSIGNW", 3, Operands { v0, v1, v2 })
    // VPSIGNW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled three-byte VEX prefix, opcode 0x09, register-direct
            // ModRM.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x09)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSIGNW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: prefix via vex3 helper, ModRM/SIB/disp via mrsd.
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x09)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSIGNW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit variant (0x7d selects VEX.L=1); requires AVX2.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x09)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSIGNW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x09)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No form matched the given operand types: this is a caller error.
    if p.len == 0 {
        panic("invalid operands for VPSIGNW")
    }
    return p
}
 83908  
// VPSLLD performs "Shift Packed Doubleword Data Left Logical".
//
// Mnemonic        : VPSLLD
// Supported forms : (18 forms)
//
//    * VPSLLD imm8, xmm, xmm                   [AVX]
//    * VPSLLD xmm, xmm, xmm                    [AVX]
//    * VPSLLD m128, xmm, xmm                   [AVX]
//    * VPSLLD imm8, ymm, ymm                   [AVX2]
//    * VPSLLD xmm, ymm, ymm                    [AVX2]
//    * VPSLLD m128, ymm, ymm                   [AVX2]
//    * VPSLLD imm8, m512/m32bcst, zmm{k}{z}    [AVX512F]
//    * VPSLLD imm8, zmm, zmm{k}{z}             [AVX512F]
//    * VPSLLD xmm, zmm, zmm{k}{z}              [AVX512F]
//    * VPSLLD m128, zmm, zmm{k}{z}             [AVX512F]
//    * VPSLLD imm8, m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPSLLD imm8, m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPSLLD imm8, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPSLLD xmm, xmm, xmm{k}{z}              [AVX512F,AVX512VL]
//    * VPSLLD m128, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPSLLD imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//    * VPSLLD xmm, ymm, ymm{k}{z}              [AVX512F,AVX512VL]
//    * VPSLLD m128, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Two encodings are used throughout: opcode 0x72 with ModRM reg field 6
// (an opcode extension) for shift-by-immediate forms, and opcode 0xf2 for
// shift-by-xmm-count forms. Operands are AT&T order: v0 is the count
// (imm8/xmm/m128) or shifted memory source, v2 is the destination.
// NOTE(generated): do not hand-edit — regenerate via mkasm_amd64.py.
func (self *Program) VPSLLD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSLLD", 3, Operands { v0, v1, v2 })
    // VPSLLD imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Shift-by-immediate: 0x72 with reg=6 folded into the ModRM
            // (0xf0 = mod 11 | reg 110), immediate appended last.
            m.vex2(1, 0, v[1], hlcode(v[2]))
            m.emit(0x72)
            m.emit(0xf0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Shift count in an xmm register: opcode 0xf2, register-direct
            // ModRM.
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xf2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSLLD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Shift count loaded from memory; mrsd emits ModRM/SIB/disp.
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xf2)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSLLD imm8, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit variants pass 5 instead of 1 to vex2 (sets VEX.L).
            m.vex2(5, 0, v[1], hlcode(v[2]))
            m.emit(0x72)
            m.emit(0xf0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLD xmm, ymm, ymm
    if isXMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xf2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSLLD m128, ymm, ymm
    if isM128(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xf2)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSLLD imm8, m512/m32bcst, zmm{k}{z}
    if isImm8(v0) && isM512M32bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form: evex helper builds the prefix (with masking,
            // zeroing and broadcast bits); reg field is fixed to 6 (the
            // shift-by-immediate opcode extension), disp8 scale is 64.
            m.evex(0b01, 0x05, 0b10, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(6, addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLD imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form emitted byte-by-byte: 0x62 escape plus three
            // payload bytes, then opcode and ModRM with reg=6 (0xf0).
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x72)
            m.emit(0xf0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLD xmm, zmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xf2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSLLD m128, zmm, zmm{k}{z}
    if isM128(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Count from memory is always m128, so the disp8 scale stays 16
            // even for the zmm destination form.
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xf2)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSLLD imm8, m128/m32bcst, xmm{k}{z}
    if isImm8(v0) && isM128M32bcst(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(6, addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLD imm8, m256/m32bcst, ymm{k}{z}
    if isImm8(v0) && isM256M32bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(6, addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLD imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same byte layout as the zmm form; the trailing 0x00/0x20/0x40
            // in the fourth payload byte selects the 128/256/512-bit length.
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x72)
            m.emit(0xf0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xf2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSLLD m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xf2)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSLLD imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x72)
            m.emit(0xf0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLD xmm, ymm, ymm{k}{z}
    if isEVEXXMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xf2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSLLD m128, ymm, ymm{k}{z}
    if isM128(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xf2)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // No form matched the given operand types: this is a caller error.
    if p.len == 0 {
        panic("invalid operands for VPSLLD")
    }
    return p
}
 84146  
// VPSLLDQ performs "Shift Packed Double Quadword Left Logical".
//
// Mnemonic        : VPSLLDQ
// Supported forms : (8 forms)
//
//    * VPSLLDQ imm8, xmm, xmm     [AVX]
//    * VPSLLDQ imm8, ymm, ymm     [AVX2]
//    * VPSLLDQ imm8, zmm, zmm     [AVX512BW]
//    * VPSLLDQ imm8, m512, zmm    [AVX512BW]
//    * VPSLLDQ imm8, xmm, xmm     [AVX512BW,AVX512VL]
//    * VPSLLDQ imm8, m128, xmm    [AVX512BW,AVX512VL]
//    * VPSLLDQ imm8, ymm, ymm     [AVX512BW,AVX512VL]
//    * VPSLLDQ imm8, m256, ymm    [AVX512BW,AVX512VL]
//
// All forms are shift-by-immediate only: opcode 0x73 with ModRM reg field 7
// (opcode extension). Unlike VPSLLD, no masking/zeroing is supported, so the
// EVEX forms pass 0 for the kcode/zcode slots. Operands are AT&T order:
// v0 is the imm8 count, v1 the source, v2 the destination.
// NOTE(generated): do not hand-edit — regenerate via mkasm_amd64.py.
func (self *Program) VPSLLDQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSLLDQ", 3, Operands { v0, v1, v2 })
    // VPSLLDQ imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX.128 form: ModRM 0xf8 = mod 11 | reg 111 (the /7 extension).
            m.vex2(1, 0, v[1], hlcode(v[2]))
            m.emit(0x73)
            m.emit(0xf8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLDQ imm8, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX.256 form (vex2 first argument 5 instead of 1).
            m.vex2(5, 0, v[1], hlcode(v[2]))
            m.emit(0x73)
            m.emit(0xf8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLDQ imm8, zmm, zmm
    if isImm8(v0) && isZMM(v1) && isZMM(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form emitted byte-by-byte; the 0x40 in the fourth
            // payload byte selects 512-bit length (0x00 = 128, 0x20 = 256).
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | 0x40)
            m.emit(0x73)
            m.emit(0xf8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLDQ imm8, m512, zmm
    if isImm8(v0) && isM512(v1) && isZMM(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form: reg field fixed to 7 (/7), disp8 scale 64.
            m.evex(0b01, 0x05, 0b10, 0, addr(v[1]), vcode(v[2]), 0, 0, 0)
            m.emit(0x73)
            m.mrsd(7, addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLDQ imm8, xmm, xmm
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | 0x00)
            m.emit(0x73)
            m.emit(0xf8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLDQ imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, 0, addr(v[1]), vcode(v[2]), 0, 0, 0)
            m.emit(0x73)
            m.mrsd(7, addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLDQ imm8, ymm, ymm
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | 0x20)
            m.emit(0x73)
            m.emit(0xf8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSLLDQ imm8, m256, ymm
    if isImm8(v0) && isM256(v1) && isEVEXYMM(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, 0, addr(v[1]), vcode(v[2]), 0, 0, 0)
            m.emit(0x73)
            m.mrsd(7, addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the given operand types: this is a caller error.
    if p.len == 0 {
        panic("invalid operands for VPSLLDQ")
    }
    return p
}
 84265  
 84266  // VPSLLQ performs "Shift Packed Quadword Data Left Logical".
 84267  //
 84268  // Mnemonic        : VPSLLQ
 84269  // Supported forms : (18 forms)
 84270  //
 84271  //    * VPSLLQ imm8, xmm, xmm                   [AVX]
 84272  //    * VPSLLQ xmm, xmm, xmm                    [AVX]
 84273  //    * VPSLLQ m128, xmm, xmm                   [AVX]
 84274  //    * VPSLLQ imm8, ymm, ymm                   [AVX2]
 84275  //    * VPSLLQ xmm, ymm, ymm                    [AVX2]
 84276  //    * VPSLLQ m128, ymm, ymm                   [AVX2]
 84277  //    * VPSLLQ imm8, m512/m64bcst, zmm{k}{z}    [AVX512F]
 84278  //    * VPSLLQ imm8, zmm, zmm{k}{z}             [AVX512F]
 84279  //    * VPSLLQ xmm, zmm, zmm{k}{z}              [AVX512F]
 84280  //    * VPSLLQ m128, zmm, zmm{k}{z}             [AVX512F]
 84281  //    * VPSLLQ imm8, m128/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
 84282  //    * VPSLLQ imm8, m256/m64bcst, ymm{k}{z}    [AVX512F,AVX512VL]
 84283  //    * VPSLLQ imm8, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 84284  //    * VPSLLQ xmm, xmm, xmm{k}{z}              [AVX512F,AVX512VL]
 84285  //    * VPSLLQ m128, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 84286  //    * VPSLLQ imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 84287  //    * VPSLLQ xmm, ymm, ymm{k}{z}              [AVX512F,AVX512VL]
 84288  //    * VPSLLQ m128, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 84289  //
 84290  func (self *Program) VPSLLQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
 84291      p := self.alloc("VPSLLQ", 3, Operands { v0, v1, v2 })
 84292      // VPSLLQ imm8, xmm, xmm
 84293      if isImm8(v0) && isXMM(v1) && isXMM(v2) {
 84294          self.require(ISA_AVX)
 84295          p.domain = DomainAVX
 84296          p.add(0, func(m *_Encoding, v []interface{}) {
 84297              m.vex2(1, 0, v[1], hlcode(v[2]))
 84298              m.emit(0x73)
 84299              m.emit(0xf0 | lcode(v[1]))
 84300              m.imm1(toImmAny(v[0]))
 84301          })
 84302      }
 84303      // VPSLLQ xmm, xmm, xmm
 84304      if isXMM(v0) && isXMM(v1) && isXMM(v2) {
 84305          self.require(ISA_AVX)
 84306          p.domain = DomainAVX
 84307          p.add(0, func(m *_Encoding, v []interface{}) {
 84308              m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
 84309              m.emit(0xf3)
 84310              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 84311          })
 84312      }
 84313      // VPSLLQ m128, xmm, xmm
 84314      if isM128(v0) && isXMM(v1) && isXMM(v2) {
 84315          self.require(ISA_AVX)
 84316          p.domain = DomainAVX
 84317          p.add(0, func(m *_Encoding, v []interface{}) {
 84318              m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 84319              m.emit(0xf3)
 84320              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 84321          })
 84322      }
 84323      // VPSLLQ imm8, ymm, ymm
 84324      if isImm8(v0) && isYMM(v1) && isYMM(v2) {
 84325          self.require(ISA_AVX2)
 84326          p.domain = DomainAVX
 84327          p.add(0, func(m *_Encoding, v []interface{}) {
 84328              m.vex2(5, 0, v[1], hlcode(v[2]))
 84329              m.emit(0x73)
 84330              m.emit(0xf0 | lcode(v[1]))
 84331              m.imm1(toImmAny(v[0]))
 84332          })
 84333      }
 84334      // VPSLLQ xmm, ymm, ymm
 84335      if isXMM(v0) && isYMM(v1) && isYMM(v2) {
 84336          self.require(ISA_AVX2)
 84337          p.domain = DomainAVX
 84338          p.add(0, func(m *_Encoding, v []interface{}) {
 84339              m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
 84340              m.emit(0xf3)
 84341              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 84342          })
 84343      }
 84344      // VPSLLQ m128, ymm, ymm
 84345      if isM128(v0) && isYMM(v1) && isYMM(v2) {
 84346          self.require(ISA_AVX2)
 84347          p.domain = DomainAVX
 84348          p.add(0, func(m *_Encoding, v []interface{}) {
 84349              m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 84350              m.emit(0xf3)
 84351              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 84352          })
 84353      }
 84354      // VPSLLQ imm8, m512/m64bcst, zmm{k}{z}
 84355      if isImm8(v0) && isM512M64bcst(v1) && isZMMkz(v2) {
 84356          self.require(ISA_AVX512F)
 84357          p.domain = DomainAVX
 84358          p.add(0, func(m *_Encoding, v []interface{}) {
 84359              m.evex(0b01, 0x85, 0b10, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
 84360              m.emit(0x73)
 84361              m.mrsd(6, addr(v[1]), 64)
 84362              m.imm1(toImmAny(v[0]))
 84363          })
 84364      }
 84365      // VPSLLQ imm8, zmm, zmm{k}{z}
 84366      if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
 84367          self.require(ISA_AVX512F)
 84368          p.domain = DomainAVX
 84369          p.add(0, func(m *_Encoding, v []interface{}) {
 84370              m.emit(0x62)
 84371              m.emit(0xf1 ^ (ehcode(v[1]) << 5))
 84372              m.emit(0xfd ^ (hlcode(v[2]) << 3))
 84373              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x40)
 84374              m.emit(0x73)
 84375              m.emit(0xf0 | lcode(v[1]))
 84376              m.imm1(toImmAny(v[0]))
 84377          })
 84378      }
 84379      // VPSLLQ xmm, zmm, zmm{k}{z}
 84380      if isEVEXXMM(v0) && isZMM(v1) && isZMMkz(v2) {
 84381          self.require(ISA_AVX512F)
 84382          p.domain = DomainAVX
 84383          p.add(0, func(m *_Encoding, v []interface{}) {
 84384              m.emit(0x62)
 84385              m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 84386              m.emit(0xfd ^ (hlcode(v[1]) << 3))
 84387              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
 84388              m.emit(0xf3)
 84389              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 84390          })
 84391      }
 84392      // VPSLLQ m128, zmm, zmm{k}{z}
 84393      if isM128(v0) && isZMM(v1) && isZMMkz(v2) {
 84394          self.require(ISA_AVX512F)
 84395          p.domain = DomainAVX
 84396          p.add(0, func(m *_Encoding, v []interface{}) {
 84397              m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
 84398              m.emit(0xf3)
 84399              m.mrsd(lcode(v[2]), addr(v[0]), 16)
 84400          })
 84401      }
 84402      // VPSLLQ imm8, m128/m64bcst, xmm{k}{z}
 84403      if isImm8(v0) && isM128M64bcst(v1) && isXMMkz(v2) {
 84404          self.require(ISA_AVX512VL | ISA_AVX512F)
 84405          p.domain = DomainAVX
 84406          p.add(0, func(m *_Encoding, v []interface{}) {
 84407              m.evex(0b01, 0x85, 0b00, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
 84408              m.emit(0x73)
 84409              m.mrsd(6, addr(v[1]), 16)
 84410              m.imm1(toImmAny(v[0]))
 84411          })
 84412      }
 84413      // VPSLLQ imm8, m256/m64bcst, ymm{k}{z}
 84414      if isImm8(v0) && isM256M64bcst(v1) && isYMMkz(v2) {
 84415          self.require(ISA_AVX512VL | ISA_AVX512F)
 84416          p.domain = DomainAVX
 84417          p.add(0, func(m *_Encoding, v []interface{}) {
 84418              m.evex(0b01, 0x85, 0b01, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
 84419              m.emit(0x73)
 84420              m.mrsd(6, addr(v[1]), 32)
 84421              m.imm1(toImmAny(v[0]))
 84422          })
 84423      }
 84424      // VPSLLQ imm8, xmm, xmm{k}{z}
 84425      if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
 84426          self.require(ISA_AVX512VL | ISA_AVX512F)
 84427          p.domain = DomainAVX
 84428          p.add(0, func(m *_Encoding, v []interface{}) {
 84429              m.emit(0x62)
 84430              m.emit(0xf1 ^ (ehcode(v[1]) << 5))
 84431              m.emit(0xfd ^ (hlcode(v[2]) << 3))
 84432              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x00)
 84433              m.emit(0x73)
 84434              m.emit(0xf0 | lcode(v[1]))
 84435              m.imm1(toImmAny(v[0]))
 84436          })
 84437      }
 84438      // VPSLLQ xmm, xmm, xmm{k}{z}
 84439      if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
 84440          self.require(ISA_AVX512VL | ISA_AVX512F)
 84441          p.domain = DomainAVX
 84442          p.add(0, func(m *_Encoding, v []interface{}) {
 84443              m.emit(0x62)
 84444              m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 84445              m.emit(0xfd ^ (hlcode(v[1]) << 3))
 84446              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
 84447              m.emit(0xf3)
 84448              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 84449          })
 84450      }
 84451      // VPSLLQ m128, xmm, xmm{k}{z}
 84452      if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
 84453          self.require(ISA_AVX512VL | ISA_AVX512F)
 84454          p.domain = DomainAVX
 84455          p.add(0, func(m *_Encoding, v []interface{}) {
 84456              m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
 84457              m.emit(0xf3)
 84458              m.mrsd(lcode(v[2]), addr(v[0]), 16)
 84459          })
 84460      }
 84461      // VPSLLQ imm8, ymm, ymm{k}{z}
 84462      if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
 84463          self.require(ISA_AVX512VL | ISA_AVX512F)
 84464          p.domain = DomainAVX
 84465          p.add(0, func(m *_Encoding, v []interface{}) {
 84466              m.emit(0x62)
 84467              m.emit(0xf1 ^ (ehcode(v[1]) << 5))
 84468              m.emit(0xfd ^ (hlcode(v[2]) << 3))
 84469              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x20)
 84470              m.emit(0x73)
 84471              m.emit(0xf0 | lcode(v[1]))
 84472              m.imm1(toImmAny(v[0]))
 84473          })
 84474      }
 84475      // VPSLLQ xmm, ymm, ymm{k}{z}
 84476      if isEVEXXMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
 84477          self.require(ISA_AVX512VL | ISA_AVX512F)
 84478          p.domain = DomainAVX
 84479          p.add(0, func(m *_Encoding, v []interface{}) {
 84480              m.emit(0x62)
 84481              m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 84482              m.emit(0xfd ^ (hlcode(v[1]) << 3))
 84483              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
 84484              m.emit(0xf3)
 84485              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 84486          })
 84487      }
 84488      // VPSLLQ m128, ymm, ymm{k}{z}
 84489      if isM128(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
 84490          self.require(ISA_AVX512VL | ISA_AVX512F)
 84491          p.domain = DomainAVX
 84492          p.add(0, func(m *_Encoding, v []interface{}) {
 84493              m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
 84494              m.emit(0xf3)
 84495              m.mrsd(lcode(v[2]), addr(v[0]), 16)
 84496          })
 84497      }
 84498      if p.len == 0 {
 84499          panic("invalid operands for VPSLLQ")
 84500      }
 84501      return p
 84502  }
 84503  
// VPSLLVD performs "Variable Shift Packed Doubleword Data Left Logical".
//
// Mnemonic        : VPSLLVD
// Supported forms : (10 forms)
//
//    * VPSLLVD xmm, xmm, xmm                   [AVX2]
//    * VPSLLVD m128, xmm, xmm                  [AVX2]
//    * VPSLLVD ymm, ymm, ymm                   [AVX2]
//    * VPSLLVD m256, ymm, ymm                  [AVX2]
//    * VPSLLVD m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPSLLVD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPSLLVD m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPSLLVD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPSLLVD m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPSLLVD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Operand order follows AT&T convention: v0 = shift counts (reg/mem),
// v1 = source register, v2 = destination. Each matching form appends one
// candidate encoding to the instruction; if no form matches, this panics.
// The opcode byte for every form is 0x47 (VEX/EVEX map 0F38).
func (self *Program) VPSLLVD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSLLVD", 3, Operands { v0, v1, v2 })
    // VPSLLVD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                           // 3-byte VEX escape prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5)) // VEX byte 1: presumably R/B bits folded in from high register halves
            m.emit(0x79 ^ (hlcode(v[1]) << 3))                     // VEX byte 2: vvvv encodes the source register v[1]; 128-bit, 66 prefix
            m.emit(0x47)                                           // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))          // ModRM: register-direct, reg = dest, rm = count register
        })
    }
    // VPSLLVD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 3-byte VEX with memory operand in rm
            m.emit(0x47)
            m.mrsd(lcode(v[2]), addr(v[0]), 1) // ModRM/SIB/disp; scale 1 (no disp8 compression under VEX)
        })
    }
    // VPSLLVD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3)) // differs from the xmm form only in VEX.L (256-bit)
            m.emit(0x47)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSLLVD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x47)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSLLVD m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX-encoded memory form; bcode(v[0]) sets the broadcast bit for m32bcst.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x47)
            m.mrsd(lcode(v[2]), addr(v[0]), 64) // disp8*N compression: N = 64 for a full 512-bit operand
        })
    }
    // VPSLLVD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (0x62) for the register-register form.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40) // 0x40 selects 512-bit vector length
            m.emit(0x47)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSLLVD m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x47)
            m.mrsd(lcode(v[2]), addr(v[0]), 16) // disp8*16 for a 128-bit operand
        })
    }
    // VPSLLVD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00) // 0x00 = 128-bit vector length
            m.emit(0x47)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSLLVD m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x47)
            m.mrsd(lcode(v[2]), addr(v[0]), 32) // disp8*32 for a 256-bit operand
        })
    }
    // VPSLLVD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20) // 0x20 = 256-bit vector length
            m.emit(0x47)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VPSLLVD")
    }
    return p
}
 84640  
// VPSLLVQ performs "Variable Shift Packed Quadword Data Left Logical".
//
// Mnemonic        : VPSLLVQ
// Supported forms : (10 forms)
//
//    * VPSLLVQ xmm, xmm, xmm                   [AVX2]
//    * VPSLLVQ m128, xmm, xmm                  [AVX2]
//    * VPSLLVQ ymm, ymm, ymm                   [AVX2]
//    * VPSLLVQ m256, ymm, ymm                  [AVX2]
//    * VPSLLVQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPSLLVQ zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPSLLVQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPSLLVQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPSLLVQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPSLLVQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Quadword variant of VPSLLVD: identical structure, but the W bit is set
// in the VEX/EVEX prefix (0xf9/0xfd second-byte constants, 0x81/0x85
// prefix selectors vs 0x01/0x05 in VPSLLVD) and broadcast is m64bcst.
// v0 = shift counts, v1 = source, v2 = destination. Opcode byte is 0x47.
func (self *Program) VPSLLVQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSLLVQ", 3, Operands { v0, v1, v2 })
    // VPSLLVQ xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)                                           // 3-byte VEX escape prefix
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5)) // VEX byte 1: presumably R/B bits from high register halves
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))                     // VEX byte 2: W=1 (quadword), vvvv = source v[1]
            m.emit(0x47)                                           // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))          // ModRM: register-direct, reg = dest, rm = count register
        })
    }
    // VPSLLVQ m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1])) // 0x81: W=1 variant of VPSLLVD's 0x01
            m.emit(0x47)
            m.mrsd(lcode(v[2]), addr(v[0]), 1) // scale 1: no disp8 compression under VEX
        })
    }
    // VPSLLVQ ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[1]) << 3)) // W=1 plus VEX.L (256-bit)
            m.emit(0x47)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSLLVQ m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x47)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSLLVQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; bcode(v[0]) sets the broadcast bit for m64bcst.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x47)
            m.mrsd(lcode(v[2]), addr(v[0]), 64) // disp8*64 for a 512-bit operand
        })
    }
    // VPSLLVQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (0x62) for the register-register form.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40) // 0x40 = 512-bit vector length
            m.emit(0x47)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSLLVQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x47)
            m.mrsd(lcode(v[2]), addr(v[0]), 16) // disp8*16 for a 128-bit operand
        })
    }
    // VPSLLVQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00) // 0x00 = 128-bit vector length
            m.emit(0x47)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSLLVQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x47)
            m.mrsd(lcode(v[2]), addr(v[0]), 32) // disp8*32 for a 256-bit operand
        })
    }
    // VPSLLVQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20) // 0x20 = 256-bit vector length
            m.emit(0x47)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VPSLLVQ")
    }
    return p
}
 84777  
// VPSLLVW performs "Variable Shift Packed Word Data Left Logical".
//
// Mnemonic        : VPSLLVW
// Supported forms : (6 forms)
//
//    * VPSLLVW zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPSLLVW m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPSLLVW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSLLVW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPSLLVW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSLLVW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Word variant: EVEX-only (AVX512BW), no broadcast forms (the final evex
// argument is a literal 0), opcode byte 0x12. v0 = shift counts,
// v1 = source, v2 = destination.
func (self *Program) VPSLLVW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSLLVW", 3, Operands { v0, v1, v2 })
    // VPSLLVW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (0x62) for the register-register form.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3)) // vvvv encodes the source register v[1]
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40) // 0x40 = 512-bit vector length
            m.emit(0x12) // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0])) // ModRM: register-direct, reg = dest, rm = count register
        })
    }
    // VPSLLVW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0) // trailing 0: no broadcast for word element size
            m.emit(0x12)
            m.mrsd(lcode(v[2]), addr(v[0]), 64) // disp8*64 for a 512-bit operand
        })
    }
    // VPSLLVW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00) // 0x00 = 128-bit vector length
            m.emit(0x12)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSLLVW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x12)
            m.mrsd(lcode(v[2]), addr(v[0]), 16) // disp8*16 for a 128-bit operand
        })
    }
    // VPSLLVW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20) // 0x20 = 256-bit vector length
            m.emit(0x12)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSLLVW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x12)
            m.mrsd(lcode(v[2]), addr(v[0]), 32) // disp8*32 for a 256-bit operand
        })
    }
    // No form matched the supplied operand types: reject at assembly time.
    if p.len == 0 {
        panic("invalid operands for VPSLLVW")
    }
    return p
}
 84866  
 84867  // VPSLLW performs "Shift Packed Word Data Left Logical".
 84868  //
 84869  // Mnemonic        : VPSLLW
 84870  // Supported forms : (18 forms)
 84871  //
 84872  //    * VPSLLW imm8, xmm, xmm           [AVX]
 84873  //    * VPSLLW xmm, xmm, xmm            [AVX]
 84874  //    * VPSLLW m128, xmm, xmm           [AVX]
 84875  //    * VPSLLW imm8, ymm, ymm           [AVX2]
 84876  //    * VPSLLW xmm, ymm, ymm            [AVX2]
 84877  //    * VPSLLW m128, ymm, ymm           [AVX2]
 84878  //    * VPSLLW imm8, zmm, zmm{k}{z}     [AVX512BW]
 84879  //    * VPSLLW xmm, zmm, zmm{k}{z}      [AVX512BW]
 84880  //    * VPSLLW m128, zmm, zmm{k}{z}     [AVX512BW]
 84881  //    * VPSLLW imm8, m512, zmm{k}{z}    [AVX512BW]
 84882  //    * VPSLLW imm8, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
 84883  //    * VPSLLW xmm, xmm, xmm{k}{z}      [AVX512BW,AVX512VL]
 84884  //    * VPSLLW m128, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
 84885  //    * VPSLLW imm8, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
 84886  //    * VPSLLW xmm, ymm, ymm{k}{z}      [AVX512BW,AVX512VL]
 84887  //    * VPSLLW m128, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
 84888  //    * VPSLLW imm8, m128, xmm{k}{z}    [AVX512BW,AVX512VL]
 84889  //    * VPSLLW imm8, m256, ymm{k}{z}    [AVX512BW,AVX512VL]
 84890  //
 84891  func (self *Program) VPSLLW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
 84892      p := self.alloc("VPSLLW", 3, Operands { v0, v1, v2 })
 84893      // VPSLLW imm8, xmm, xmm
 84894      if isImm8(v0) && isXMM(v1) && isXMM(v2) {
 84895          self.require(ISA_AVX)
 84896          p.domain = DomainAVX
 84897          p.add(0, func(m *_Encoding, v []interface{}) {
 84898              m.vex2(1, 0, v[1], hlcode(v[2]))
 84899              m.emit(0x71)
 84900              m.emit(0xf0 | lcode(v[1]))
 84901              m.imm1(toImmAny(v[0]))
 84902          })
 84903      }
 84904      // VPSLLW xmm, xmm, xmm
 84905      if isXMM(v0) && isXMM(v1) && isXMM(v2) {
 84906          self.require(ISA_AVX)
 84907          p.domain = DomainAVX
 84908          p.add(0, func(m *_Encoding, v []interface{}) {
 84909              m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
 84910              m.emit(0xf1)
 84911              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 84912          })
 84913      }
 84914      // VPSLLW m128, xmm, xmm
 84915      if isM128(v0) && isXMM(v1) && isXMM(v2) {
 84916          self.require(ISA_AVX)
 84917          p.domain = DomainAVX
 84918          p.add(0, func(m *_Encoding, v []interface{}) {
 84919              m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 84920              m.emit(0xf1)
 84921              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 84922          })
 84923      }
 84924      // VPSLLW imm8, ymm, ymm
 84925      if isImm8(v0) && isYMM(v1) && isYMM(v2) {
 84926          self.require(ISA_AVX2)
 84927          p.domain = DomainAVX
 84928          p.add(0, func(m *_Encoding, v []interface{}) {
 84929              m.vex2(5, 0, v[1], hlcode(v[2]))
 84930              m.emit(0x71)
 84931              m.emit(0xf0 | lcode(v[1]))
 84932              m.imm1(toImmAny(v[0]))
 84933          })
 84934      }
 84935      // VPSLLW xmm, ymm, ymm
 84936      if isXMM(v0) && isYMM(v1) && isYMM(v2) {
 84937          self.require(ISA_AVX2)
 84938          p.domain = DomainAVX
 84939          p.add(0, func(m *_Encoding, v []interface{}) {
 84940              m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
 84941              m.emit(0xf1)
 84942              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 84943          })
 84944      }
 84945      // VPSLLW m128, ymm, ymm
 84946      if isM128(v0) && isYMM(v1) && isYMM(v2) {
 84947          self.require(ISA_AVX2)
 84948          p.domain = DomainAVX
 84949          p.add(0, func(m *_Encoding, v []interface{}) {
 84950              m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 84951              m.emit(0xf1)
 84952              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 84953          })
 84954      }
 84955      // VPSLLW imm8, zmm, zmm{k}{z}
 84956      if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
 84957          self.require(ISA_AVX512BW)
 84958          p.domain = DomainAVX
 84959          p.add(0, func(m *_Encoding, v []interface{}) {
 84960              m.emit(0x62)
 84961              m.emit(0xf1 ^ (ehcode(v[1]) << 5))
 84962              m.emit(0x7d ^ (hlcode(v[2]) << 3))
 84963              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x40)
 84964              m.emit(0x71)
 84965              m.emit(0xf0 | lcode(v[1]))
 84966              m.imm1(toImmAny(v[0]))
 84967          })
 84968      }
 84969      // VPSLLW xmm, zmm, zmm{k}{z}
 84970      if isEVEXXMM(v0) && isZMM(v1) && isZMMkz(v2) {
 84971          self.require(ISA_AVX512BW)
 84972          p.domain = DomainAVX
 84973          p.add(0, func(m *_Encoding, v []interface{}) {
 84974              m.emit(0x62)
 84975              m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 84976              m.emit(0x7d ^ (hlcode(v[1]) << 3))
 84977              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
 84978              m.emit(0xf1)
 84979              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 84980          })
 84981      }
 84982      // VPSLLW m128, zmm, zmm{k}{z}
 84983      if isM128(v0) && isZMM(v1) && isZMMkz(v2) {
 84984          self.require(ISA_AVX512BW)
 84985          p.domain = DomainAVX
 84986          p.add(0, func(m *_Encoding, v []interface{}) {
 84987              m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
 84988              m.emit(0xf1)
 84989              m.mrsd(lcode(v[2]), addr(v[0]), 16)
 84990          })
 84991      }
 84992      // VPSLLW imm8, m512, zmm{k}{z}
 84993      if isImm8(v0) && isM512(v1) && isZMMkz(v2) {
 84994          self.require(ISA_AVX512BW)
 84995          p.domain = DomainAVX
 84996          p.add(0, func(m *_Encoding, v []interface{}) {
 84997              m.evex(0b01, 0x05, 0b10, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), 0)
 84998              m.emit(0x71)
 84999              m.mrsd(6, addr(v[1]), 64)
 85000              m.imm1(toImmAny(v[0]))
 85001          })
 85002      }
 85003      // VPSLLW imm8, xmm, xmm{k}{z}
 85004      if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
 85005          self.require(ISA_AVX512VL | ISA_AVX512BW)
 85006          p.domain = DomainAVX
 85007          p.add(0, func(m *_Encoding, v []interface{}) {
 85008              m.emit(0x62)
 85009              m.emit(0xf1 ^ (ehcode(v[1]) << 5))
 85010              m.emit(0x7d ^ (hlcode(v[2]) << 3))
 85011              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x00)
 85012              m.emit(0x71)
 85013              m.emit(0xf0 | lcode(v[1]))
 85014              m.imm1(toImmAny(v[0]))
 85015          })
 85016      }
 85017      // VPSLLW xmm, xmm, xmm{k}{z}
 85018      if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
 85019          self.require(ISA_AVX512VL | ISA_AVX512BW)
 85020          p.domain = DomainAVX
 85021          p.add(0, func(m *_Encoding, v []interface{}) {
 85022              m.emit(0x62)
 85023              m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 85024              m.emit(0x7d ^ (hlcode(v[1]) << 3))
 85025              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
 85026              m.emit(0xf1)
 85027              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 85028          })
 85029      }
 85030      // VPSLLW m128, xmm, xmm{k}{z}
 85031      if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
 85032          self.require(ISA_AVX512VL | ISA_AVX512BW)
 85033          p.domain = DomainAVX
 85034          p.add(0, func(m *_Encoding, v []interface{}) {
 85035              m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
 85036              m.emit(0xf1)
 85037              m.mrsd(lcode(v[2]), addr(v[0]), 16)
 85038          })
 85039      }
 85040      // VPSLLW imm8, ymm, ymm{k}{z}
 85041      if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
 85042          self.require(ISA_AVX512VL | ISA_AVX512BW)
 85043          p.domain = DomainAVX
 85044          p.add(0, func(m *_Encoding, v []interface{}) {
 85045              m.emit(0x62)
 85046              m.emit(0xf1 ^ (ehcode(v[1]) << 5))
 85047              m.emit(0x7d ^ (hlcode(v[2]) << 3))
 85048              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x20)
 85049              m.emit(0x71)
 85050              m.emit(0xf0 | lcode(v[1]))
 85051              m.imm1(toImmAny(v[0]))
 85052          })
 85053      }
 85054      // VPSLLW xmm, ymm, ymm{k}{z}
 85055      if isEVEXXMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
 85056          self.require(ISA_AVX512VL | ISA_AVX512BW)
 85057          p.domain = DomainAVX
 85058          p.add(0, func(m *_Encoding, v []interface{}) {
 85059              m.emit(0x62)
 85060              m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 85061              m.emit(0x7d ^ (hlcode(v[1]) << 3))
 85062              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
 85063              m.emit(0xf1)
 85064              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 85065          })
 85066      }
 85067      // VPSLLW m128, ymm, ymm{k}{z}
 85068      if isM128(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
 85069          self.require(ISA_AVX512VL | ISA_AVX512BW)
 85070          p.domain = DomainAVX
 85071          p.add(0, func(m *_Encoding, v []interface{}) {
 85072              m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
 85073              m.emit(0xf1)
 85074              m.mrsd(lcode(v[2]), addr(v[0]), 16)
 85075          })
 85076      }
 85077      // VPSLLW imm8, m128, xmm{k}{z}
 85078      if isImm8(v0) && isM128(v1) && isXMMkz(v2) {
 85079          self.require(ISA_AVX512VL | ISA_AVX512BW)
 85080          p.domain = DomainAVX
 85081          p.add(0, func(m *_Encoding, v []interface{}) {
 85082              m.evex(0b01, 0x05, 0b00, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), 0)
 85083              m.emit(0x71)
 85084              m.mrsd(6, addr(v[1]), 16)
 85085              m.imm1(toImmAny(v[0]))
 85086          })
 85087      }
 85088      // VPSLLW imm8, m256, ymm{k}{z}
 85089      if isImm8(v0) && isM256(v1) && isYMMkz(v2) {
 85090          self.require(ISA_AVX512VL | ISA_AVX512BW)
 85091          p.domain = DomainAVX
 85092          p.add(0, func(m *_Encoding, v []interface{}) {
 85093              m.evex(0b01, 0x05, 0b01, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), 0)
 85094              m.emit(0x71)
 85095              m.mrsd(6, addr(v[1]), 32)
 85096              m.imm1(toImmAny(v[0]))
 85097          })
 85098      }
 85099      if p.len == 0 {
 85100          panic("invalid operands for VPSLLW")
 85101      }
 85102      return p
 85103  }
 85104  
// VPSRAD performs "Shift Packed Doubleword Data Right Arithmetic".
//
// Mnemonic        : VPSRAD
// Supported forms : (18 forms)
//
//    * VPSRAD imm8, xmm, xmm                   [AVX]
//    * VPSRAD xmm, xmm, xmm                    [AVX]
//    * VPSRAD m128, xmm, xmm                   [AVX]
//    * VPSRAD imm8, ymm, ymm                   [AVX2]
//    * VPSRAD xmm, ymm, ymm                    [AVX2]
//    * VPSRAD m128, ymm, ymm                   [AVX2]
//    * VPSRAD imm8, m512/m32bcst, zmm{k}{z}    [AVX512F]
//    * VPSRAD imm8, zmm, zmm{k}{z}             [AVX512F]
//    * VPSRAD xmm, zmm, zmm{k}{z}              [AVX512F]
//    * VPSRAD m128, zmm, zmm{k}{z}             [AVX512F]
//    * VPSRAD imm8, m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPSRAD imm8, m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPSRAD imm8, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPSRAD xmm, xmm, xmm{k}{z}              [AVX512F,AVX512VL]
//    * VPSRAD m128, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPSRAD imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//    * VPSRAD xmm, ymm, ymm{k}{z}              [AVX512F,AVX512VL]
//    * VPSRAD m128, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPSRAD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSRAD", 3, Operands { v0, v1, v2 })
    // Operands are in AT&T order: v0 = shift count (imm8/xmm/mem), v1 = source,
    // v2 = destination. Each matching operand pattern below registers one
    // candidate encoding via p.add(); more than one pattern may match and the
    // final encoding is selected later. Register-only EVEX forms emit the
    // 4-byte EVEX prefix manually with m.emit(); memory forms go through
    // m.evex()/m.mrsd() so compressed disp8 scaling (the last m.mrsd argument)
    // can be applied.
    // VPSRAD imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX.128.66.0F 72 /4 ib: destination goes in VEX.vvvv, source in ModRM.rm.
            m.vex2(1, 0, v[1], hlcode(v[2]))
            m.emit(0x72)
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xe2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xe2)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSRAD imm8, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, 0, v[1], hlcode(v[2]))
            m.emit(0x72)
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAD xmm, ymm, ymm
    if isXMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xe2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAD m128, ymm, ymm
    if isM128(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xe2)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSRAD imm8, m512/m32bcst, zmm{k}{z}
    if isImm8(v0) && isM512M32bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // disp8 scale 64 = full 512-bit vector memory operand.
            m.evex(0b01, 0x05, 0b10, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(4, addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAD imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62, then P0 (map + inverted R/X/B/R'),
            // P1 (pp/W + inverted vvvv), P2 (z, L'L in the 0x40 bits, b, aaa).
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x72)
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAD xmm, zmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xe2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAD m128, zmm, zmm{k}{z}
    if isM128(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Shift-count memory operand is always 128-bit, hence disp8 scale 16.
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe2)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRAD imm8, m128/m32bcst, xmm{k}{z}
    if isImm8(v0) && isM128M32bcst(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(4, addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAD imm8, m256/m32bcst, ymm{k}{z}
    if isImm8(v0) && isM256M32bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(4, addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAD imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x72)
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xe2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAD m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe2)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRAD imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x72)
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAD xmm, ymm, ymm{k}{z}
    if isEVEXXMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xe2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAD m128, ymm, ymm{k}{z}
    if isM128(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe2)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // No pattern matched: the caller passed an unsupported operand combination.
    if p.len == 0 {
        panic("invalid operands for VPSRAD")
    }
    return p
}
 85342  
// VPSRAQ performs "Shift Packed Quadword Data Right Arithmetic".
//
// Mnemonic        : VPSRAQ
// Supported forms : (12 forms)
//
//    * VPSRAQ imm8, m512/m64bcst, zmm{k}{z}    [AVX512F]
//    * VPSRAQ imm8, zmm, zmm{k}{z}             [AVX512F]
//    * VPSRAQ xmm, zmm, zmm{k}{z}              [AVX512F]
//    * VPSRAQ m128, zmm, zmm{k}{z}             [AVX512F]
//    * VPSRAQ imm8, m128/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPSRAQ imm8, m256/m64bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPSRAQ imm8, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPSRAQ xmm, xmm, xmm{k}{z}              [AVX512F,AVX512VL]
//    * VPSRAQ m128, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPSRAQ imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//    * VPSRAQ xmm, ymm, ymm{k}{z}              [AVX512F,AVX512VL]
//    * VPSRAQ m128, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPSRAQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSRAQ", 3, Operands { v0, v1, v2 })
    // AT&T operand order: v0 = shift count, v1 = source, v2 = destination.
    // VPSRAQ is EVEX-only (no VEX legacy forms). Compared with VPSRAD, the
    // quadword variant sets EVEX.W=1, which shows up as the 0x85 prefix-code
    // argument to m.evex() and the 0xfd (vs 0x7d) byte in the hand-rolled
    // EVEX prefixes below.
    // VPSRAQ imm8, m512/m64bcst, zmm{k}{z}
    if isImm8(v0) && isM512M64bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b10, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(4, addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAQ imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x72)
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAQ xmm, zmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xe2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAQ m128, zmm, zmm{k}{z}
    if isM128(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Shift-count memory operand is always 128-bit, hence disp8 scale 16.
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe2)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRAQ imm8, m128/m64bcst, xmm{k}{z}
    if isImm8(v0) && isM128M64bcst(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(4, addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAQ imm8, m256/m64bcst, ymm{k}{z}
    if isImm8(v0) && isM256M64bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(4, addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAQ imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x72)
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xe2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAQ m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe2)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRAQ imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x72)
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAQ xmm, ymm, ymm{k}{z}
    if isEVEXXMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xe2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAQ m128, ymm, ymm{k}{z}
    if isM128(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe2)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // No pattern matched: the caller passed an unsupported operand combination.
    if p.len == 0 {
        panic("invalid operands for VPSRAQ")
    }
    return p
}
 85512  
// VPSRAVD performs "Variable Shift Packed Doubleword Data Right Arithmetic".
//
// Mnemonic        : VPSRAVD
// Supported forms : (10 forms)
//
//    * VPSRAVD xmm, xmm, xmm                   [AVX2]
//    * VPSRAVD m128, xmm, xmm                  [AVX2]
//    * VPSRAVD ymm, ymm, ymm                   [AVX2]
//    * VPSRAVD m256, ymm, ymm                  [AVX2]
//    * VPSRAVD m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPSRAVD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPSRAVD m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPSRAVD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPSRAVD m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPSRAVD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPSRAVD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSRAVD", 3, Operands { v0, v1, v2 })
    // AT&T operand order: v0 = per-element shift counts, v1 = source,
    // v2 = destination. Variable shifts use the 0F38 map (opcode 0x46), so the
    // AVX2 register forms need a 3-byte VEX prefix (emitted inline as
    // 0xc4 + two bytes); memory forms go through m.vex3()/m.evex() with
    // m.mrsd() handling displacement encoding.
    // VPSRAVD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))
            m.emit(0x46)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAVD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x46)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSRAVD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit(0x46)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAVD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x46)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSRAVD m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Full-width memory source: disp8 compression scale is the vector size.
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x46)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPSRAVD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix; 0xf2 selects the 0F38 opcode map.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x46)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAVD m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x46)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRAVD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x46)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAVD m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x46)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPSRAVD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x46)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No pattern matched: the caller passed an unsupported operand combination.
    if p.len == 0 {
        panic("invalid operands for VPSRAVD")
    }
    return p
}
 85649  
// VPSRAVQ performs "Variable Shift Packed Quadword Data Right Arithmetic".
//
// Mnemonic        : VPSRAVQ
// Supported forms : (6 forms)
//
//    * VPSRAVQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPSRAVQ zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPSRAVQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPSRAVQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPSRAVQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPSRAVQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPSRAVQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSRAVQ", 3, Operands { v0, v1, v2 })
    // AT&T operand order: v0 = per-element shift counts, v1 = source,
    // v2 = destination. EVEX-only (no AVX2 quadword variable arithmetic
    // shift exists). Relative to VPSRAVD this sets EVEX.W=1: 0x85 in the
    // m.evex() prefix-code argument and 0xfd (vs 0x7d) in the hand-rolled
    // prefixes. Opcode 0x46 in the 0F38 map, same as VPSRAVD.
    // VPSRAVQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x46)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPSRAVQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x46)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAVQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x46)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRAVQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x46)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAVQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x46)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPSRAVQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x46)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No pattern matched: the caller passed an unsupported operand combination.
    if p.len == 0 {
        panic("invalid operands for VPSRAVQ")
    }
    return p
}
 85738  
// VPSRAVW performs "Variable Shift Packed Word Data Right Arithmetic".
//
// Mnemonic        : VPSRAVW
// Supported forms : (6 forms)
//
//    * VPSRAVW zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPSRAVW m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPSRAVW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSRAVW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPSRAVW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSRAVW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Operands are listed source-first, destination-last: v0 holds the
// per-element shift counts, v1 is the data source, v2 is the destination.
// Each form whose operand types match appends one candidate encoder via
// p.add; if no form matches, the function panics at the end.
func (self *Program) VPSRAVW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSRAVW", 3, Operands { v0, v1, v2 })
    // VPSRAVW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-direct form: the 4-byte EVEX prefix (0x62 plus three
            // payload bytes) is emitted inline. The trailing 0x40/0x20/0x00
            // term in the third payload byte selects the 512/256/128-bit
            // vector length respectively (compare the xmm/ymm forms below).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAVW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: m.evex assembles the EVEX prefix from its
            // arguments; the final argument to m.mrsd (64/16/32 across the
            // memory forms here) is the disp8 compression scale matching
            // the memory operand width.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x11)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPSRAVW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAVW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x11)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRAVW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x11)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAVW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x11)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No encodable form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPSRAVW")
    }
    return p
}
 85827  
// VPSRAW performs "Shift Packed Word Data Right Arithmetic".
//
// Mnemonic        : VPSRAW
// Supported forms : (18 forms)
//
//    * VPSRAW imm8, xmm, xmm           [AVX]
//    * VPSRAW xmm, xmm, xmm            [AVX]
//    * VPSRAW m128, xmm, xmm           [AVX]
//    * VPSRAW imm8, ymm, ymm           [AVX2]
//    * VPSRAW xmm, ymm, ymm            [AVX2]
//    * VPSRAW m128, ymm, ymm           [AVX2]
//    * VPSRAW imm8, zmm, zmm{k}{z}     [AVX512BW]
//    * VPSRAW xmm, zmm, zmm{k}{z}      [AVX512BW]
//    * VPSRAW m128, zmm, zmm{k}{z}     [AVX512BW]
//    * VPSRAW imm8, m512, zmm{k}{z}    [AVX512BW]
//    * VPSRAW imm8, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSRAW xmm, xmm, xmm{k}{z}      [AVX512BW,AVX512VL]
//    * VPSRAW m128, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSRAW imm8, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSRAW xmm, ymm, ymm{k}{z}      [AVX512BW,AVX512VL]
//    * VPSRAW m128, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSRAW imm8, m128, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPSRAW imm8, m256, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Operands are listed source-first, destination-last: v0 is the shift
// count (imm8, xmm, or m128), v1 is the data source, v2 the destination.
// VEX-encoded AVX/AVX2 forms come first, followed by the EVEX-encoded
// AVX-512 forms; every matching form appends one candidate encoder.
func (self *Program) VPSRAW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSRAW", 3, Operands { v0, v1, v2 })
    // VPSRAW imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Immediate-count form, opcode 0x71: 0xe0 = ModRM 11/100/rm,
            // i.e. opcode extension /4 (PSRAW per the Intel SDM) with the
            // data register in the rm field.
            m.vex2(1, 0, v[1], hlcode(v[2]))
            m.emit(0x71)
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xe1)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xe1)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSRAW imm8, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, 0, v[1], hlcode(v[2]))
            m.emit(0x71)
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAW xmm, ymm, ymm
    if isXMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xe1)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAW m128, ymm, ymm
    if isM128(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xe1)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSRAW imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form: 4-byte prefix emitted inline; the trailing
            // 0x40/0x20/0x00 term selects the 512/256/128-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x71)
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAW xmm, zmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xe1)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAW m128, zmm, zmm{k}{z}
    if isM128(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Shift count is always an m128 operand, hence disp8 scale 16
            // regardless of destination vector length.
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe1)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRAW imm8, m512, zmm{k}{z}
    if isImm8(v0) && isM512(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Immediate form on a memory source: /4 opcode extension goes
            // in the ModRM reg field; 64 is the disp8 scale for m512.
            m.evex(0b01, 0x05, 0b10, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x71)
            m.mrsd(4, addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAW imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x71)
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xe1)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe1)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRAW imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x71)
            m.emit(0xe0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAW xmm, ymm, ymm{k}{z}
    if isEVEXXMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xe1)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRAW m128, ymm, ymm{k}{z}
    if isM128(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe1)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRAW imm8, m128, xmm{k}{z}
    if isImm8(v0) && isM128(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x71)
            m.mrsd(4, addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRAW imm8, m256, ymm{k}{z}
    if isImm8(v0) && isM256(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x71)
            m.mrsd(4, addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encodable form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPSRAW")
    }
    return p
}
 86065  
// VPSRLD performs "Shift Packed Doubleword Data Right Logical".
//
// Mnemonic        : VPSRLD
// Supported forms : (18 forms)
//
//    * VPSRLD imm8, xmm, xmm                   [AVX]
//    * VPSRLD xmm, xmm, xmm                    [AVX]
//    * VPSRLD m128, xmm, xmm                   [AVX]
//    * VPSRLD imm8, ymm, ymm                   [AVX2]
//    * VPSRLD xmm, ymm, ymm                    [AVX2]
//    * VPSRLD m128, ymm, ymm                   [AVX2]
//    * VPSRLD imm8, m512/m32bcst, zmm{k}{z}    [AVX512F]
//    * VPSRLD imm8, zmm, zmm{k}{z}             [AVX512F]
//    * VPSRLD xmm, zmm, zmm{k}{z}              [AVX512F]
//    * VPSRLD m128, zmm, zmm{k}{z}             [AVX512F]
//    * VPSRLD imm8, m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPSRLD imm8, m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPSRLD imm8, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPSRLD xmm, xmm, xmm{k}{z}              [AVX512F,AVX512VL]
//    * VPSRLD m128, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPSRLD imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//    * VPSRLD xmm, ymm, ymm{k}{z}              [AVX512F,AVX512VL]
//    * VPSRLD m128, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Operands are listed source-first, destination-last: v0 is the shift
// count (imm8, xmm, or m128), v1 is the data source, v2 the destination.
// VEX-encoded AVX/AVX2 forms come first, followed by the EVEX-encoded
// AVX-512 forms; every matching form appends one candidate encoder.
func (self *Program) VPSRLD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSRLD", 3, Operands { v0, v1, v2 })
    // VPSRLD imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Immediate-count form, opcode 0x72: 0xd0 = ModRM 11/010/rm,
            // i.e. opcode extension /2 (PSRLD per the Intel SDM) with the
            // data register in the rm field.
            m.vex2(1, 0, v[1], hlcode(v[2]))
            m.emit(0x72)
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xd2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xd2)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSRLD imm8, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, 0, v[1], hlcode(v[2]))
            m.emit(0x72)
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLD xmm, ymm, ymm
    if isXMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xd2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLD m128, ymm, ymm
    if isM128(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xd2)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSRLD imm8, m512/m32bcst, zmm{k}{z}
    if isImm8(v0) && isM512M32bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Immediate form on a memory/broadcast source: /2 opcode
            // extension goes in the ModRM reg field; the final m.evex
            // argument carries the broadcast bit, and 64 is the disp8
            // scale for m512.
            m.evex(0b01, 0x05, 0b10, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(2, addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLD imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form: 4-byte prefix emitted inline; the trailing
            // 0x40/0x20/0x00 term selects the 512/256/128-bit vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x72)
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLD xmm, zmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xd2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLD m128, zmm, zmm{k}{z}
    if isM128(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Shift count is always an m128 operand, hence disp8 scale 16
            // regardless of destination vector length.
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xd2)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRLD imm8, m128/m32bcst, xmm{k}{z}
    if isImm8(v0) && isM128M32bcst(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(2, addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLD imm8, m256/m32bcst, ymm{k}{z}
    if isImm8(v0) && isM256M32bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x72)
            m.mrsd(2, addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLD imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x72)
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xd2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLD m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xd2)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRLD imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x72)
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLD xmm, ymm, ymm{k}{z}
    if isEVEXXMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xd2)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLD m128, ymm, ymm{k}{z}
    if isM128(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xd2)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // No encodable form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPSRLD")
    }
    return p
}
 86303  
// VPSRLDQ performs "Shift Packed Double Quadword Right Logical".
//
// Mnemonic        : VPSRLDQ
// Supported forms : (8 forms)
//
//    * VPSRLDQ imm8, xmm, xmm     [AVX]
//    * VPSRLDQ imm8, ymm, ymm     [AVX2]
//    * VPSRLDQ imm8, zmm, zmm     [AVX512BW]
//    * VPSRLDQ imm8, m512, zmm    [AVX512BW]
//    * VPSRLDQ imm8, xmm, xmm     [AVX512BW,AVX512VL]
//    * VPSRLDQ imm8, m128, xmm    [AVX512BW,AVX512VL]
//    * VPSRLDQ imm8, ymm, ymm     [AVX512BW,AVX512VL]
//    * VPSRLDQ imm8, m256, ymm    [AVX512BW,AVX512VL]
//
// Operands are listed source-first, destination-last: v0 is the byte
// shift count (imm8 only), v1 is the data source, v2 the destination.
// Unlike the other shifts in this file, VPSRLDQ supports no {k}{z}
// masking: the EVEX forms below pass 0 for the mask and zeroing bits.
func (self *Program) VPSRLDQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSRLDQ", 3, Operands { v0, v1, v2 })
    // VPSRLDQ imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Opcode 0x73: 0xd8 = ModRM 11/011/rm, i.e. opcode extension /3
            // (PSRLDQ per the Intel SDM) with the data register in rm.
            m.vex2(1, 0, v[1], hlcode(v[2]))
            m.emit(0x73)
            m.emit(0xd8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLDQ imm8, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, 0, v[1], hlcode(v[2]))
            m.emit(0x73)
            m.emit(0xd8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLDQ imm8, zmm, zmm
    if isImm8(v0) && isZMM(v1) && isZMM(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form, no masking: the third payload byte omits
            // the kcode/zcode terms used by maskable instructions; the
            // trailing 0x40/0x20/0x00 selects the vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | 0x40)
            m.emit(0x73)
            m.emit(0xd8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLDQ imm8, m512, zmm
    if isImm8(v0) && isM512(v1) && isZMM(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: /3 opcode extension in the ModRM reg field; the
            // final m.mrsd argument (64/16/32 below) is the disp8 scale
            // matching the memory operand width.
            m.evex(0b01, 0x05, 0b10, 0, addr(v[1]), vcode(v[2]), 0, 0, 0)
            m.emit(0x73)
            m.mrsd(3, addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLDQ imm8, xmm, xmm
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | 0x00)
            m.emit(0x73)
            m.emit(0xd8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLDQ imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isEVEXXMM(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, 0, addr(v[1]), vcode(v[2]), 0, 0, 0)
            m.emit(0x73)
            m.mrsd(3, addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLDQ imm8, ymm, ymm
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((0x08 ^ (ecode(v[2]) << 3)) | 0x20)
            m.emit(0x73)
            m.emit(0xd8 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLDQ imm8, m256, ymm
    if isImm8(v0) && isM256(v1) && isEVEXYMM(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, 0, addr(v[1]), vcode(v[2]), 0, 0, 0)
            m.emit(0x73)
            m.mrsd(3, addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encodable form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VPSRLDQ")
    }
    return p
}
 86422  
// VPSRLQ performs "Shift Packed Quadword Data Right Logical".
//
// Mnemonic        : VPSRLQ
// Supported forms : (18 forms)
//
//    * VPSRLQ imm8, xmm, xmm                   [AVX]
//    * VPSRLQ xmm, xmm, xmm                    [AVX]
//    * VPSRLQ m128, xmm, xmm                   [AVX]
//    * VPSRLQ imm8, ymm, ymm                   [AVX2]
//    * VPSRLQ xmm, ymm, ymm                    [AVX2]
//    * VPSRLQ m128, ymm, ymm                   [AVX2]
//    * VPSRLQ imm8, m512/m64bcst, zmm{k}{z}    [AVX512F]
//    * VPSRLQ imm8, zmm, zmm{k}{z}             [AVX512F]
//    * VPSRLQ xmm, zmm, zmm{k}{z}              [AVX512F]
//    * VPSRLQ m128, zmm, zmm{k}{z}             [AVX512F]
//    * VPSRLQ imm8, m128/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPSRLQ imm8, m256/m64bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPSRLQ imm8, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPSRLQ xmm, xmm, xmm{k}{z}              [AVX512F,AVX512VL]
//    * VPSRLQ m128, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPSRLQ imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//    * VPSRLQ xmm, ymm, ymm{k}{z}              [AVX512F,AVX512VL]
//    * VPSRLQ m128, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Operands follow AT&T order (source(s) first, destination last). Each "if"
// below corresponds to one supported form; every form whose operand types
// match registers a candidate encoder via p.add(), and if none match the
// function panics. Two opcodes are involved: 0x73 with ModRM.reg = /2 for the
// shift-by-immediate forms, and 0xD3 for the shift-by-xmm-count forms
// (see Intel SDM, PSRLW/PSRLD/PSRLQ).
func (self *Program) VPSRLQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSRLQ", 3, Operands { v0, v1, v2 })
    // VPSRLQ imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, 0, v[1], hlcode(v[2]))
            m.emit(0x73)
            m.emit(0xd0 | lcode(v[1]))   // ModRM: mod=11, reg=/2, rm = source register
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLQ xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xd3)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))   // ModRM: reg = dest, rm = count register
        })
    }
    // VPSRLQ m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xd3)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)   // scale 1: no disp8 compression under VEX
        })
    }
    // VPSRLQ imm8, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, 0, v[1], hlcode(v[2]))   // VEX.L=1 (256-bit) is folded into the first argument
            m.emit(0x73)
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLQ xmm, ymm, ymm
    if isXMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xd3)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLQ m128, ymm, ymm
    if isM128(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xd3)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSRLQ imm8, m512/m64bcst, zmm{k}{z}
    if isImm8(v0) && isM512M64bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX.66.0F.W1, L'L=10 (512-bit); last arg is the broadcast bit from the operand
            m.evex(0b01, 0x85, 0b10, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x73)
            m.mrsd(2, addr(v[1]), 64)   // /2; disp8*N compression with N=64 (full ZMM memory operand)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLQ imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix emitted byte-by-byte for the register-only form
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x40)   // 0x40 => L'L=10 (512-bit)
            m.emit(0x73)
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLQ xmm, zmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xd3)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLQ m128, zmm, zmm{k}{z}
    if isM128(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xd3)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)   // disp8*N with N=16: count operand is always m128
        })
    }
    // VPSRLQ imm8, m128/m64bcst, xmm{k}{z}
    if isImm8(v0) && isM128M64bcst(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x73)
            m.mrsd(2, addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLQ imm8, m256/m64bcst, ymm{k}{z}
    if isImm8(v0) && isM256M64bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x73)
            m.mrsd(2, addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLQ imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x00)   // 0x00 => L'L=00 (128-bit)
            m.emit(0x73)
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xd3)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLQ m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xd3)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRLQ imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x20)   // 0x20 => L'L=01 (256-bit)
            m.emit(0x73)
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLQ xmm, ymm, ymm{k}{z}
    if isEVEXXMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xd3)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLQ m128, ymm, ymm{k}{z}
    if isM128(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xd3)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // No form matched the supplied operand types: refuse to assemble.
    if p.len == 0 {
        panic("invalid operands for VPSRLQ")
    }
    return p
}
 86660  
// VPSRLVD performs "Variable Shift Packed Doubleword Data Right Logical".
//
// Mnemonic        : VPSRLVD
// Supported forms : (10 forms)
//
//    * VPSRLVD xmm, xmm, xmm                   [AVX2]
//    * VPSRLVD m128, xmm, xmm                  [AVX2]
//    * VPSRLVD ymm, ymm, ymm                   [AVX2]
//    * VPSRLVD m256, ymm, ymm                  [AVX2]
//    * VPSRLVD m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPSRLVD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPSRLVD m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPSRLVD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPSRLVD m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPSRLVD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Operands follow AT&T order (per-element shift counts first, destination
// last). All forms encode opcode 0x45 in the 0F38 map with W=0 (dword
// element size); each matching "if" registers one candidate encoder and the
// function panics if no form matches.
func (self *Program) VPSRLVD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSRLVD", 3, Operands { v0, v1, v2 })
    // VPSRLVD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Three-byte VEX prefix (0xC4) emitted manually for the register form
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79 ^ (hlcode(v[1]) << 3))   // 0x79: pp=01 (66), W=0; vvvv = second source
            m.emit(0x45)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLVD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x01, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x45)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)   // scale 1: no disp8 compression under VEX
        })
    }
    // VPSRLVD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))   // 0x7d: same as 0x79 but VEX.L=1 (256-bit)
            m.emit(0x45)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLVD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x45)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSRLVD m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX.66.0F38.W0, L'L=10 (512-bit); last arg carries the {1to16} broadcast bit
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x45)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)   // disp8*N compression, N=64 (full ZMM memory operand)
        })
    }
    // VPSRLVD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix emitted byte-by-byte for the register-only form
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // 0x40 => L'L=10 (512-bit)
            m.emit(0x45)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLVD m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x45)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRLVD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // 0x00 => L'L=00 (128-bit)
            m.emit(0x45)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLVD m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x45)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPSRLVD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // 0x20 => L'L=01 (256-bit)
            m.emit(0x45)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types: refuse to assemble.
    if p.len == 0 {
        panic("invalid operands for VPSRLVD")
    }
    return p
}
 86797  
// VPSRLVQ performs "Variable Shift Packed Quadword Data Right Logical".
//
// Mnemonic        : VPSRLVQ
// Supported forms : (10 forms)
//
//    * VPSRLVQ xmm, xmm, xmm                   [AVX2]
//    * VPSRLVQ m128, xmm, xmm                  [AVX2]
//    * VPSRLVQ ymm, ymm, ymm                   [AVX2]
//    * VPSRLVQ m256, ymm, ymm                  [AVX2]
//    * VPSRLVQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPSRLVQ zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPSRLVQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPSRLVQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPSRLVQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPSRLVQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Identical structure to VPSRLVD but with W=1 (qword element size): the
// VEX/EVEX "opcode byte 3" constants gain the W bit (0xf9/0xfd vs 0x79/0x7d,
// and EVEX prefix selector 0x85 vs 0x05). Opcode is 0x45 in the 0F38 map.
// Operands follow AT&T order (per-element shift counts first, destination
// last); each matching form registers one candidate encoder.
func (self *Program) VPSRLVQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSRLVQ", 3, Operands { v0, v1, v2 })
    // VPSRLVQ xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Three-byte VEX prefix (0xC4) emitted manually for the register form
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xf9 ^ (hlcode(v[1]) << 3))   // 0xf9: pp=01 (66), W=1; vvvv = second source
            m.emit(0x45)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLVQ m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x81, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x45)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)   // scale 1: no disp8 compression under VEX
        })
    }
    // VPSRLVQ ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[2]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))   // 0xfd: same as 0xf9 but VEX.L=1 (256-bit)
            m.emit(0x45)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLVQ m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x85, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x45)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSRLVQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX.66.0F38.W1, L'L=10 (512-bit); last arg carries the {1to8} broadcast bit
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x45)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)   // disp8*N compression, N=64 (full ZMM memory operand)
        })
    }
    // VPSRLVQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix emitted byte-by-byte for the register-only form
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // 0x40 => L'L=10 (512-bit)
            m.emit(0x45)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLVQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x45)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRLVQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // 0x00 => L'L=00 (128-bit)
            m.emit(0x45)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLVQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x45)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPSRLVQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // 0x20 => L'L=01 (256-bit)
            m.emit(0x45)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types: refuse to assemble.
    if p.len == 0 {
        panic("invalid operands for VPSRLVQ")
    }
    return p
}
 86934  
// VPSRLVW performs "Variable Shift Packed Word Data Right Logical".
//
// Mnemonic        : VPSRLVW
// Supported forms : (6 forms)
//
//    * VPSRLVW zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPSRLVW m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPSRLVW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSRLVW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPSRLVW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSRLVW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// EVEX-only (no VEX encoding exists for word-granular variable shifts) and,
// unlike VPSRLVD/VPSRLVQ, without embedded-broadcast memory forms — note the
// broadcast argument to m.evex() is hard-coded to 0. Opcode is 0x10 in the
// 0F38 map. Operands follow AT&T order (per-element shift counts first,
// destination last); each matching form registers one candidate encoder.
func (self *Program) VPSRLVW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSRLVW", 3, Operands { v0, v1, v2 })
    // VPSRLVW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix emitted byte-by-byte for the register-only form
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)   // 0x40 => L'L=10 (512-bit)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLVW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)   // disp8*N compression, N=64 (full ZMM memory operand)
        })
    }
    // VPSRLVW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)   // 0x00 => L'L=00 (128-bit)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLVW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRLVW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)   // 0x20 => L'L=01 (256-bit)
            m.emit(0x10)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLVW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x10)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No form matched the supplied operand types: refuse to assemble.
    if p.len == 0 {
        panic("invalid operands for VPSRLVW")
    }
    return p
}
 87023  
 87024  // VPSRLW performs "Shift Packed Word Data Right Logical".
 87025  //
 87026  // Mnemonic        : VPSRLW
 87027  // Supported forms : (18 forms)
 87028  //
 87029  //    * VPSRLW imm8, xmm, xmm           [AVX]
 87030  //    * VPSRLW xmm, xmm, xmm            [AVX]
 87031  //    * VPSRLW m128, xmm, xmm           [AVX]
 87032  //    * VPSRLW imm8, ymm, ymm           [AVX2]
 87033  //    * VPSRLW xmm, ymm, ymm            [AVX2]
 87034  //    * VPSRLW m128, ymm, ymm           [AVX2]
 87035  //    * VPSRLW imm8, zmm, zmm{k}{z}     [AVX512BW]
 87036  //    * VPSRLW xmm, zmm, zmm{k}{z}      [AVX512BW]
 87037  //    * VPSRLW m128, zmm, zmm{k}{z}     [AVX512BW]
 87038  //    * VPSRLW imm8, m512, zmm{k}{z}    [AVX512BW]
 87039  //    * VPSRLW imm8, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
 87040  //    * VPSRLW xmm, xmm, xmm{k}{z}      [AVX512BW,AVX512VL]
 87041  //    * VPSRLW m128, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
 87042  //    * VPSRLW imm8, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
 87043  //    * VPSRLW xmm, ymm, ymm{k}{z}      [AVX512BW,AVX512VL]
 87044  //    * VPSRLW m128, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
 87045  //    * VPSRLW imm8, m128, xmm{k}{z}    [AVX512BW,AVX512VL]
 87046  //    * VPSRLW imm8, m256, ymm{k}{z}    [AVX512BW,AVX512VL]
 87047  //
func (self *Program) VPSRLW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Operands are source-first (see the form list above): v0 is the shift
    // count (imm8, xmm or m128), v1 the value being shifted (register or, for
    // the imm8+memory forms, the memory source), and v2 the destination.
    // Every form whose operand kinds match registers one encoder closure via
    // p.add; if none matched, the panic at the bottom fires.
    p := self.alloc("VPSRLW", 3, Operands { v0, v1, v2 })
    // VPSRLW imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Immediate-count form: 2-byte VEX prefix, group opcode 0x71,
            // then ModRM with mod=11 and reg fixed to /2 (0xd0), rm = src.
            m.vex2(1, 0, v[1], hlcode(v[2]))
            m.emit(0x71)
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-count form: opcode 0xd1, ModRM mod=11, reg=dst, rm=count.
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xd1)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory-count form: mrsd emits ModRM/SIB/displacement; the final
            // argument (1) is the disp8 scale — VEX forms are unscaled.
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xd1)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSRLW imm8, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same as the xmm imm8 form but with VEX.L set (first vex2 arg 5 vs 1).
            m.vex2(5, 0, v[1], hlcode(v[2]))
            m.emit(0x71)
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLW xmm, ymm, ymm
    if isXMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xd1)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLW m128, ymm, ymm
    if isM128(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xd1)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSRLW imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix emitted byte-by-byte: 0x62 escape plus three payload
            // bytes (R/X/B bits, vvvv, then z/L'L/b/aaa — 0x40 here selects
            // 512-bit vector length; the xmm/ymm variants below use 0x00/0x20).
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x71)
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLW xmm, zmm, zmm{k}{z}
    if isEVEXXMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xd1)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLW m128, zmm, zmm{k}{z}
    if isM128(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form via the evex helper; the trailing 16 in mrsd is
            // the disp8 compression factor (the count operand is 16 bytes).
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xd1)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRLW imm8, m512, zmm{k}{z}
    if isImm8(v0) && isM512(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Group-opcode memory form: ModRM.reg carries the fixed /2
            // extension (first mrsd argument), disp8 compressed by 64.
            m.evex(0b01, 0x05, 0b10, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x71)
            m.mrsd(2, addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLW imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x71)
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xd1)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xd1)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRLW imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ (ehcode(v[1]) << 5))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x71)
            m.emit(0xd0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLW xmm, ymm, ymm{k}{z}
    if isEVEXXMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xd1)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSRLW m128, ymm, ymm{k}{z}
    if isM128(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xd1)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSRLW imm8, m128, xmm{k}{z}
    if isImm8(v0) && isM128(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x71)
            m.mrsd(2, addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPSRLW imm8, m256, ymm{k}{z}
    if isImm8(v0) && isM256(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, 0, addr(v[1]), vcode(v[2]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x71)
            m.mrsd(2, addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operand kinds.
    if p.len == 0 {
        panic("invalid operands for VPSRLW")
    }
    return p
}
 87261  
// VPSUBB performs "Subtract Packed Byte Integers".
//
// Mnemonic        : VPSUBB
// Supported forms : (10 forms)
//
//    * VPSUBB xmm, xmm, xmm           [AVX]
//    * VPSUBB m128, xmm, xmm          [AVX]
//    * VPSUBB ymm, ymm, ymm           [AVX2]
//    * VPSUBB m256, ymm, ymm          [AVX2]
//    * VPSUBB zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPSUBB m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPSUBB xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSUBB m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPSUBB ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPSUBB m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
func (self *Program) VPSUBB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Operands are source-first (see form list): v0 = subtrahend (register or
    // memory), v1 = minuend register, v2 = destination. Each matching form
    // registers an encoder closure; if none matched, the panic below fires.
    p := self.alloc("VPSUBB", 3, Operands { v0, v1, v2 })
    // VPSUBB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix, opcode 0xf8, then ModRM mod=11, reg=dst, rm=src.
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xf8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: mrsd emits ModRM/SIB/displacement, disp8 scale 1 (VEX).
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xf8)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSUBB ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same as the xmm form but with VEX.L set (first vex2 arg 5 vs 1).
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xf8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBB m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xf8)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSUBB zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix emitted byte-by-byte: 0x62 escape plus three payload
            // bytes; the 0x40 in the last payload byte selects 512-bit length
            // (0x00 = 128-bit, 0x20 = 256-bit in the VL forms below).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xf8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBB m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; trailing 64 is the disp8 compression factor
            // (full 64-byte operand). No broadcast bit: last evex arg is 0.
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xf8)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPSUBB xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xf8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBB m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xf8)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSUBB ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xf8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBB m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xf8)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No form matched the supplied operand kinds.
    if p.len == 0 {
        panic("invalid operands for VPSUBB")
    }
    return p
}
 87394  
// VPSUBD performs "Subtract Packed Doubleword Integers".
//
// Mnemonic        : VPSUBD
// Supported forms : (10 forms)
//
//    * VPSUBD xmm, xmm, xmm                   [AVX]
//    * VPSUBD m128, xmm, xmm                  [AVX]
//    * VPSUBD ymm, ymm, ymm                   [AVX2]
//    * VPSUBD m256, ymm, ymm                  [AVX2]
//    * VPSUBD m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPSUBD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPSUBD m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPSUBD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPSUBD m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPSUBD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPSUBD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Operands are source-first (see form list): v0 = subtrahend (register,
    // memory, or m32-broadcast memory), v1 = minuend register, v2 =
    // destination. Unlike the byte/word variants, the EVEX memory forms here
    // pass bcode(v[0]) so the embedded-broadcast bit is honored.
    p := self.alloc("VPSUBD", 3, Operands { v0, v1, v2 })
    // VPSUBD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix, opcode 0xfa, then ModRM mod=11, reg=dst, rm=src.
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xfa)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: mrsd emits ModRM/SIB/displacement, disp8 scale 1 (VEX).
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xfa)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSUBD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same as the xmm form but with VEX.L set (first vex2 arg 5 vs 1).
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xfa)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xfa)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSUBD m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form with broadcast support (bcode(v[0]) sets EVEX.b
            // when the operand is a {1toN} broadcast); disp8 compressed by 64.
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xfa)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPSUBD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix emitted byte-by-byte: 0x62 escape plus three payload
            // bytes; 0x40 in the last payload byte selects 512-bit length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xfa)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBD m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xfa)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSUBD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xfa)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBD m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xfa)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPSUBD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xfa)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand kinds.
    if p.len == 0 {
        panic("invalid operands for VPSUBD")
    }
    return p
}
 87527  
// VPSUBQ performs "Subtract Packed Quadword Integers".
//
// Mnemonic        : VPSUBQ
// Supported forms : (10 forms)
//
//    * VPSUBQ xmm, xmm, xmm                   [AVX]
//    * VPSUBQ m128, xmm, xmm                  [AVX]
//    * VPSUBQ ymm, ymm, ymm                   [AVX2]
//    * VPSUBQ m256, ymm, ymm                  [AVX2]
//    * VPSUBQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPSUBQ zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPSUBQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPSUBQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPSUBQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPSUBQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPSUBQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Operands are source-first (see form list): v0 = subtrahend (register,
    // memory, or m64-broadcast memory), v1 = minuend register, v2 =
    // destination. The EVEX forms differ from VPSUBD by the element-width
    // bit: 0x85 (vs 0x05) in the evex helper and 0xfd (vs 0x7d) in the raw
    // prefix — presumably EVEX.W=1 for 64-bit elements (TODO confirm against
    // the evex helper's parameter layout).
    p := self.alloc("VPSUBQ", 3, Operands { v0, v1, v2 })
    // VPSUBQ xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix, opcode 0xfb, then ModRM mod=11, reg=dst, rm=src.
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xfb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBQ m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: mrsd emits ModRM/SIB/displacement, disp8 scale 1 (VEX).
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xfb)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSUBQ ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same as the xmm form but with VEX.L set (first vex2 arg 5 vs 1).
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xfb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBQ m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xfb)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSUBQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form with broadcast support (bcode(v[0]) sets EVEX.b
            // for {1toN} operands); disp8 compressed by 64.
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xfb)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPSUBQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix emitted byte-by-byte: 0x62 escape plus three payload
            // bytes; 0x40 in the last payload byte selects 512-bit length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xfb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xfb)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSUBQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xfb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xfb)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPSUBQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xfb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand kinds.
    if p.len == 0 {
        panic("invalid operands for VPSUBQ")
    }
    return p
}
 87660  
 87661  // VPSUBSB performs "Subtract Packed Signed Byte Integers with Signed Saturation".
 87662  //
 87663  // Mnemonic        : VPSUBSB
 87664  // Supported forms : (10 forms)
 87665  //
 87666  //    * VPSUBSB xmm, xmm, xmm           [AVX]
 87667  //    * VPSUBSB m128, xmm, xmm          [AVX]
 87668  //    * VPSUBSB ymm, ymm, ymm           [AVX2]
 87669  //    * VPSUBSB m256, ymm, ymm          [AVX2]
 87670  //    * VPSUBSB zmm, zmm, zmm{k}{z}     [AVX512BW]
 87671  //    * VPSUBSB m512, zmm, zmm{k}{z}    [AVX512BW]
 87672  //    * VPSUBSB xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
 87673  //    * VPSUBSB m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
 87674  //    * VPSUBSB ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
 87675  //    * VPSUBSB m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
 87676  //
 87677  func (self *Program) VPSUBSB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
 87678      p := self.alloc("VPSUBSB", 3, Operands { v0, v1, v2 })
 87679      // VPSUBSB xmm, xmm, xmm
 87680      if isXMM(v0) && isXMM(v1) && isXMM(v2) {
 87681          self.require(ISA_AVX)
 87682          p.domain = DomainAVX
 87683          p.add(0, func(m *_Encoding, v []interface{}) {
 87684              m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
 87685              m.emit(0xe8)
 87686              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 87687          })
 87688      }
 87689      // VPSUBSB m128, xmm, xmm
 87690      if isM128(v0) && isXMM(v1) && isXMM(v2) {
 87691          self.require(ISA_AVX)
 87692          p.domain = DomainAVX
 87693          p.add(0, func(m *_Encoding, v []interface{}) {
 87694              m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 87695              m.emit(0xe8)
 87696              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 87697          })
 87698      }
 87699      // VPSUBSB ymm, ymm, ymm
 87700      if isYMM(v0) && isYMM(v1) && isYMM(v2) {
 87701          self.require(ISA_AVX2)
 87702          p.domain = DomainAVX
 87703          p.add(0, func(m *_Encoding, v []interface{}) {
 87704              m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
 87705              m.emit(0xe8)
 87706              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 87707          })
 87708      }
 87709      // VPSUBSB m256, ymm, ymm
 87710      if isM256(v0) && isYMM(v1) && isYMM(v2) {
 87711          self.require(ISA_AVX2)
 87712          p.domain = DomainAVX
 87713          p.add(0, func(m *_Encoding, v []interface{}) {
 87714              m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
 87715              m.emit(0xe8)
 87716              m.mrsd(lcode(v[2]), addr(v[0]), 1)
 87717          })
 87718      }
 87719      // VPSUBSB zmm, zmm, zmm{k}{z}
 87720      if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
 87721          self.require(ISA_AVX512BW)
 87722          p.domain = DomainAVX
 87723          p.add(0, func(m *_Encoding, v []interface{}) {
 87724              m.emit(0x62)
 87725              m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 87726              m.emit(0x7d ^ (hlcode(v[1]) << 3))
 87727              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
 87728              m.emit(0xe8)
 87729              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 87730          })
 87731      }
 87732      // VPSUBSB m512, zmm, zmm{k}{z}
 87733      if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
 87734          self.require(ISA_AVX512BW)
 87735          p.domain = DomainAVX
 87736          p.add(0, func(m *_Encoding, v []interface{}) {
 87737              m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
 87738              m.emit(0xe8)
 87739              m.mrsd(lcode(v[2]), addr(v[0]), 64)
 87740          })
 87741      }
 87742      // VPSUBSB xmm, xmm, xmm{k}{z}
 87743      if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
 87744          self.require(ISA_AVX512VL | ISA_AVX512BW)
 87745          p.domain = DomainAVX
 87746          p.add(0, func(m *_Encoding, v []interface{}) {
 87747              m.emit(0x62)
 87748              m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 87749              m.emit(0x7d ^ (hlcode(v[1]) << 3))
 87750              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
 87751              m.emit(0xe8)
 87752              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 87753          })
 87754      }
 87755      // VPSUBSB m128, xmm, xmm{k}{z}
 87756      if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
 87757          self.require(ISA_AVX512VL | ISA_AVX512BW)
 87758          p.domain = DomainAVX
 87759          p.add(0, func(m *_Encoding, v []interface{}) {
 87760              m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
 87761              m.emit(0xe8)
 87762              m.mrsd(lcode(v[2]), addr(v[0]), 16)
 87763          })
 87764      }
 87765      // VPSUBSB ymm, ymm, ymm{k}{z}
 87766      if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
 87767          self.require(ISA_AVX512VL | ISA_AVX512BW)
 87768          p.domain = DomainAVX
 87769          p.add(0, func(m *_Encoding, v []interface{}) {
 87770              m.emit(0x62)
 87771              m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
 87772              m.emit(0x7d ^ (hlcode(v[1]) << 3))
 87773              m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
 87774              m.emit(0xe8)
 87775              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
 87776          })
 87777      }
 87778      // VPSUBSB m256, ymm, ymm{k}{z}
 87779      if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
 87780          self.require(ISA_AVX512VL | ISA_AVX512BW)
 87781          p.domain = DomainAVX
 87782          p.add(0, func(m *_Encoding, v []interface{}) {
 87783              m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
 87784              m.emit(0xe8)
 87785              m.mrsd(lcode(v[2]), addr(v[0]), 32)
 87786          })
 87787      }
 87788      if p.len == 0 {
 87789          panic("invalid operands for VPSUBSB")
 87790      }
 87791      return p
 87792  }
 87793  
 87794  // VPSUBSW performs "Subtract Packed Signed Word Integers with Signed Saturation".
 87795  //
 87796  // Mnemonic        : VPSUBSW
 87797  // Supported forms : (10 forms)
 87798  //
 87799  //    * VPSUBSW xmm, xmm, xmm           [AVX]
 87800  //    * VPSUBSW m128, xmm, xmm          [AVX]
 87801  //    * VPSUBSW ymm, ymm, ymm           [AVX2]
 87802  //    * VPSUBSW m256, ymm, ymm          [AVX2]
 87803  //    * VPSUBSW zmm, zmm, zmm{k}{z}     [AVX512BW]
 87804  //    * VPSUBSW m512, zmm, zmm{k}{z}    [AVX512BW]
 87805  //    * VPSUBSW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
 87806  //    * VPSUBSW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
 87807  //    * VPSUBSW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
 87808  //    * VPSUBSW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
 87809  //
func (self *Program) VPSUBSW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSUBSW", 3, Operands { v0, v1, v2 })
    // Generated dispatcher: each if-block below registers one candidate
    // encoder (via p.add) for every operand form that matches. The PSUBSW
    // opcode byte is 0xE9 in all forms.
    // VPSUBSW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX-encoded register form: 2-byte VEX prefix, opcode, then a
            // register-direct ModRM byte (mod=11, reg=dst, rm=src).
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xe9)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBSW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX-encoded memory form: mrsd() emits ModRM/SIB/displacement
            // for the memory operand.
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xe9)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSUBSW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xe9)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBSW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xe9)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSUBSW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix: 0x62 escape followed by three
            // payload bytes; the trailing 0x40 OR selects 512-bit vector
            // length (0x20 = 256-bit, 0x00 = 128-bit in the VL forms below).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xe9)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBSW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; the final mrsd() argument is the disp8*N
            // compression scale (64 bytes for a 512-bit operand).
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe9)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPSUBSW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX layout as the zmm form, with 0x00 (128-bit length).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xe9)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBSW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // disp8*N scale 16 for a 128-bit memory operand.
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe9)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSUBSW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX layout as the zmm form, with 0x20 (256-bit length).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xe9)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBSW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // disp8*N scale 32 for a 256-bit memory operand.
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xe9)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No operand form matched: the supplied operand types are invalid.
    if p.len == 0 {
        panic("invalid operands for VPSUBSW")
    }
    return p
}
 87926  
 87927  // VPSUBUSB performs "Subtract Packed Unsigned Byte Integers with Unsigned Saturation".
 87928  //
 87929  // Mnemonic        : VPSUBUSB
 87930  // Supported forms : (10 forms)
 87931  //
 87932  //    * VPSUBUSB xmm, xmm, xmm           [AVX]
 87933  //    * VPSUBUSB m128, xmm, xmm          [AVX]
 87934  //    * VPSUBUSB ymm, ymm, ymm           [AVX2]
 87935  //    * VPSUBUSB m256, ymm, ymm          [AVX2]
 87936  //    * VPSUBUSB zmm, zmm, zmm{k}{z}     [AVX512BW]
 87937  //    * VPSUBUSB m512, zmm, zmm{k}{z}    [AVX512BW]
 87938  //    * VPSUBUSB xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
 87939  //    * VPSUBUSB m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
 87940  //    * VPSUBUSB ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
 87941  //    * VPSUBUSB m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
 87942  //
func (self *Program) VPSUBUSB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSUBUSB", 3, Operands { v0, v1, v2 })
    // Generated dispatcher: each if-block below registers one candidate
    // encoder (via p.add) for every operand form that matches. The PSUBUSB
    // opcode byte is 0xD8 in all forms.
    // VPSUBUSB xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX-encoded register form: 2-byte VEX prefix, opcode, then a
            // register-direct ModRM byte (mod=11, reg=dst, rm=src).
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xd8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBUSB m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX-encoded memory form: mrsd() emits ModRM/SIB/displacement
            // for the memory operand.
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xd8)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSUBUSB ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xd8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBUSB m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xd8)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSUBUSB zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix: 0x62 escape followed by three
            // payload bytes; the trailing 0x40 OR selects 512-bit vector
            // length (0x20 = 256-bit, 0x00 = 128-bit in the VL forms below).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xd8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBUSB m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; the final mrsd() argument is the disp8*N
            // compression scale (64 bytes for a 512-bit operand).
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xd8)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPSUBUSB xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX layout as the zmm form, with 0x00 (128-bit length).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xd8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBUSB m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // disp8*N scale 16 for a 128-bit memory operand.
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xd8)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSUBUSB ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX layout as the zmm form, with 0x20 (256-bit length).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xd8)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBUSB m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // disp8*N scale 32 for a 256-bit memory operand.
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xd8)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No operand form matched: the supplied operand types are invalid.
    if p.len == 0 {
        panic("invalid operands for VPSUBUSB")
    }
    return p
}
 88059  
 88060  // VPSUBUSW performs "Subtract Packed Unsigned Word Integers with Unsigned Saturation".
 88061  //
 88062  // Mnemonic        : VPSUBUSW
 88063  // Supported forms : (10 forms)
 88064  //
 88065  //    * VPSUBUSW xmm, xmm, xmm           [AVX]
 88066  //    * VPSUBUSW m128, xmm, xmm          [AVX]
 88067  //    * VPSUBUSW ymm, ymm, ymm           [AVX2]
 88068  //    * VPSUBUSW m256, ymm, ymm          [AVX2]
 88069  //    * VPSUBUSW zmm, zmm, zmm{k}{z}     [AVX512BW]
 88070  //    * VPSUBUSW m512, zmm, zmm{k}{z}    [AVX512BW]
 88071  //    * VPSUBUSW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
 88072  //    * VPSUBUSW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
 88073  //    * VPSUBUSW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
 88074  //    * VPSUBUSW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
 88075  //
func (self *Program) VPSUBUSW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSUBUSW", 3, Operands { v0, v1, v2 })
    // Generated dispatcher: each if-block below registers one candidate
    // encoder (via p.add) for every operand form that matches. The PSUBUSW
    // opcode byte is 0xD9 in all forms.
    // VPSUBUSW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX-encoded register form: 2-byte VEX prefix, opcode, then a
            // register-direct ModRM byte (mod=11, reg=dst, rm=src).
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xd9)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBUSW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX-encoded memory form: mrsd() emits ModRM/SIB/displacement
            // for the memory operand.
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xd9)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSUBUSW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xd9)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBUSW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xd9)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSUBUSW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix: 0x62 escape followed by three
            // payload bytes; the trailing 0x40 OR selects 512-bit vector
            // length (0x20 = 256-bit, 0x00 = 128-bit in the VL forms below).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xd9)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBUSW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; the final mrsd() argument is the disp8*N
            // compression scale (64 bytes for a 512-bit operand).
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xd9)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPSUBUSW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX layout as the zmm form, with 0x00 (128-bit length).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xd9)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBUSW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // disp8*N scale 16 for a 128-bit memory operand.
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xd9)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSUBUSW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX layout as the zmm form, with 0x20 (256-bit length).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xd9)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBUSW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // disp8*N scale 32 for a 256-bit memory operand.
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xd9)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No operand form matched: the supplied operand types are invalid.
    if p.len == 0 {
        panic("invalid operands for VPSUBUSW")
    }
    return p
}
 88192  
 88193  // VPSUBW performs "Subtract Packed Word Integers".
 88194  //
 88195  // Mnemonic        : VPSUBW
 88196  // Supported forms : (10 forms)
 88197  //
 88198  //    * VPSUBW xmm, xmm, xmm           [AVX]
 88199  //    * VPSUBW m128, xmm, xmm          [AVX]
 88200  //    * VPSUBW ymm, ymm, ymm           [AVX2]
 88201  //    * VPSUBW m256, ymm, ymm          [AVX2]
 88202  //    * VPSUBW zmm, zmm, zmm{k}{z}     [AVX512BW]
 88203  //    * VPSUBW m512, zmm, zmm{k}{z}    [AVX512BW]
 88204  //    * VPSUBW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
 88205  //    * VPSUBW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
 88206  //    * VPSUBW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
 88207  //    * VPSUBW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
 88208  //
func (self *Program) VPSUBW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPSUBW", 3, Operands { v0, v1, v2 })
    // Generated dispatcher: each if-block below registers one candidate
    // encoder (via p.add) for every operand form that matches. The PSUBW
    // opcode byte is 0xF9 in all forms.
    // VPSUBW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX-encoded register form: 2-byte VEX prefix, opcode, then a
            // register-direct ModRM byte (mod=11, reg=dst, rm=src).
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xf9)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX-encoded memory form: mrsd() emits ModRM/SIB/displacement
            // for the memory operand.
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xf9)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSUBW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xf9)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xf9)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPSUBW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix: 0x62 escape followed by three
            // payload bytes; the trailing 0x40 OR selects 512-bit vector
            // length (0x20 = 256-bit, 0x00 = 128-bit in the VL forms below).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xf9)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; the final mrsd() argument is the disp8*N
            // compression scale (64 bytes for a 512-bit operand).
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xf9)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPSUBW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX layout as the zmm form, with 0x00 (128-bit length).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xf9)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // disp8*N scale 16 for a 128-bit memory operand.
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xf9)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPSUBW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX layout as the zmm form, with 0x20 (256-bit length).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xf9)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPSUBW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // disp8*N scale 32 for a 256-bit memory operand.
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xf9)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No operand form matched: the supplied operand types are invalid.
    if p.len == 0 {
        panic("invalid operands for VPSUBW")
    }
    return p
}
 88325  
 88326  // VPTERNLOGD performs "Bitwise Ternary Logical Operation on Doubleword Values".
 88327  //
 88328  // Mnemonic        : VPTERNLOGD
 88329  // Supported forms : (6 forms)
 88330  //
 88331  //    * VPTERNLOGD imm8, m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
 88332  //    * VPTERNLOGD imm8, zmm, zmm, zmm{k}{z}             [AVX512F]
 88333  //    * VPTERNLOGD imm8, m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 88334  //    * VPTERNLOGD imm8, xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 88335  //    * VPTERNLOGD imm8, m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 88336  //    * VPTERNLOGD imm8, ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 88337  //
func (self *Program) VPTERNLOGD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VPTERNLOGD", 4, Operands { v0, v1, v2, v3 })
    // Generated dispatcher: each if-block below registers one candidate
    // encoder (via p.add) for every operand form that matches. All forms
    // use opcode 0x25 and append the 8-bit ternary truth-table immediate
    // (v0) as the final byte via imm1().
    // VPTERNLOGD imm8, m512/m32bcst, zmm, zmm{k}{z}
    if isImm8(v0) && isM512M32bcst(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; bcode(v[1]) carries the embedded-broadcast
            // bit, and the final mrsd() argument is the disp8*N compression
            // scale (64 bytes for a 512-bit operand).
            m.evex(0b11, 0x05, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x25)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPTERNLOGD imm8, zmm, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix: 0x62 escape followed by three
            // payload bytes; the trailing 0x40 OR selects 512-bit vector
            // length (0x20 = 256-bit, 0x00 = 128-bit in the VL forms below).
            // The register-direct ModRM byte and the imm8 come last.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x25)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPTERNLOGD imm8, m128/m32bcst, xmm, xmm{k}{z}
    if isImm8(v0) && isM128M32bcst(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // disp8*N scale 16 for a 128-bit memory operand.
            m.evex(0b11, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x25)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPTERNLOGD imm8, xmm, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX layout as the zmm form, with 0x00 (128-bit length).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0x25)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPTERNLOGD imm8, m256/m32bcst, ymm, ymm{k}{z}
    if isImm8(v0) && isM256M32bcst(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // disp8*N scale 32 for a 256-bit memory operand.
            m.evex(0b11, 0x05, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x25)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPTERNLOGD imm8, ymm, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX layout as the zmm form, with 0x20 (256-bit length).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x25)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No operand form matched: the supplied operand types are invalid.
    if p.len == 0 {
        panic("invalid operands for VPTERNLOGD")
    }
    return p
}
 88420  
// VPTERNLOGQ performs "Bitwise Ternary Logical Operation on Quadword Values".
//
// Mnemonic        : VPTERNLOGQ
// Supported forms : (6 forms)
//
//    * VPTERNLOGQ imm8, m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPTERNLOGQ imm8, zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPTERNLOGQ imm8, m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPTERNLOGQ imm8, xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPTERNLOGQ imm8, m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPTERNLOGQ imm8, ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VPTERNLOGQ(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Each operand pattern below that matches registers one candidate encoder
    // via p.add; if no pattern matches, the function panics at the end.
    p := self.alloc("VPTERNLOGQ", 4, Operands { v0, v1, v2, v3 })
    // VPTERNLOGQ imm8, m512/m64bcst, zmm, zmm{k}{z}
    if isImm8(v0) && isM512M64bcst(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory/broadcast form: evex() builds the EVEX prefix from the
            // address operand; mrsd() emits ModRM/SIB with the displacement
            // compressed at 64-byte granularity (full ZMM width).
            m.evex(0b11, 0x85, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x25)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            // Trailing imm8 selects the ternary truth table.
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPTERNLOGQ imm8, zmm, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-only form: the 4-byte EVEX prefix (escape 0x62 plus
            // three payload bytes carrying register extension, opmask and
            // length bits) is assembled by hand, followed by the opcode and
            // a register-direct ModRM byte (0xc0 | reg<<3 | rm).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x25)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPTERNLOGQ imm8, m128/m64bcst, xmm, xmm{k}{z}
    if isImm8(v0) && isM128M64bcst(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x25)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPTERNLOGQ imm8, xmm, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0x25)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPTERNLOGQ imm8, m256/m64bcst, ymm, ymm{k}{z}
    if isImm8(v0) && isM256M64bcst(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x25)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VPTERNLOGQ imm8, ymm, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x25)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No operand pattern matched: invalid operand combination.
    if p.len == 0 {
        panic("invalid operands for VPTERNLOGQ")
    }
    return p
}
 88515  
// VPTEST performs "Packed Logical Compare".
//
// Mnemonic        : VPTEST
// Supported forms : (4 forms)
//
//    * VPTEST xmm, xmm     [AVX]
//    * VPTEST m128, xmm    [AVX]
//    * VPTEST ymm, ymm     [AVX]
//    * VPTEST m256, ymm    [AVX]
//
func (self *Program) VPTEST(v0 interface{}, v1 interface{}) *Instruction {
    // Each operand pattern below that matches registers one candidate encoder
    // via p.add; if no pattern matches, the function panics at the end.
    p := self.alloc("VPTEST", 2, Operands { v0, v1 })
    // VPTEST xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-only form: hand-assembled 3-byte VEX prefix (escape
            // 0xc4 plus two payload bytes), opcode 0x17, then a
            // register-direct ModRM byte (0xc0 | reg<<3 | rm).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0x17)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPTEST m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: vex3() builds the prefix from the address operand;
            // mrsd() emits the ModRM/SIB/displacement bytes.
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x17)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VPTEST ymm, ymm
    if isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)
            m.emit(0x17)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VPTEST m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x17)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No operand pattern matched: invalid operand combination.
    if p.len == 0 {
        panic("invalid operands for VPTEST")
    }
    return p
}
 88577  
// VPTESTMB performs "Logical AND of Packed Byte Integer Values and Set Mask".
//
// Mnemonic        : VPTESTMB
// Supported forms : (6 forms)
//
//    * VPTESTMB zmm, zmm, k{k}     [AVX512BW]
//    * VPTESTMB m512, zmm, k{k}    [AVX512BW]
//    * VPTESTMB xmm, xmm, k{k}     [AVX512BW,AVX512VL]
//    * VPTESTMB m128, xmm, k{k}    [AVX512BW,AVX512VL]
//    * VPTESTMB ymm, ymm, k{k}     [AVX512BW,AVX512VL]
//    * VPTESTMB m256, ymm, k{k}    [AVX512BW,AVX512VL]
//
func (self *Program) VPTESTMB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Each operand pattern below that matches registers one candidate encoder
    // via p.add; if no pattern matches, the function panics at the end.
    p := self.alloc("VPTESTMB", 3, Operands { v0, v1, v2 })
    // VPTESTMB zmm, zmm, k{k}
    if isZMM(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-only form: hand-assembled 4-byte EVEX prefix (escape
            // 0x62 plus three payload bytes), opcode 0x26, then a
            // register-direct ModRM byte (0xc0 | reg<<3 | rm).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTMB m512, zmm, k{k}
    if isM512(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: evex() builds the prefix from the address operand;
            // mrsd() emits ModRM/SIB with the displacement compressed at
            // 64-byte granularity (full ZMM width).
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
            m.emit(0x26)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPTESTMB xmm, xmm, k{k}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTMB m128, xmm, k{k}
    if isM128(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
            m.emit(0x26)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPTESTMB ymm, ymm, k{k}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTMB m256, ymm, k{k}
    if isM256(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
            m.emit(0x26)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No operand pattern matched: invalid operand combination.
    if p.len == 0 {
        panic("invalid operands for VPTESTMB")
    }
    return p
}
 88666  
// VPTESTMD performs "Logical AND of Packed Doubleword Integer Values and Set Mask".
//
// Mnemonic        : VPTESTMD
// Supported forms : (6 forms)
//
//    * VPTESTMD m512/m32bcst, zmm, k{k}    [AVX512F]
//    * VPTESTMD zmm, zmm, k{k}             [AVX512F]
//    * VPTESTMD m128/m32bcst, xmm, k{k}    [AVX512F,AVX512VL]
//    * VPTESTMD xmm, xmm, k{k}             [AVX512F,AVX512VL]
//    * VPTESTMD m256/m32bcst, ymm, k{k}    [AVX512F,AVX512VL]
//    * VPTESTMD ymm, ymm, k{k}             [AVX512F,AVX512VL]
//
func (self *Program) VPTESTMD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Each operand pattern below that matches registers one candidate encoder
    // via p.add; if no pattern matches, the function panics at the end.
    p := self.alloc("VPTESTMD", 3, Operands { v0, v1, v2 })
    // VPTESTMD m512/m32bcst, zmm, k{k}
    if isM512M32bcst(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory/broadcast form: evex() builds the prefix from the address
            // operand (bcode selects the broadcast bit); mrsd() emits
            // ModRM/SIB with the displacement compressed at 64-byte
            // granularity (full ZMM width).
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
            m.emit(0x27)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPTESTMD zmm, zmm, k{k}
    if isZMM(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-only form: hand-assembled 4-byte EVEX prefix (escape
            // 0x62 plus three payload bytes), opcode 0x27, then a
            // register-direct ModRM byte (0xc0 | reg<<3 | rm).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x27)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTMD m128/m32bcst, xmm, k{k}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
            m.emit(0x27)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPTESTMD xmm, xmm, k{k}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x27)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTMD m256/m32bcst, ymm, k{k}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
            m.emit(0x27)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPTESTMD ymm, ymm, k{k}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x27)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand pattern matched: invalid operand combination.
    if p.len == 0 {
        panic("invalid operands for VPTESTMD")
    }
    return p
}
 88755  
// VPTESTMQ performs "Logical AND of Packed Quadword Integer Values and Set Mask".
//
// Mnemonic        : VPTESTMQ
// Supported forms : (6 forms)
//
//    * VPTESTMQ m512/m64bcst, zmm, k{k}    [AVX512F]
//    * VPTESTMQ zmm, zmm, k{k}             [AVX512F]
//    * VPTESTMQ m128/m64bcst, xmm, k{k}    [AVX512F,AVX512VL]
//    * VPTESTMQ xmm, xmm, k{k}             [AVX512F,AVX512VL]
//    * VPTESTMQ m256/m64bcst, ymm, k{k}    [AVX512F,AVX512VL]
//    * VPTESTMQ ymm, ymm, k{k}             [AVX512F,AVX512VL]
//
func (self *Program) VPTESTMQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Each operand pattern below that matches registers one candidate encoder
    // via p.add; if no pattern matches, the function panics at the end.
    p := self.alloc("VPTESTMQ", 3, Operands { v0, v1, v2 })
    // VPTESTMQ m512/m64bcst, zmm, k{k}
    if isM512M64bcst(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory/broadcast form: evex() builds the prefix from the address
            // operand (bcode selects the broadcast bit); mrsd() emits
            // ModRM/SIB with the displacement compressed at 64-byte
            // granularity (full ZMM width).
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
            m.emit(0x27)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPTESTMQ zmm, zmm, k{k}
    if isZMM(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-only form: hand-assembled 4-byte EVEX prefix (escape
            // 0x62 plus three payload bytes), opcode 0x27, then a
            // register-direct ModRM byte (0xc0 | reg<<3 | rm).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x27)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTMQ m128/m64bcst, xmm, k{k}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
            m.emit(0x27)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPTESTMQ xmm, xmm, k{k}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x27)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTMQ m256/m64bcst, ymm, k{k}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
            m.emit(0x27)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPTESTMQ ymm, ymm, k{k}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x27)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand pattern matched: invalid operand combination.
    if p.len == 0 {
        panic("invalid operands for VPTESTMQ")
    }
    return p
}
 88844  
// VPTESTMW performs "Logical AND of Packed Word Integer Values and Set Mask".
//
// Mnemonic        : VPTESTMW
// Supported forms : (6 forms)
//
//    * VPTESTMW zmm, zmm, k{k}     [AVX512BW]
//    * VPTESTMW m512, zmm, k{k}    [AVX512BW]
//    * VPTESTMW xmm, xmm, k{k}     [AVX512BW,AVX512VL]
//    * VPTESTMW m128, xmm, k{k}    [AVX512BW,AVX512VL]
//    * VPTESTMW ymm, ymm, k{k}     [AVX512BW,AVX512VL]
//    * VPTESTMW m256, ymm, k{k}    [AVX512BW,AVX512VL]
//
func (self *Program) VPTESTMW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Each operand pattern below that matches registers one candidate encoder
    // via p.add; if no pattern matches, the function panics at the end.
    p := self.alloc("VPTESTMW", 3, Operands { v0, v1, v2 })
    // VPTESTMW zmm, zmm, k{k}
    if isZMM(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-only form: hand-assembled 4-byte EVEX prefix (escape
            // 0x62 plus three payload bytes), opcode 0x26, then a
            // register-direct ModRM byte (0xc0 | reg<<3 | rm).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTMW m512, zmm, k{k}
    if isM512(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: evex() builds the prefix from the address operand;
            // mrsd() emits ModRM/SIB with the displacement compressed at
            // 64-byte granularity (full ZMM width).
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
            m.emit(0x26)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPTESTMW xmm, xmm, k{k}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTMW m128, xmm, k{k}
    if isM128(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
            m.emit(0x26)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPTESTMW ymm, ymm, k{k}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTMW m256, ymm, k{k}
    if isM256(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
            m.emit(0x26)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No operand pattern matched: invalid operand combination.
    if p.len == 0 {
        panic("invalid operands for VPTESTMW")
    }
    return p
}
 88933  
// VPTESTNMB performs "Logical NAND of Packed Byte Integer Values and Set Mask".
//
// Mnemonic        : VPTESTNMB
// Supported forms : (6 forms)
//
//    * VPTESTNMB zmm, zmm, k{k}     [AVX512BW,AVX512F]
//    * VPTESTNMB m512, zmm, k{k}    [AVX512BW,AVX512F]
//    * VPTESTNMB xmm, xmm, k{k}     [AVX512BW,AVX512VL]
//    * VPTESTNMB m128, xmm, k{k}    [AVX512BW,AVX512VL]
//    * VPTESTNMB ymm, ymm, k{k}     [AVX512BW,AVX512VL]
//    * VPTESTNMB m256, ymm, k{k}    [AVX512BW,AVX512VL]
//
func (self *Program) VPTESTNMB(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Each operand pattern below that matches registers one candidate encoder
    // via p.add; if no pattern matches, the function panics at the end.
    p := self.alloc("VPTESTNMB", 3, Operands { v0, v1, v2 })
    // VPTESTNMB zmm, zmm, k{k}
    if isZMM(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512F | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-only form: hand-assembled 4-byte EVEX prefix (escape
            // 0x62 plus three payload bytes), opcode 0x26, then a
            // register-direct ModRM byte (0xc0 | reg<<3 | rm).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTNMB m512, zmm, k{k}
    if isM512(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512F | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: evex() builds the prefix from the address operand;
            // mrsd() emits ModRM/SIB with the displacement compressed at
            // 64-byte granularity (full ZMM width).
            m.evex(0b10, 0x06, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
            m.emit(0x26)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPTESTNMB xmm, xmm, k{k}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTNMB m128, xmm, k{k}
    if isM128(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
            m.emit(0x26)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPTESTNMB ymm, ymm, k{k}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTNMB m256, ymm, k{k}
    if isM256(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
            m.emit(0x26)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No operand pattern matched: invalid operand combination.
    if p.len == 0 {
        panic("invalid operands for VPTESTNMB")
    }
    return p
}
 89022  
// VPTESTNMD performs "Logical NAND of Packed Doubleword Integer Values and Set Mask".
//
// Mnemonic        : VPTESTNMD
// Supported forms : (6 forms)
//
//    * VPTESTNMD m512/m32bcst, zmm, k{k}    [AVX512F]
//    * VPTESTNMD zmm, zmm, k{k}             [AVX512F]
//    * VPTESTNMD m128/m32bcst, xmm, k{k}    [AVX512F,AVX512VL]
//    * VPTESTNMD xmm, xmm, k{k}             [AVX512F,AVX512VL]
//    * VPTESTNMD m256/m32bcst, ymm, k{k}    [AVX512F,AVX512VL]
//    * VPTESTNMD ymm, ymm, k{k}             [AVX512F,AVX512VL]
//
func (self *Program) VPTESTNMD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Each operand pattern below that matches registers one candidate encoder
    // via p.add; if no pattern matches, the function panics at the end.
    p := self.alloc("VPTESTNMD", 3, Operands { v0, v1, v2 })
    // VPTESTNMD m512/m32bcst, zmm, k{k}
    if isM512M32bcst(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory/broadcast form: evex() builds the prefix from the address
            // operand (bcode selects the broadcast bit); mrsd() emits
            // ModRM/SIB with the displacement compressed at 64-byte
            // granularity (full ZMM width).
            m.evex(0b10, 0x06, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
            m.emit(0x27)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPTESTNMD zmm, zmm, k{k}
    if isZMM(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-only form: hand-assembled 4-byte EVEX prefix (escape
            // 0x62 plus three payload bytes), opcode 0x27, then a
            // register-direct ModRM byte (0xc0 | reg<<3 | rm).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x27)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTNMD m128/m32bcst, xmm, k{k}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
            m.emit(0x27)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPTESTNMD xmm, xmm, k{k}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x27)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTNMD m256/m32bcst, ymm, k{k}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x06, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
            m.emit(0x27)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPTESTNMD ymm, ymm, k{k}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x27)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No operand pattern matched: invalid operand combination.
    if p.len == 0 {
        panic("invalid operands for VPTESTNMD")
    }
    return p
}
 89111  
// VPTESTNMQ performs "Logical NAND of Packed Quadword Integer Values and Set Mask".
//
// Mnemonic        : VPTESTNMQ
// Supported forms : (6 forms)
//
//    * VPTESTNMQ m512/m64bcst, zmm, k{k}    [AVX512F]
//    * VPTESTNMQ zmm, zmm, k{k}             [AVX512F]
//    * VPTESTNMQ m128/m64bcst, xmm, k{k}    [AVX512F,AVX512VL]
//    * VPTESTNMQ xmm, xmm, k{k}             [AVX512F,AVX512VL]
//    * VPTESTNMQ m256/m64bcst, ymm, k{k}    [AVX512F,AVX512VL]
//    * VPTESTNMQ ymm, ymm, k{k}             [AVX512F,AVX512VL]
//
// Operands follow AT&T order: v0 and v1 are the two sources (v0 may be a
// memory/broadcast operand), v2 is the destination mask register, optionally
// write-masked with {k}. Each matching form registers one encoder closure on
// the instruction; if no form matches, the function panics.
func (self *Program) VPTESTNMQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPTESTNMQ", 3, Operands { v0, v1, v2 })
    // VPTESTNMQ m512/m64bcst, zmm, k{k}
    if isM512M64bcst(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: m.evex builds the EVEX prefix from the operand
            // codes (broadcast bit taken from bcode(v[0])), then opcode 0x27
            // and ModRM/SIB with a 64-byte compressed-displacement scale.
            m.evex(0b10, 0x86, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
            m.emit(0x27)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPTESTNMQ zmm, zmm, k{k}
    if isZMM(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: the 4-byte EVEX prefix is emitted inline —
            // 0x62 escape plus three payload bytes built from the register
            // high/extension bits — then opcode 0x27 and a mod=11 ModRM byte
            // (0xc0 | reg<<3 | rm). The final OR term below (0x40 here,
            // 0x20/0x00 in the ymm/xmm forms) tracks the vector length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfe ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x27)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTNMQ m128/m64bcst, xmm, k{k}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit memory form: same encoding, 16-byte disp8 scale.
            m.evex(0b10, 0x86, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
            m.emit(0x27)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPTESTNMQ xmm, xmm, k{k}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit register form (length term 0x00).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfe ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x27)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTNMQ m256/m64bcst, ymm, k{k}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form: 32-byte disp8 scale.
            m.evex(0b10, 0x86, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, bcode(v[0]))
            m.emit(0x27)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPTESTNMQ ymm, ymm, k{k}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form (length term 0x20).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfe ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x27)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPTESTNMQ")
    }
    return p
}
 89200  
// VPTESTNMW performs "Logical NAND of Packed Word Integer Values and Set Mask".
//
// Mnemonic        : VPTESTNMW
// Supported forms : (6 forms)
//
//    * VPTESTNMW zmm, zmm, k{k}     [AVX512BW,AVX512F]
//    * VPTESTNMW m512, zmm, k{k}    [AVX512BW,AVX512F]
//    * VPTESTNMW xmm, xmm, k{k}     [AVX512BW,AVX512VL]
//    * VPTESTNMW m128, xmm, k{k}    [AVX512BW,AVX512VL]
//    * VPTESTNMW ymm, ymm, k{k}     [AVX512BW,AVX512VL]
//    * VPTESTNMW m256, ymm, k{k}    [AVX512BW,AVX512VL]
//
// Operands follow AT&T order: v0 and v1 are the two sources, v2 is the
// destination mask register, optionally write-masked with {k}. Unlike the
// D/Q variants there is no embedded-broadcast form, so the memory encoders
// pass a constant 0 for the broadcast bit. Panics if no form matches.
func (self *Program) VPTESTNMW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPTESTNMW", 3, Operands { v0, v1, v2 })
    // VPTESTNMW zmm, zmm, k{k}
    if isZMM(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512F | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: inline 4-byte EVEX prefix (0x62 escape + 3
            // payload bytes), opcode 0x26, mod=11 ModRM. The final OR term
            // (0x40/0x20/0x00 across the forms) tracks the vector length.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfe ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTNMW m512, zmm, k{k}
    if isM512(v0) && isZMM(v1) && isKk(v2) {
        self.require(ISA_AVX512F | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: EVEX prefix via m.evex (broadcast bit fixed 0),
            // opcode 0x26, ModRM/SIB with 64-byte disp8 scale.
            m.evex(0b10, 0x86, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
            m.emit(0x26)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPTESTNMW xmm, xmm, k{k}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit register form (length term 0x00).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfe ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTNMW m128, xmm, k{k}
    if isM128(v0) && isEVEXXMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit memory form: 16-byte disp8 scale.
            m.evex(0b10, 0x86, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
            m.emit(0x26)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPTESTNMW ymm, ymm, k{k}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form (length term 0x20).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfe ^ (hlcode(v[1]) << 3))
            m.emit((0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x26)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPTESTNMW m256, ymm, k{k}
    if isM256(v0) && isEVEXYMM(v1) && isKk(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form: 32-byte disp8 scale.
            m.evex(0b10, 0x86, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), 0, 0)
            m.emit(0x26)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPTESTNMW")
    }
    return p
}
 89289  
// VPUNPCKHBW performs "Unpack and Interleave High-Order Bytes into Words".
//
// Mnemonic        : VPUNPCKHBW
// Supported forms : (10 forms)
//
//    * VPUNPCKHBW xmm, xmm, xmm           [AVX]
//    * VPUNPCKHBW m128, xmm, xmm          [AVX]
//    * VPUNPCKHBW ymm, ymm, ymm           [AVX2]
//    * VPUNPCKHBW m256, ymm, ymm          [AVX2]
//    * VPUNPCKHBW zmm, zmm, zmm{k}{z}     [AVX512BW]
//    * VPUNPCKHBW m512, zmm, zmm{k}{z}    [AVX512BW]
//    * VPUNPCKHBW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
//    * VPUNPCKHBW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
//    * VPUNPCKHBW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
//    * VPUNPCKHBW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
//
// Operands follow AT&T order: v0 and v1 are the sources (v0 may be memory),
// v2 is the destination, optionally write-masked {k} and zero-masked {z} in
// the EVEX forms. AVX/AVX2 forms use the shorter VEX encoding; the AVX-512
// forms use EVEX. Panics if no form matches.
func (self *Program) VPUNPCKHBW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPUNPCKHBW", 3, Operands { v0, v1, v2 })
    // VPUNPCKHBW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX register form: m.vex2 emits the VEX prefix (first argument
            // differs between the 128-bit and 256-bit forms: 1 vs 5), then
            // opcode 0x68 and a mod=11 ModRM byte.
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x68)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKHBW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX memory form: disp8 scale is 1 (no compressed displacement
            // under VEX).
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x68)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPUNPCKHBW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit VEX register form.
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x68)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKHBW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit VEX memory form.
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x68)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPUNPCKHBW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form: inline 4-byte prefix (0x62 escape + 3
            // payload bytes; zcode sets the zeroing bit, kcode the write
            // mask), opcode 0x68, mod=11 ModRM. The final OR term
            // (0x40/0x20/0x00) tracks the vector length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x68)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKHBW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form: no broadcast (byte granularity), 64-byte
            // disp8 scale.
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x68)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPUNPCKHBW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit EVEX register form (length term 0x00).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x68)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKHBW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit EVEX memory form: 16-byte disp8 scale.
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x68)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPUNPCKHBW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit EVEX register form (length term 0x20).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x68)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKHBW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit EVEX memory form: 32-byte disp8 scale.
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x68)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPUNPCKHBW")
    }
    return p
}
 89422  
// VPUNPCKHDQ performs "Unpack and Interleave High-Order Doublewords into Quadwords".
//
// Mnemonic        : VPUNPCKHDQ
// Supported forms : (10 forms)
//
//    * VPUNPCKHDQ xmm, xmm, xmm                   [AVX]
//    * VPUNPCKHDQ m128, xmm, xmm                  [AVX]
//    * VPUNPCKHDQ ymm, ymm, ymm                   [AVX2]
//    * VPUNPCKHDQ m256, ymm, ymm                  [AVX2]
//    * VPUNPCKHDQ m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPUNPCKHDQ zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPUNPCKHDQ m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPUNPCKHDQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPUNPCKHDQ m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPUNPCKHDQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Operands follow AT&T order: v0 and v1 are the sources (v0 may be a
// memory/broadcast operand in the EVEX forms), v2 is the destination,
// optionally write-masked {k} and zero-masked {z}. AVX/AVX2 forms use VEX;
// AVX-512 forms use EVEX with 32-bit embedded broadcast. Panics if no form
// matches.
func (self *Program) VPUNPCKHDQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPUNPCKHDQ", 3, Operands { v0, v1, v2 })
    // VPUNPCKHDQ xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX register form: VEX prefix via m.vex2, opcode 0x6a,
            // mod=11 ModRM.
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x6a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKHDQ m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX memory form: disp8 scale 1 (no compressed displacement
            // under VEX).
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x6a)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPUNPCKHDQ ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit VEX register form.
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x6a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKHDQ m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit VEX memory form.
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x6a)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPUNPCKHDQ m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form: broadcast bit from bcode(v[0]), 64-byte
            // disp8 scale.
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x6a)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPUNPCKHDQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form: inline 4-byte prefix, opcode 0x6a, mod=11
            // ModRM. The final OR term (0x40/0x20/0x00) tracks the vector
            // length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x6a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKHDQ m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit EVEX memory form: 16-byte disp8 scale.
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x6a)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPUNPCKHDQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit EVEX register form (length term 0x00).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x6a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKHDQ m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit EVEX memory form: 32-byte disp8 scale.
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x6a)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPUNPCKHDQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit EVEX register form (length term 0x20).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x6a)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPUNPCKHDQ")
    }
    return p
}
 89555  
// VPUNPCKHQDQ performs "Unpack and Interleave High-Order Quadwords into Double Quadwords".
//
// Mnemonic        : VPUNPCKHQDQ
// Supported forms : (10 forms)
//
//    * VPUNPCKHQDQ xmm, xmm, xmm                   [AVX]
//    * VPUNPCKHQDQ m128, xmm, xmm                  [AVX]
//    * VPUNPCKHQDQ ymm, ymm, ymm                   [AVX2]
//    * VPUNPCKHQDQ m256, ymm, ymm                  [AVX2]
//    * VPUNPCKHQDQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VPUNPCKHQDQ zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VPUNPCKHQDQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VPUNPCKHQDQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VPUNPCKHQDQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VPUNPCKHQDQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Operands follow AT&T order: v0 and v1 are the sources (v0 may be a
// memory/broadcast operand in the EVEX forms), v2 is the destination,
// optionally write-masked {k} and zero-masked {z}. AVX/AVX2 forms use VEX;
// AVX-512 forms use EVEX with 64-bit embedded broadcast (note the 0xfd/0x85
// prefix bytes vs 0x7d/0x05 in the doubleword variant). Panics if no form
// matches.
func (self *Program) VPUNPCKHQDQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPUNPCKHQDQ", 3, Operands { v0, v1, v2 })
    // VPUNPCKHQDQ xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX register form: VEX prefix via m.vex2, opcode 0x6d,
            // mod=11 ModRM.
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x6d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKHQDQ m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX memory form: disp8 scale 1 (no compressed displacement
            // under VEX).
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x6d)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPUNPCKHQDQ ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit VEX register form.
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x6d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKHQDQ m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit VEX memory form.
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x6d)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPUNPCKHQDQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form: broadcast bit from bcode(v[0]), 64-byte
            // disp8 scale.
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x6d)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPUNPCKHQDQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX register form: inline 4-byte prefix, opcode 0x6d, mod=11
            // ModRM. The final OR term (0x40/0x20/0x00) tracks the vector
            // length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x6d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKHQDQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit EVEX memory form: 16-byte disp8 scale.
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x6d)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPUNPCKHQDQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit EVEX register form (length term 0x00).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x6d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKHQDQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit EVEX memory form: 32-byte disp8 scale.
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x6d)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPUNPCKHQDQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit EVEX register form (length term 0x20).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x6d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPUNPCKHQDQ")
    }
    return p
}
 89688  
 89689  // VPUNPCKHWD performs "Unpack and Interleave High-Order Words into Doublewords".
 89690  //
 89691  // Mnemonic        : VPUNPCKHWD
 89692  // Supported forms : (10 forms)
 89693  //
 89694  //    * VPUNPCKHWD xmm, xmm, xmm           [AVX]
 89695  //    * VPUNPCKHWD m128, xmm, xmm          [AVX]
 89696  //    * VPUNPCKHWD ymm, ymm, ymm           [AVX2]
 89697  //    * VPUNPCKHWD m256, ymm, ymm          [AVX2]
 89698  //    * VPUNPCKHWD zmm, zmm, zmm{k}{z}     [AVX512BW]
 89699  //    * VPUNPCKHWD m512, zmm, zmm{k}{z}    [AVX512BW]
 89700  //    * VPUNPCKHWD xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
 89701  //    * VPUNPCKHWD m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
 89702  //    * VPUNPCKHWD ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
 89703  //    * VPUNPCKHWD m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
 89704  //
func (self *Program) VPUNPCKHWD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Register an encoder for every operand form that (v0, v1, v2) satisfies.
    // Several forms may match the same operands (e.g. plain XMM vs. EVEX XMM);
    // the final encoding is selected later by the instruction encoder.
    p := self.alloc("VPUNPCKHWD", 3, Operands { v0, v1, v2 })
    // VPUNPCKHWD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix; first argument 1 selects the 128-bit form
            // (the ymm forms below pass 5, which adds the vector-length bit).
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            // Opcode, then ModRM with mod=11 (register-register):
            // reg = destination v[2], rm = source v[0].
            m.emit(0x69)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKHWD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x69)
            // Memory operand: ModRM/SIB/displacement; scale 1 = no disp8
            // compression under VEX.
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPUNPCKHWD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same as the xmm form but with the 256-bit length bit set (5 vs 1).
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x69)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKHWD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x69)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPUNPCKHWD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix: escape 0x62, then
            //   P0: inverted R/X/B/R' register-extension bits,
            //   P1: inverted vvvv (second source v[1]) plus map/prefix selectors,
            //   P2: z (merge/zero), V', aaa opmask bits, and the vector length —
            //       the trailing 0x40 selects 512-bit (0x20 = 256, 0x00 = 128
            //       in the EVEX forms below).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x69)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKHWD m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX via helper; 0b10 = 512-bit vector length, final 0 = no
            // broadcast (this instruction has no /mXXbcst form).
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x69)
            // disp8 is compressed with a scale of 64 (full zmm width).
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPUNPCKHWD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX layout as the zmm form; 0x00 in P2 = 128-bit length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x69)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKHWD m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0b00 = 128-bit length; disp8 scale 16 (xmm width).
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x69)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPUNPCKHWD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX layout as the zmm form; 0x20 in P2 = 256-bit length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x69)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKHWD m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0b01 = 256-bit length; disp8 scale 32 (ymm width).
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x69)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPUNPCKHWD")
    }
    return p
}
 89821  
 89822  // VPUNPCKLBW performs "Unpack and Interleave Low-Order Bytes into Words".
 89823  //
 89824  // Mnemonic        : VPUNPCKLBW
 89825  // Supported forms : (10 forms)
 89826  //
 89827  //    * VPUNPCKLBW xmm, xmm, xmm           [AVX]
 89828  //    * VPUNPCKLBW m128, xmm, xmm          [AVX]
 89829  //    * VPUNPCKLBW ymm, ymm, ymm           [AVX2]
 89830  //    * VPUNPCKLBW m256, ymm, ymm          [AVX2]
 89831  //    * VPUNPCKLBW zmm, zmm, zmm{k}{z}     [AVX512BW]
 89832  //    * VPUNPCKLBW m512, zmm, zmm{k}{z}    [AVX512BW]
 89833  //    * VPUNPCKLBW xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
 89834  //    * VPUNPCKLBW m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
 89835  //    * VPUNPCKLBW ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
 89836  //    * VPUNPCKLBW m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
 89837  //
func (self *Program) VPUNPCKLBW(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Register an encoder for every operand form that (v0, v1, v2) satisfies;
    // the final encoding is selected later by the instruction encoder.
    // Opcode byte is 0x60 throughout; only prefix/operand encoding varies.
    p := self.alloc("VPUNPCKLBW", 3, Operands { v0, v1, v2 })
    // VPUNPCKLBW xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix, 128-bit form (ymm forms pass 5 for 256-bit).
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x60)
            // ModRM: mod=11 (register-register), reg = v[2], rm = v[0].
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKLBW m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x60)
            // Memory operand: ModRM/SIB/displacement; no disp8 compression.
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPUNPCKLBW ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x60)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKLBW m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x60)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPUNPCKLBW zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62 escape, then P0 (inverted
            // R/X/B/R' bits), P1 (inverted vvvv from v[1]), and P2 where the
            // trailing 0x40 selects 512-bit length (0x20 = 256, 0x00 = 128).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x60)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKLBW m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0b10 = 512-bit length; final 0 = no broadcast form exists.
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x60)
            // disp8 compressed with scale 64 (zmm width).
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPUNPCKLBW xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX layout as the zmm form; 0x00 in P2 = 128-bit length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x60)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKLBW m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0b00 = 128-bit length; disp8 scale 16 (xmm width).
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x60)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPUNPCKLBW ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX layout as the zmm form; 0x20 in P2 = 256-bit length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x60)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKLBW m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0b01 = 256-bit length; disp8 scale 32 (ymm width).
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x60)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPUNPCKLBW")
    }
    return p
}
 89954  
 89955  // VPUNPCKLDQ performs "Unpack and Interleave Low-Order Doublewords into Quadwords".
 89956  //
 89957  // Mnemonic        : VPUNPCKLDQ
 89958  // Supported forms : (10 forms)
 89959  //
 89960  //    * VPUNPCKLDQ xmm, xmm, xmm                   [AVX]
 89961  //    * VPUNPCKLDQ m128, xmm, xmm                  [AVX]
 89962  //    * VPUNPCKLDQ ymm, ymm, ymm                   [AVX2]
 89963  //    * VPUNPCKLDQ m256, ymm, ymm                  [AVX2]
 89964  //    * VPUNPCKLDQ m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
 89965  //    * VPUNPCKLDQ zmm, zmm, zmm{k}{z}             [AVX512F]
 89966  //    * VPUNPCKLDQ m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 89967  //    * VPUNPCKLDQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 89968  //    * VPUNPCKLDQ m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 89969  //    * VPUNPCKLDQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 89970  //
func (self *Program) VPUNPCKLDQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Register an encoder for every operand form that (v0, v1, v2) satisfies;
    // the final encoding is selected later by the instruction encoder.
    // NOTE: the opcode byte here is 0x62, which coincidentally equals the
    // EVEX escape byte — the first m.emit(0x62) in the EVEX register forms is
    // the prefix escape, the later one is the opcode.
    p := self.alloc("VPUNPCKLDQ", 3, Operands { v0, v1, v2 })
    // VPUNPCKLDQ xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix, 128-bit form (ymm forms pass 5 for 256-bit).
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x62)
            // ModRM: mod=11 (register-register), reg = v[2], rm = v[0].
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKLDQ m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x62)
            // Memory operand: ModRM/SIB/displacement; no disp8 compression.
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPUNPCKLDQ ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x62)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKLDQ m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x62)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPUNPCKLDQ m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0b10 = 512-bit length; bcode(v[0]) carries the {1to16}
            // broadcast bit for the m32bcst variant.
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x62)
            // disp8 compressed with scale 64 (zmm width).
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPUNPCKLDQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62 escape, then P0 (inverted
            // R/X/B/R' bits), P1 (inverted vvvv from v[1]), and P2 where the
            // trailing 0x40 selects 512-bit length (0x20 = 256, 0x00 = 128).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x62)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKLDQ m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0b00 = 128-bit length; disp8 scale 16 (xmm width).
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x62)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPUNPCKLDQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX layout as the zmm form; 0x00 in P2 = 128-bit length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x62)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKLDQ m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0b01 = 256-bit length; disp8 scale 32 (ymm width).
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x62)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPUNPCKLDQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX layout as the zmm form; 0x20 in P2 = 256-bit length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x62)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPUNPCKLDQ")
    }
    return p
}
 90087  
 90088  // VPUNPCKLQDQ performs "Unpack and Interleave Low-Order Quadwords into Double Quadwords".
 90089  //
 90090  // Mnemonic        : VPUNPCKLQDQ
 90091  // Supported forms : (10 forms)
 90092  //
 90093  //    * VPUNPCKLQDQ xmm, xmm, xmm                   [AVX]
 90094  //    * VPUNPCKLQDQ m128, xmm, xmm                  [AVX]
 90095  //    * VPUNPCKLQDQ ymm, ymm, ymm                   [AVX2]
 90096  //    * VPUNPCKLQDQ m256, ymm, ymm                  [AVX2]
 90097  //    * VPUNPCKLQDQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
 90098  //    * VPUNPCKLQDQ zmm, zmm, zmm{k}{z}             [AVX512F]
 90099  //    * VPUNPCKLQDQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 90100  //    * VPUNPCKLQDQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 90101  //    * VPUNPCKLQDQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 90102  //    * VPUNPCKLQDQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 90103  //
func (self *Program) VPUNPCKLQDQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Register an encoder for every operand form that (v0, v1, v2) satisfies;
    // the final encoding is selected later by the instruction encoder.
    // Quadword-granular instruction: the EVEX forms set EVEX.W (0x85 in the
    // evex() calls and 0xfd in P1, vs. 0x05/0x7d in the doubleword
    // instructions elsewhere in this file).
    p := self.alloc("VPUNPCKLQDQ", 3, Operands { v0, v1, v2 })
    // VPUNPCKLQDQ xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix, 128-bit form (ymm forms pass 5 for 256-bit).
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x6c)
            // ModRM: mod=11 (register-register), reg = v[2], rm = v[0].
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKLQDQ m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x6c)
            // Memory operand: ModRM/SIB/displacement; no disp8 compression.
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPUNPCKLQDQ ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x6c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKLQDQ m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x6c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPUNPCKLQDQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0x85 = EVEX.W set (64-bit elements); 0b10 = 512-bit length;
            // bcode(v[0]) carries the {1to8} broadcast bit.
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x6c)
            // disp8 compressed with scale 64 (zmm width).
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPUNPCKLQDQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62 escape, then P0 (inverted
            // R/X/B/R' bits), P1 with EVEX.W set (0xfd base) and inverted
            // vvvv from v[1], and P2 where the trailing 0x40 selects 512-bit
            // length (0x20 = 256, 0x00 = 128 in the forms below).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x6c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKLQDQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0b00 = 128-bit length; disp8 scale 16 (xmm width).
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x6c)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPUNPCKLQDQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX layout as the zmm form; 0x00 in P2 = 128-bit length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x6c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKLQDQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0b01 = 256-bit length; disp8 scale 32 (ymm width).
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x6c)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPUNPCKLQDQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX layout as the zmm form; 0x20 in P2 = 256-bit length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x6c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPUNPCKLQDQ")
    }
    return p
}
 90220  
 90221  // VPUNPCKLWD performs "Unpack and Interleave Low-Order Words into Doublewords".
 90222  //
 90223  // Mnemonic        : VPUNPCKLWD
 90224  // Supported forms : (10 forms)
 90225  //
 90226  //    * VPUNPCKLWD xmm, xmm, xmm           [AVX]
 90227  //    * VPUNPCKLWD m128, xmm, xmm          [AVX]
 90228  //    * VPUNPCKLWD ymm, ymm, ymm           [AVX2]
 90229  //    * VPUNPCKLWD m256, ymm, ymm          [AVX2]
 90230  //    * VPUNPCKLWD zmm, zmm, zmm{k}{z}     [AVX512BW]
 90231  //    * VPUNPCKLWD m512, zmm, zmm{k}{z}    [AVX512BW]
 90232  //    * VPUNPCKLWD xmm, xmm, xmm{k}{z}     [AVX512BW,AVX512VL]
 90233  //    * VPUNPCKLWD m128, xmm, xmm{k}{z}    [AVX512BW,AVX512VL]
 90234  //    * VPUNPCKLWD ymm, ymm, ymm{k}{z}     [AVX512BW,AVX512VL]
 90235  //    * VPUNPCKLWD m256, ymm, ymm{k}{z}    [AVX512BW,AVX512VL]
 90236  //
func (self *Program) VPUNPCKLWD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Register an encoder for every operand form that (v0, v1, v2) satisfies;
    // the final encoding is selected later by the instruction encoder.
    // Opcode byte is 0x61 throughout; only prefix/operand encoding varies.
    p := self.alloc("VPUNPCKLWD", 3, Operands { v0, v1, v2 })
    // VPUNPCKLWD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix, 128-bit form (ymm forms pass 5 for 256-bit).
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x61)
            // ModRM: mod=11 (register-register), reg = v[2], rm = v[0].
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKLWD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x61)
            // Memory operand: ModRM/SIB/displacement; no disp8 compression.
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPUNPCKLWD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x61)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKLWD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x61)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPUNPCKLWD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: 0x62 escape, then P0 (inverted
            // R/X/B/R' bits), P1 (inverted vvvv from v[1]), and P2 where the
            // trailing 0x40 selects 512-bit length (0x20 = 256, 0x00 = 128).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x61)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKLWD m512, zmm, zmm{k}{z}
    if isM512(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0b10 = 512-bit length; final 0 = no broadcast form exists.
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x61)
            // disp8 compressed with scale 64 (zmm width).
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPUNPCKLWD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX layout as the zmm form; 0x00 in P2 = 128-bit length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x61)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKLWD m128, xmm, xmm{k}{z}
    if isM128(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0b00 = 128-bit length; disp8 scale 16 (xmm width).
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x61)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPUNPCKLWD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same EVEX layout as the zmm form; 0x20 in P2 = 256-bit length.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x61)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPUNPCKLWD m256, ymm, ymm{k}{z}
    if isM256(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512BW)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0b01 = 256-bit length; disp8 scale 32 (ymm width).
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x61)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPUNPCKLWD")
    }
    return p
}
 90353  
 90354  // VPXOR performs "Packed Bitwise Logical Exclusive OR".
 90355  //
 90356  // Mnemonic        : VPXOR
 90357  // Supported forms : (4 forms)
 90358  //
 90359  //    * VPXOR xmm, xmm, xmm     [AVX]
 90360  //    * VPXOR m128, xmm, xmm    [AVX]
 90361  //    * VPXOR ymm, ymm, ymm     [AVX2]
 90362  //    * VPXOR m256, ymm, ymm    [AVX2]
 90363  //
func (self *Program) VPXOR(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    // Register an encoder for every operand form that (v0, v1, v2) satisfies;
    // the final encoding is selected later by the instruction encoder.
    // VPXOR is VEX-only (opcode 0xef); the EVEX/AVX-512 equivalents are the
    // separate VPXORD/VPXORQ mnemonics.
    p := self.alloc("VPXOR", 3, Operands { v0, v1, v2 })
    // VPXOR xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix, 128-bit form (ymm forms pass 5 for 256-bit).
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xef)
            // ModRM: mod=11 (register-register), reg = v[2], rm = v[0].
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPXOR m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xef)
            // Memory operand: ModRM/SIB/displacement; no disp8 compression.
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VPXOR ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0xef)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPXOR m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX2)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0xef)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No form matched: the operand combination is not encodable.
    if p.len == 0 {
        panic("invalid operands for VPXOR")
    }
    return p
}
 90411  
 90412  // VPXORD performs "Bitwise Logical Exclusive OR of Packed Doubleword Integers".
 90413  //
 90414  // Mnemonic        : VPXORD
 90415  // Supported forms : (6 forms)
 90416  //
 90417  //    * VPXORD m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
 90418  //    * VPXORD zmm, zmm, zmm{k}{z}             [AVX512F]
 90419  //    * VPXORD m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 90420  //    * VPXORD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 90421  //    * VPXORD m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 90422  //    * VPXORD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 90423  //
func (self *Program) VPXORD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPXORD", 3, Operands { v0, v1, v2 })
    // Each matching operand form registers one encoder closure on p.
    // Helper conventions (defined in _Encoding): evex emits the 4-byte EVEX
    // prefix (deriving mask/zeroing/broadcast bits from the operands); mrsd
    // emits ModRM/SIB/displacement for a memory operand — its last argument
    // matches the operand width (64/32/16), consistent with the EVEX
    // disp8*N compressed-displacement scale; register-only forms instead
    // emit the EVEX prefix byte-by-byte: 0x62, then P0 (inverted register
    // high bits), P1 (0x7d ^ vvvv<<3), and P2 (z | L'L | aaa). The constant
    // OR-ed into P2 selects the vector length: 0x40 = 512-bit, 0x20 =
    // 256-bit, 0x00 = 128-bit. Opcode 0xEF is the VPXORD opcode.
    // VPXORD m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xef)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPXORD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xef)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPXORD m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xef)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPXORD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xef)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPXORD m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xef)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPXORD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xef)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VPXORD")
    }
    return p
}
 90500  
 90501  // VPXORQ performs "Bitwise Logical Exclusive OR of Packed Quadword Integers".
 90502  //
 90503  // Mnemonic        : VPXORQ
 90504  // Supported forms : (6 forms)
 90505  //
 90506  //    * VPXORQ m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
 90507  //    * VPXORQ zmm, zmm, zmm{k}{z}             [AVX512F]
 90508  //    * VPXORQ m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 90509  //    * VPXORQ xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 90510  //    * VPXORQ m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 90511  //    * VPXORQ ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 90512  //
func (self *Program) VPXORQ(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VPXORQ", 3, Operands { v0, v1, v2 })
    // Each matching operand form registers one encoder closure on p.
    // Helper conventions (defined in _Encoding): evex emits the 4-byte EVEX
    // prefix; mrsd emits ModRM/SIB/displacement for a memory operand — its
    // last argument matches the operand width (64/32/16), consistent with
    // the EVEX disp8*N compressed-displacement scale; register-only forms
    // emit the EVEX prefix byte-by-byte (0x62, P0, P1, P2). Note P1 here
    // starts from 0xfd rather than VPXORD's 0x7d — the extra high bit is
    // EVEX.W, selecting the quadword (64-bit element) variant. The constant
    // OR-ed into P2 selects vector length: 0x40 = 512, 0x20 = 256, 0x00 =
    // 128-bit. Opcode 0xEF is the VPXORQ opcode.
    // VPXORQ m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xef)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VPXORQ zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xef)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPXORQ m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xef)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VPXORQ xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0xef)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VPXORQ m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0xef)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VPXORQ ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0xef)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VPXORQ")
    }
    return p
}
 90589  
 90590  // VRANGEPD performs "Range Restriction Calculation For Packed Pairs of Double-Precision Floating-Point Values".
 90591  //
 90592  // Mnemonic        : VRANGEPD
 90593  // Supported forms : (7 forms)
 90594  //
 90595  //    * VRANGEPD imm8, m512/m64bcst, zmm, zmm{k}{z}    [AVX512DQ]
 90596  //    * VRANGEPD imm8, {sae}, zmm, zmm, zmm{k}{z}      [AVX512DQ]
 90597  //    * VRANGEPD imm8, zmm, zmm, zmm{k}{z}             [AVX512DQ]
 90598  //    * VRANGEPD imm8, m128/m64bcst, xmm, xmm{k}{z}    [AVX512DQ,AVX512VL]
 90599  //    * VRANGEPD imm8, xmm, xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
 90600  //    * VRANGEPD imm8, m256/m64bcst, ymm, ymm{k}{z}    [AVX512DQ,AVX512VL]
 90601  //    * VRANGEPD imm8, ymm, ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
 90602  //
func (self *Program) VRANGEPD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}, vv ...interface{}) *Instruction {
    // VRANGEPD takes 4 operands, or 5 when an {sae} modifier is supplied;
    // the variadic tail carries the optional 5th operand.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VRANGEPD", 4, Operands { v0, v1, v2, v3 })
        case 1  : p = self.alloc("VRANGEPD", 5, Operands { v0, v1, v2, v3, vv[0] })
        default : panic("instruction VRANGEPD takes 4 or 5 operands")
    }
    // Each matching operand form registers one encoder closure on p.
    // Helper conventions (defined in _Encoding): evex emits the 4-byte EVEX
    // prefix; mrsd emits ModRM/SIB/displacement for a memory operand — its
    // last argument matches the operand width (EVEX disp8*N compression
    // scale); register-only forms emit the EVEX prefix byte-by-byte (0x62,
    // P0, P1, P2). The constant OR-ed into P2 selects vector length /
    // rounding control: 0x40 = 512, 0x20 = 256, 0x00 = 128-bit, and 0x10
    // sets EVEX.b for the {sae} form. Opcode 0x50 is VRANGEPD; the trailing
    // imm1 emits the imm8 range-control byte (first operand).
    // VRANGEPD imm8, m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM512M64bcst(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x50)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRANGEPD imm8, {sae}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isZMM(v2) && isZMM(v3) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[4]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[4]) << 4)))
            m.emit(0xfd ^ (hlcode(v[3]) << 3))
            m.emit((zcode(v[4]) << 7) | (0x08 ^ (ecode(v[3]) << 3)) | kcode(v[4]) | 0x10)
            m.emit(0x50)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRANGEPD imm8, zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x50)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRANGEPD imm8, m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM128M64bcst(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x50)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRANGEPD imm8, xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0x50)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRANGEPD imm8, m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM256M64bcst(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x50)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRANGEPD imm8, ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x50)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VRANGEPD")
    }
    return p
}
 90704  
 90705  // VRANGEPS performs "Range Restriction Calculation For Packed Pairs of Single-Precision Floating-Point Values".
 90706  //
 90707  // Mnemonic        : VRANGEPS
 90708  // Supported forms : (7 forms)
 90709  //
 90710  //    * VRANGEPS imm8, m512/m32bcst, zmm, zmm{k}{z}    [AVX512DQ]
 90711  //    * VRANGEPS imm8, {sae}, zmm, zmm, zmm{k}{z}      [AVX512DQ]
 90712  //    * VRANGEPS imm8, zmm, zmm, zmm{k}{z}             [AVX512DQ]
 90713  //    * VRANGEPS imm8, m128/m32bcst, xmm, xmm{k}{z}    [AVX512DQ,AVX512VL]
 90714  //    * VRANGEPS imm8, xmm, xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
 90715  //    * VRANGEPS imm8, m256/m32bcst, ymm, ymm{k}{z}    [AVX512DQ,AVX512VL]
 90716  //    * VRANGEPS imm8, ymm, ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
 90717  //
func (self *Program) VRANGEPS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}, vv ...interface{}) *Instruction {
    // VRANGEPS takes 4 operands, or 5 when an {sae} modifier is supplied;
    // the variadic tail carries the optional 5th operand.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VRANGEPS", 4, Operands { v0, v1, v2, v3 })
        case 1  : p = self.alloc("VRANGEPS", 5, Operands { v0, v1, v2, v3, vv[0] })
        default : panic("instruction VRANGEPS takes 4 or 5 operands")
    }
    // Each matching operand form registers one encoder closure on p.
    // Helper conventions (defined in _Encoding): evex emits the 4-byte EVEX
    // prefix; mrsd emits ModRM/SIB/displacement for a memory operand — its
    // last argument matches the operand width (EVEX disp8*N compression
    // scale); register-only forms emit the EVEX prefix byte-by-byte (0x62,
    // P0, P1, P2). P1 starts from 0x7d (EVEX.W clear — the single-precision
    // variant, vs. 0xfd in VRANGEPD). The constant OR-ed into P2 selects
    // vector length: 0x40 = 512, 0x20 = 256, 0x00 = 128-bit, and 0x10 sets
    // EVEX.b for the {sae} form. Opcode 0x50 is VRANGEPS; the trailing imm1
    // emits the imm8 range-control byte (first operand).
    // VRANGEPS imm8, m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM512M32bcst(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x50)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRANGEPS imm8, {sae}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isZMM(v2) && isZMM(v3) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[4]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[4]) << 4)))
            m.emit(0x7d ^ (hlcode(v[3]) << 3))
            m.emit((zcode(v[4]) << 7) | (0x08 ^ (ecode(v[3]) << 3)) | kcode(v[4]) | 0x10)
            m.emit(0x50)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRANGEPS imm8, zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x50)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRANGEPS imm8, m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM128M32bcst(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x50)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRANGEPS imm8, xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0x50)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRANGEPS imm8, m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM256M32bcst(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x50)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRANGEPS imm8, ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x50)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VRANGEPS")
    }
    return p
}
 90819  
 90820  // VRANGESD performs "Range Restriction Calculation For a pair of Scalar Double-Precision Floating-Point Values".
 90821  //
 90822  // Mnemonic        : VRANGESD
 90823  // Supported forms : (3 forms)
 90824  //
 90825  //    * VRANGESD imm8, m64, xmm, xmm{k}{z}           [AVX512DQ]
 90826  //    * VRANGESD imm8, {sae}, xmm, xmm, xmm{k}{z}    [AVX512DQ]
 90827  //    * VRANGESD imm8, xmm, xmm, xmm{k}{z}           [AVX512DQ]
 90828  //
func (self *Program) VRANGESD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}, vv ...interface{}) *Instruction {
    // VRANGESD takes 4 operands, or 5 when an {sae} modifier is supplied;
    // the variadic tail carries the optional 5th operand.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VRANGESD", 4, Operands { v0, v1, v2, v3 })
        case 1  : p = self.alloc("VRANGESD", 5, Operands { v0, v1, v2, v3, vv[0] })
        default : panic("instruction VRANGESD takes 4 or 5 operands")
    }
    // Each matching operand form registers one encoder closure on p.
    // Helper conventions (defined in _Encoding): evex emits the 4-byte EVEX
    // prefix; mrsd emits ModRM/SIB/displacement for the memory operand —
    // its last argument (8) matches the scalar operand width (EVEX disp8*N
    // compression scale); register-only forms emit the EVEX prefix
    // byte-by-byte (0x62, P0, P1, P2). In P2, 0x10 sets EVEX.b for the
    // {sae} form and 0x40 is used for the plain register form. Opcode 0x51
    // is VRANGESD; the trailing imm1 emits the imm8 range-control byte
    // (first operand).
    // VRANGESD imm8, m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM64(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x51)
            m.mrsd(lcode(v[3]), addr(v[1]), 8)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRANGESD imm8, {sae}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[4]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[4]) << 4)))
            m.emit(0xfd ^ (hlcode(v[3]) << 3))
            m.emit((zcode(v[4]) << 7) | (0x08 ^ (ecode(v[3]) << 3)) | kcode(v[4]) | 0x10)
            m.emit(0x51)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRANGESD imm8, xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x51)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VRANGESD")
    }
    return p
}
 90880  
 90881  // VRANGESS performs "Range Restriction Calculation For a pair of Scalar Single-Precision Floating-Point Values".
 90882  //
 90883  // Mnemonic        : VRANGESS
 90884  // Supported forms : (3 forms)
 90885  //
 90886  //    * VRANGESS imm8, m32, xmm, xmm{k}{z}           [AVX512DQ]
 90887  //    * VRANGESS imm8, {sae}, xmm, xmm, xmm{k}{z}    [AVX512DQ]
 90888  //    * VRANGESS imm8, xmm, xmm, xmm{k}{z}           [AVX512DQ]
 90889  //
func (self *Program) VRANGESS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}, vv ...interface{}) *Instruction {
    // VRANGESS takes 4 operands, or 5 when an {sae} modifier is supplied;
    // the variadic tail carries the optional 5th operand.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VRANGESS", 4, Operands { v0, v1, v2, v3 })
        case 1  : p = self.alloc("VRANGESS", 5, Operands { v0, v1, v2, v3, vv[0] })
        default : panic("instruction VRANGESS takes 4 or 5 operands")
    }
    // Each matching operand form registers one encoder closure on p.
    // Helper conventions (defined in _Encoding): evex emits the 4-byte EVEX
    // prefix; mrsd emits ModRM/SIB/displacement for the memory operand —
    // its last argument (4) matches the scalar operand width (EVEX disp8*N
    // compression scale); register-only forms emit the EVEX prefix
    // byte-by-byte (0x62, P0, P1, P2). P1 starts from 0x7d (EVEX.W clear —
    // the single-precision variant, vs. 0xfd in VRANGESD). In P2, 0x10 sets
    // EVEX.b for the {sae} form. Opcode 0x51 is VRANGESS; the trailing imm1
    // emits the imm8 range-control byte (first operand).
    // VRANGESS imm8, m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM32(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x51)
            m.mrsd(lcode(v[3]), addr(v[1]), 4)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRANGESS imm8, {sae}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[4]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[4]) << 4)))
            m.emit(0x7d ^ (hlcode(v[3]) << 3))
            m.emit((zcode(v[4]) << 7) | (0x08 ^ (ecode(v[3]) << 3)) | kcode(v[4]) | 0x10)
            m.emit(0x51)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRANGESS imm8, xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x51)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VRANGESS")
    }
    return p
}
 90941  
 90942  // VRCP14PD performs "Compute Approximate Reciprocals of Packed Double-Precision Floating-Point Values".
 90943  //
 90944  // Mnemonic        : VRCP14PD
 90945  // Supported forms : (6 forms)
 90946  //
 90947  //    * VRCP14PD m512/m64bcst, zmm{k}{z}    [AVX512F]
 90948  //    * VRCP14PD zmm, zmm{k}{z}             [AVX512F]
 90949  //    * VRCP14PD m128/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
 90950  //    * VRCP14PD m256/m64bcst, ymm{k}{z}    [AVX512F,AVX512VL]
 90951  //    * VRCP14PD xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 90952  //    * VRCP14PD ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 90953  //
func (self *Program) VRCP14PD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VRCP14PD", 2, Operands { v0, v1 })
    // Each matching operand form registers one encoder closure on p.
    // Helper conventions (defined in _Encoding): evex emits the 4-byte EVEX
    // prefix; mrsd emits ModRM/SIB/displacement for a memory operand — its
    // last argument matches the operand width (64/32/16), consistent with
    // the EVEX disp8*N compressed-displacement scale; register-only forms
    // emit the EVEX prefix byte-by-byte (0x62, P0, P1, P2). P1 is the
    // constant 0xfd here because this two-operand instruction has no vvvv
    // source register. The constant OR-ed into P2 selects vector length:
    // 0x48 = 512, 0x28 = 256, 0x08 = 128-bit. Opcode 0x4C is VRCP14PD.
    // VRCP14PD m512/m64bcst, zmm{k}{z}
    if isM512M64bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x4c)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VRCP14PD zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x4c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VRCP14PD m128/m64bcst, xmm{k}{z}
    if isM128M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x4c)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VRCP14PD m256/m64bcst, ymm{k}{z}
    if isM256M64bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x4c)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VRCP14PD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x4c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VRCP14PD ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x4c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: the operands are invalid for this mnemonic.
    if p.len == 0 {
        panic("invalid operands for VRCP14PD")
    }
    return p
}
 91030  
// VRCP14PS performs "Compute Approximate Reciprocals of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VRCP14PS
// Supported forms : (6 forms)
//
//    * VRCP14PS m512/m32bcst, zmm{k}{z}    [AVX512F]
//    * VRCP14PS zmm, zmm{k}{z}             [AVX512F]
//    * VRCP14PS m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VRCP14PS m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VRCP14PS xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VRCP14PS ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Each form below that matches the operand pattern records its ISA
// requirement and registers an encoder closure on p; if no form matches,
// the function panics.
func (self *Program) VRCP14PS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VRCP14PS", 2, Operands { v0, v1 })
    // VRCP14PS m512/m32bcst, zmm{k}{z}
    if isM512M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: m.evex assembles the EVEX prefix from the address
            // operand; the last argument of m.mrsd is the disp8 scaling
            // factor — 64/32/16 bytes matching the zmm/ymm/xmm operand width.
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x4c)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VRCP14PS zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: the 4-byte EVEX prefix (escape byte 0x62) is
            // emitted by hand, folding the register-extension, mask (k),
            // zeroing (z) and vector-length bits into the prefix bytes,
            // followed by opcode 0x4c and the ModRM byte.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x4c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VRCP14PS m128/m32bcst, xmm{k}{z}
    if isM128M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x4c)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VRCP14PS m256/m32bcst, ymm{k}{z}
    if isM256M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x4c)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VRCP14PS xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same manual EVEX layout as the zmm form; 0x08 vs 0x48/0x28
            // selects the 128-bit vector length in the L'L bits.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x4c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VRCP14PS ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x4c)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No encoder matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VRCP14PS")
    }
    return p
}
 91119  
// VRCP14SD performs "Compute Approximate Reciprocal of a Scalar Double-Precision Floating-Point Value".
//
// Mnemonic        : VRCP14SD
// Supported forms : (2 forms)
//
//    * VRCP14SD xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VRCP14SD m64, xmm, xmm{k}{z}    [AVX512F]
//
// Three-operand scalar form: v0 is the source (reg or m64), v1 the second
// source register (folded into EVEX.vvvv), v2 the destination with optional
// mask/zeroing.
func (self *Program) VRCP14SD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VRCP14SD", 3, Operands { v0, v1, v2 })
    // VRCP14SD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: manual EVEX prefix (escape 0x62), with the
            // non-destructive source v[1] contributing the vvvv/V' bits.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x4d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VRCP14SD m64, xmm, xmm{k}{z}
    if isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: disp8 scale 8 = scalar double operand width.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x4d)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // No encoder matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VRCP14SD")
    }
    return p
}
 91158  
// VRCP14SS performs "Compute Approximate Reciprocal of a Scalar Single-Precision Floating-Point Value".
//
// Mnemonic        : VRCP14SS
// Supported forms : (2 forms)
//
//    * VRCP14SS xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VRCP14SS m32, xmm, xmm{k}{z}    [AVX512F]
//
// Single-precision counterpart of VRCP14SD: identical encoding structure,
// differing only in the opcode-map/prefix byte and the 4-byte (m32) memory
// operand width.
func (self *Program) VRCP14SS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VRCP14SS", 3, Operands { v0, v1, v2 })
    // VRCP14SS xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: manual EVEX prefix; v[1] supplies the
            // non-destructive-source (vvvv/V') bits.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x4d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VRCP14SS m32, xmm, xmm{k}{z}
    if isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: disp8 scale 4 = scalar single operand width.
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x4d)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // No encoder matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VRCP14SS")
    }
    return p
}
 91197  
// VRCP28PD performs "Approximation to the Reciprocal of Packed Double-Precision Floating-Point Values with Less Than 2^-28 Relative Error".
//
// Mnemonic        : VRCP28PD
// Supported forms : (3 forms)
//
//    * VRCP28PD m512/m64bcst, zmm{k}{z}    [AVX512ER]
//    * VRCP28PD {sae}, zmm, zmm{k}{z}      [AVX512ER]
//    * VRCP28PD zmm, zmm{k}{z}             [AVX512ER]
//
// The variadic tail exists because the optional leading {sae} operand makes
// this either a 2- or 3-operand instruction; each form guard re-checks
// len(vv) so operand positions stay unambiguous.
func (self *Program) VRCP28PD(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VRCP28PD", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VRCP28PD", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VRCP28PD takes 2 or 3 operands")
    }
    // VRCP28PD m512/m64bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: disp8 scale 64 = full zmm operand width.
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xca)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VRCP28PD {sae}, zmm, zmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // SAE form: 0x18 (vs 0x48 below) differs in the EVEX.b/L'L bits,
            // selecting suppress-all-exceptions for the register operands.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x18)
            m.emit(0xca)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VRCP28PD zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0xca)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No encoder matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VRCP28PD")
    }
    return p
}
 91255  
// VRCP28PS performs "Approximation to the Reciprocal of Packed Single-Precision Floating-Point Values with Less Than 2^-28 Relative Error".
//
// Mnemonic        : VRCP28PS
// Supported forms : (3 forms)
//
//    * VRCP28PS m512/m32bcst, zmm{k}{z}    [AVX512ER]
//    * VRCP28PS {sae}, zmm, zmm{k}{z}      [AVX512ER]
//    * VRCP28PS zmm, zmm{k}{z}             [AVX512ER]
//
// Single-precision counterpart of VRCP28PD; the optional leading {sae}
// operand makes this a 2- or 3-operand instruction (hence the variadic
// tail and the len(vv) checks in each form guard).
func (self *Program) VRCP28PS(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VRCP28PS", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VRCP28PS", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VRCP28PS takes 2 or 3 operands")
    }
    // VRCP28PS m512/m32bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: disp8 scale 64 = full zmm operand width.
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xca)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VRCP28PS {sae}, zmm, zmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // SAE form: 0x18 (vs 0x48 below) flips the EVEX.b/L'L bits to
            // request suppress-all-exceptions.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x18)
            m.emit(0xca)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VRCP28PS zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0xca)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No encoder matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VRCP28PS")
    }
    return p
}
 91313  
// VRCP28SD performs "Approximation to the Reciprocal of a Scalar Double-Precision Floating-Point Value with Less Than 2^-28 Relative Error".
//
// Mnemonic        : VRCP28SD
// Supported forms : (3 forms)
//
//    * VRCP28SD m64, xmm, xmm{k}{z}           [AVX512ER]
//    * VRCP28SD {sae}, xmm, xmm, xmm{k}{z}    [AVX512ER]
//    * VRCP28SD xmm, xmm, xmm{k}{z}           [AVX512ER]
//
// Scalar form: the optional leading {sae} operand makes this a 3- or
// 4-operand instruction, so the final operand arrives via the variadic
// tail and every form guard re-checks len(vv).
func (self *Program) VRCP28SD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VRCP28SD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VRCP28SD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VRCP28SD takes 3 or 4 operands")
    }
    // VRCP28SD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: disp8 scale 8 = scalar double operand width;
            // v[1] (second source) goes into EVEX.vvvv via vcode.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xcb)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VRCP28SD {sae}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // SAE form: trailing 0x10 (vs 0x40 below) differs in the
            // EVEX.b/L'L bits, selecting suppress-all-exceptions.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xcb)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VRCP28SD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xcb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VRCP28SD")
    }
    return p
}
 91371  
// VRCP28SS performs "Approximation to the Reciprocal of a Scalar Single-Precision Floating-Point Value with Less Than 2^-28 Relative Error".
//
// Mnemonic        : VRCP28SS
// Supported forms : (3 forms)
//
//    * VRCP28SS m32, xmm, xmm{k}{z}           [AVX512ER]
//    * VRCP28SS {sae}, xmm, xmm, xmm{k}{z}    [AVX512ER]
//    * VRCP28SS xmm, xmm, xmm{k}{z}           [AVX512ER]
//
// Single-precision counterpart of VRCP28SD: identical structure, differing
// in the opcode-map/prefix byte and the 4-byte (m32) memory operand width.
func (self *Program) VRCP28SS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VRCP28SS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VRCP28SS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VRCP28SS takes 3 or 4 operands")
    }
    // VRCP28SS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: disp8 scale 4 = scalar single operand width.
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xcb)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VRCP28SS {sae}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // SAE form: trailing 0x10 (vs 0x40 below) selects
            // suppress-all-exceptions in the EVEX.b/L'L bits.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xcb)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VRCP28SS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xcb)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No encoder matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VRCP28SS")
    }
    return p
}
 91429  
// VRCPPS performs "Compute Approximate Reciprocals of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VRCPPS
// Supported forms : (4 forms)
//
//    * VRCPPS xmm, xmm     [AVX]
//    * VRCPPS m128, xmm    [AVX]
//    * VRCPPS ymm, ymm     [AVX]
//    * VRCPPS m256, ymm    [AVX]
//
// Legacy AVX (VEX-encoded) instruction — no masking/zeroing, encoded with
// the 2-byte VEX prefix via m.vex2 rather than the EVEX helpers used by
// the AVX-512 variants.
func (self *Program) VRCPPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VRCPPS", 2, Operands { v0, v1 })
    // VRCPPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // First m.vex2 argument differs between the 128-bit (0) and
            // 256-bit (4) forms — NOTE(review): presumably the VEX.L /
            // prefix selector bits; confirm against the vex2 helper.
            m.vex2(0, hcode(v[1]), v[0], 0)
            m.emit(0x53)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VRCPPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x53)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VRCPPS ymm, ymm
    if isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[1]), v[0], 0)
            m.emit(0x53)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VRCPPS m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX encoding has no disp8 compression, hence mrsd scale 1.
            m.vex2(4, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x53)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No encoder matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VRCPPS")
    }
    return p
}
 91487  
// VRCPSS performs "Compute Approximate Reciprocal of Scalar Single-Precision Floating-Point Values".
//
// Mnemonic        : VRCPSS
// Supported forms : (2 forms)
//
//    * VRCPSS xmm, xmm, xmm    [AVX]
//    * VRCPSS m32, xmm, xmm    [AVX]
//
// Legacy AVX (VEX-encoded) scalar form: v0 is the source (reg or m32),
// v1 the second source (folded into VEX.vvvv via hlcode), v2 the
// destination.
func (self *Program) VRCPSS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VRCPSS", 3, Operands { v0, v1, v2 })
    // VRCPSS xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // NOTE(review): first m.vex2 argument (2) is presumably the
            // prefix selector (scalar-single F3 form); confirm against the
            // vex2 helper.
            m.vex2(2, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x53)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VRCPSS m32, xmm, xmm
    if isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x53)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No encoder matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VRCPSS")
    }
    return p
}
 91523  
// VREDUCEPD performs "Perform Reduction Transformation on Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VREDUCEPD
// Supported forms : (6 forms)
//
//    * VREDUCEPD imm8, m512/m64bcst, zmm{k}{z}    [AVX512DQ]
//    * VREDUCEPD imm8, zmm, zmm{k}{z}             [AVX512DQ]
//    * VREDUCEPD imm8, m128/m64bcst, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VREDUCEPD imm8, m256/m64bcst, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VREDUCEPD imm8, xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VREDUCEPD imm8, ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
//
// v0 is the imm8 control byte, appended last via m.imm1 after opcode and
// ModRM/displacement in every form.
func (self *Program) VREDUCEPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VREDUCEPD", 3, Operands { v0, v1, v2 })
    // VREDUCEPD imm8, m512/m64bcst, zmm{k}{z}
    if isImm8(v0) && isM512M64bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: m.evex builds the prefix; m.mrsd's last argument
            // is the disp8 scale (64/32/16 bytes for the z/y/xmm widths).
            m.evex(0b11, 0x85, 0b10, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x56)
            m.mrsd(lcode(v[2]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VREDUCEPD imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: manual EVEX prefix (escape 0x62) with mask (k),
            // zeroing (z) and vector-length bits, then opcode, ModRM, imm8.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)
            m.emit(0x56)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VREDUCEPD imm8, m128/m64bcst, xmm{k}{z}
    if isImm8(v0) && isM128M64bcst(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b00, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x56)
            m.mrsd(lcode(v[2]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VREDUCEPD imm8, m256/m64bcst, ymm{k}{z}
    if isImm8(v0) && isM256M64bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x85, 0b01, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x56)
            m.mrsd(lcode(v[2]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VREDUCEPD imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x08)
            m.emit(0x56)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VREDUCEPD imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x28)
            m.emit(0x56)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoder matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VREDUCEPD")
    }
    return p
}
 91618  
// VREDUCEPS performs "Perform Reduction Transformation on Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VREDUCEPS
// Supported forms : (6 forms)
//
//    * VREDUCEPS imm8, m512/m32bcst, zmm{k}{z}    [AVX512DQ]
//    * VREDUCEPS imm8, zmm, zmm{k}{z}             [AVX512DQ]
//    * VREDUCEPS imm8, m128/m32bcst, xmm{k}{z}    [AVX512DQ,AVX512VL]
//    * VREDUCEPS imm8, m256/m32bcst, ymm{k}{z}    [AVX512DQ,AVX512VL]
//    * VREDUCEPS imm8, xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
//    * VREDUCEPS imm8, ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
//
// Single-precision counterpart of VREDUCEPD: same structure, differing only
// in the operand matchers and the opcode-map/prefix byte. v0 is the imm8
// control byte, appended last via m.imm1 in every form.
func (self *Program) VREDUCEPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VREDUCEPS", 3, Operands { v0, v1, v2 })
    // VREDUCEPS imm8, m512/m32bcst, zmm{k}{z}
    if isImm8(v0) && isM512M32bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: disp8 scale 64/32/16 = z/y/xmm operand width.
            m.evex(0b11, 0x05, 0b10, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x56)
            m.mrsd(lcode(v[2]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VREDUCEPS imm8, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form: manual EVEX prefix, then opcode, ModRM, imm8.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)
            m.emit(0x56)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VREDUCEPS imm8, m128/m32bcst, xmm{k}{z}
    if isImm8(v0) && isM128M32bcst(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b00, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x56)
            m.mrsd(lcode(v[2]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VREDUCEPS imm8, m256/m32bcst, ymm{k}{z}
    if isImm8(v0) && isM256M32bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b01, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x56)
            m.mrsd(lcode(v[2]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VREDUCEPS imm8, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x08)
            m.emit(0x56)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VREDUCEPS imm8, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x28)
            m.emit(0x56)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encoder matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VREDUCEPS")
    }
    return p
}
 91713  
// VREDUCESD performs "Perform Reduction Transformation on a Scalar Double-Precision Floating-Point Value".
//
// Mnemonic        : VREDUCESD
// Supported forms : (2 forms)
//
//    * VREDUCESD imm8, xmm, xmm, xmm{k}{z}    [AVX512DQ]
//    * VREDUCESD imm8, m64, xmm, xmm{k}{z}    [AVX512DQ]
//
// It panics if the operand combination matches none of the supported forms.
func (self *Program) VREDUCESD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VREDUCESD", 4, Operands { v0, v1, v2, v3 })
    // VREDUCESD imm8, xmm, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form, hand-assembled EVEX: prefix byte 0x62, payload
            // bytes P0-P2 (register-extension bits are stored inverted, hence
            // the XORs), opcode 0x57, a register-direct ModRM byte
            // (0xc0 | reg<<3 | rm), then the trailing imm8.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0x57)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VREDUCESD imm8, m64, xmm, xmm{k}{z}
    if isImm8(v0) && isM64(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: evex() assembles the prefix, mrsd() emits the
            // ModRM/SIB/displacement with disp8 scaled by the 8-byte tuple size.
            m.evex(0b11, 0x85, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x57)
            m.mrsd(lcode(v[3]), addr(v[1]), 8)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VREDUCESD")
    }
    return p
}
 91754  
// VREDUCESS performs "Perform Reduction Transformation on a Scalar Single-Precision Floating-Point Value".
//
// Mnemonic        : VREDUCESS
// Supported forms : (2 forms)
//
//    * VREDUCESS imm8, xmm, xmm, xmm{k}{z}    [AVX512DQ]
//    * VREDUCESS imm8, m32, xmm, xmm{k}{z}    [AVX512DQ]
//
// It panics if the operand combination matches none of the supported forms.
func (self *Program) VREDUCESS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VREDUCESS", 4, Operands { v0, v1, v2, v3 })
    // VREDUCESS imm8, xmm, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form, hand-assembled EVEX (prefix 0x62, P0-P2 with the
            // extension bits XOR-inverted, opcode 0x57, register-direct ModRM,
            // trailing imm8). Same layout as VREDUCESD but with EVEX.W=0
            // (0x7d instead of 0xfd in P1) for the single-precision variant.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0x57)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VREDUCESS imm8, m32, xmm, xmm{k}{z}
    if isImm8(v0) && isM32(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: disp8 is scaled by the 4-byte tuple size (m32 operand).
            m.evex(0b11, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x57)
            m.mrsd(lcode(v[3]), addr(v[1]), 4)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VREDUCESS")
    }
    return p
}
 91795  
// VRNDSCALEPD performs "Round Packed Double-Precision Floating-Point Values To Include A Given Number Of Fraction Bits".
//
// Mnemonic        : VRNDSCALEPD
// Supported forms : (7 forms)
//
//    * VRNDSCALEPD imm8, m512/m64bcst, zmm{k}{z}    [AVX512F]
//    * VRNDSCALEPD imm8, {sae}, zmm, zmm, zmm{k}{z} -- see note below
//    * VRNDSCALEPD imm8, {sae}, zmm, zmm{k}{z}      [AVX512F]
//    * VRNDSCALEPD imm8, zmm, zmm{k}{z}             [AVX512F]
//    * VRNDSCALEPD imm8, m128/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VRNDSCALEPD imm8, m256/m64bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VRNDSCALEPD imm8, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VRNDSCALEPD imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// The trailing variadic vv accommodates the optional {sae} form, which takes
// four operands; all other forms take exactly three.
func (self *Program) VRNDSCALEPD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // 3 operands for the plain forms, 4 when {sae} is supplied.
    switch len(vv) {
        case 0  : p = self.alloc("VRNDSCALEPD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VRNDSCALEPD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VRNDSCALEPD takes 3 or 4 operands")
    }
    // VRNDSCALEPD imm8, m512/m64bcst, zmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM512M64bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: evex() builds the prefix (0b10 selects 512-bit
            // vector length), mrsd() scales disp8 by the 64-byte tuple size.
            m.evex(0b11, 0x85, 0b10, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x09)
            m.mrsd(lcode(v[2]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRNDSCALEPD imm8, {sae}, zmm, zmm{k}{z}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // {sae} form: P2 carries 0x18 (EVEX.b set to request
            // suppress-all-exceptions) instead of a vector-length code.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[3]) << 7) | kcode(v[3]) | 0x18)
            m.emit(0x09)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRNDSCALEPD imm8, zmm, zmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 512-bit register form: 0x48 in P2 encodes the 512-bit length.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)
            m.emit(0x09)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRNDSCALEPD imm8, m128/m64bcst, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM128M64bcst(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit memory form: disp8 scaled by 16 bytes.
            m.evex(0b11, 0x85, 0b00, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x09)
            m.mrsd(lcode(v[2]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRNDSCALEPD imm8, m256/m64bcst, ymm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM256M64bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form: disp8 scaled by 32 bytes.
            m.evex(0b11, 0x85, 0b01, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x09)
            m.mrsd(lcode(v[2]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRNDSCALEPD imm8, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit register form: 0x08 in P2 encodes the 128-bit length.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x08)
            m.emit(0x09)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRNDSCALEPD imm8, ymm, ymm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form: 0x28 in P2 encodes the 256-bit length.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x28)
            m.emit(0x09)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VRNDSCALEPD")
    }
    return p
}
 91910  
// VRNDSCALEPS performs "Round Packed Single-Precision Floating-Point Values To Include A Given Number Of Fraction Bits".
//
// Mnemonic        : VRNDSCALEPS
// Supported forms : (7 forms)
//
//    * VRNDSCALEPS imm8, m512/m32bcst, zmm{k}{z}    [AVX512F]
//    * VRNDSCALEPS imm8, {sae}, zmm, zmm{k}{z}      [AVX512F]
//    * VRNDSCALEPS imm8, zmm, zmm{k}{z}             [AVX512F]
//    * VRNDSCALEPS imm8, m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VRNDSCALEPS imm8, m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VRNDSCALEPS imm8, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VRNDSCALEPS imm8, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// The trailing variadic vv accommodates the optional {sae} form, which takes
// four operands; all other forms take exactly three. The encodings mirror
// VRNDSCALEPD with opcode 0x08 and EVEX.W=0 for single precision.
func (self *Program) VRNDSCALEPS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // 3 operands for the plain forms, 4 when {sae} is supplied.
    switch len(vv) {
        case 0  : p = self.alloc("VRNDSCALEPS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VRNDSCALEPS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VRNDSCALEPS takes 3 or 4 operands")
    }
    // VRNDSCALEPS imm8, m512/m32bcst, zmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM512M32bcst(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 512-bit memory form: disp8 scaled by the 64-byte tuple size.
            m.evex(0b11, 0x05, 0b10, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x08)
            m.mrsd(lcode(v[2]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRNDSCALEPS imm8, {sae}, zmm, zmm{k}{z}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // {sae} form: 0x18 in P2 requests suppress-all-exceptions.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[3]) << 7) | kcode(v[3]) | 0x18)
            m.emit(0x08)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRNDSCALEPS imm8, zmm, zmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 512-bit register form: 0x48 in P2 encodes the 512-bit length.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x48)
            m.emit(0x08)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRNDSCALEPS imm8, m128/m32bcst, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM128M32bcst(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit memory form: disp8 scaled by 16 bytes.
            m.evex(0b11, 0x05, 0b00, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x08)
            m.mrsd(lcode(v[2]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRNDSCALEPS imm8, m256/m32bcst, ymm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM256M32bcst(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form: disp8 scaled by 32 bytes.
            m.evex(0b11, 0x05, 0b01, ehcode(v[2]), addr(v[1]), 0, kcode(v[2]), zcode(v[2]), bcode(v[1]))
            m.emit(0x08)
            m.mrsd(lcode(v[2]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRNDSCALEPS imm8, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit register form: 0x08 in P2 encodes the 128-bit length.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x08)
            m.emit(0x08)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRNDSCALEPS imm8, ymm, ymm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form: 0x28 in P2 encodes the 256-bit length.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x28)
            m.emit(0x08)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VRNDSCALEPS")
    }
    return p
}
 92025  
// VRNDSCALESD performs "Round Scalar Double-Precision Floating-Point Value To Include A Given Number Of Fraction Bits".
//
// Mnemonic        : VRNDSCALESD
// Supported forms : (3 forms)
//
//    * VRNDSCALESD imm8, m64, xmm, xmm{k}{z}           [AVX512F]
//    * VRNDSCALESD imm8, {sae}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VRNDSCALESD imm8, xmm, xmm, xmm{k}{z}           [AVX512F]
//
// The trailing variadic vv accommodates the optional {sae} form, which takes
// five operands; the other forms take exactly four.
func (self *Program) VRNDSCALESD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // 4 operands for the plain forms, 5 when {sae} is supplied.
    switch len(vv) {
        case 0  : p = self.alloc("VRNDSCALESD", 4, Operands { v0, v1, v2, v3 })
        case 1  : p = self.alloc("VRNDSCALESD", 5, Operands { v0, v1, v2, v3, vv[0] })
        default : panic("instruction VRNDSCALESD takes 4 or 5 operands")
    }
    // VRNDSCALESD imm8, m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM64(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: disp8 scaled by the 8-byte tuple size (scalar m64).
            m.evex(0b11, 0x85, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x0b)
            m.mrsd(lcode(v[3]), addr(v[1]), 8)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRNDSCALESD imm8, {sae}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // {sae} register form: 0x10 in P2 sets EVEX.b to request
            // suppress-all-exceptions; extension bits are XOR-inverted.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[4]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[4]) << 4)))
            m.emit(0xfd ^ (hlcode(v[3]) << 3))
            m.emit((zcode(v[4]) << 7) | (0x08 ^ (ecode(v[3]) << 3)) | kcode(v[4]) | 0x10)
            m.emit(0x0b)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRNDSCALESD imm8, xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Plain register form: prefix 0x62, P0-P2, opcode 0x0b,
            // register-direct ModRM, trailing imm8.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x0b)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VRNDSCALESD")
    }
    return p
}
 92086  
// VRNDSCALESS performs "Round Scalar Single-Precision Floating-Point Value To Include A Given Number Of Fraction Bits".
//
// Mnemonic        : VRNDSCALESS
// Supported forms : (3 forms)
//
//    * VRNDSCALESS imm8, m32, xmm, xmm{k}{z}           [AVX512F]
//    * VRNDSCALESS imm8, {sae}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VRNDSCALESS imm8, xmm, xmm, xmm{k}{z}           [AVX512F]
//
// The trailing variadic vv accommodates the optional {sae} form, which takes
// five operands; the other forms take exactly four. The encodings mirror
// VRNDSCALESD with opcode 0x0a and EVEX.W=0 for single precision.
func (self *Program) VRNDSCALESS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // 4 operands for the plain forms, 5 when {sae} is supplied.
    switch len(vv) {
        case 0  : p = self.alloc("VRNDSCALESS", 4, Operands { v0, v1, v2, v3 })
        case 1  : p = self.alloc("VRNDSCALESS", 5, Operands { v0, v1, v2, v3, vv[0] })
        default : panic("instruction VRNDSCALESS takes 4 or 5 operands")
    }
    // VRNDSCALESS imm8, m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isM32(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: disp8 scaled by the 4-byte tuple size (scalar m32).
            m.evex(0b11, 0x05, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), 0)
            m.emit(0x0a)
            m.mrsd(lcode(v[3]), addr(v[1]), 4)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRNDSCALESS imm8, {sae}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isImm8(v0) && isSAE(v1) && isEVEXXMM(v2) && isEVEXXMM(v3) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // {sae} register form: 0x10 in P2 sets EVEX.b to request
            // suppress-all-exceptions.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[4]) << 7) | (ehcode(v[2]) << 5) | (ecode(v[4]) << 4)))
            m.emit(0x7d ^ (hlcode(v[3]) << 3))
            m.emit((zcode(v[4]) << 7) | (0x08 ^ (ecode(v[3]) << 3)) | kcode(v[4]) | 0x10)
            m.emit(0x0a)
            m.emit(0xc0 | lcode(v[4]) << 3 | lcode(v[2]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VRNDSCALESS imm8, xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Plain register form: prefix 0x62, P0-P2, opcode 0x0a,
            // register-direct ModRM, trailing imm8.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x0a)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VRNDSCALESS")
    }
    return p
}
 92147  
// VROUNDPD performs "Round Packed Double Precision Floating-Point Values".
//
// Mnemonic        : VROUNDPD
// Supported forms : (4 forms)
//
//    * VROUNDPD imm8, xmm, xmm     [AVX]
//    * VROUNDPD imm8, m128, xmm    [AVX]
//    * VROUNDPD imm8, ymm, ymm     [AVX]
//    * VROUNDPD imm8, m256, ymm    [AVX]
//
// It panics if the operand combination matches none of the supported forms.
func (self *Program) VROUNDPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VROUNDPD", 3, Operands { v0, v1, v2 })
    // VROUNDPD imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX (prefix 0xc4): high register bits are
            // folded in XOR-inverted, 0x79 selects the 128-bit vector length,
            // then opcode 0x09, register-direct ModRM, and the imm8.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79)
            m.emit(0x09)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VROUNDPD imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form via the vex3 helper; mrsd scale 1 — VEX has no
            // disp8*N compression.
            m.vex3(0xc4, 0b11, 0x01, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x09)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VROUNDPD imm8, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form: 0x7d (VEX.L set) instead of 0x79.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d)
            m.emit(0x09)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VROUNDPD imm8, m256, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form (0x05 selects the 256-bit variant in vex3).
            m.vex3(0xc4, 0b11, 0x05, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x09)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VROUNDPD")
    }
    return p
}
 92213  
// VROUNDPS performs "Round Packed Single Precision Floating-Point Values".
//
// Mnemonic        : VROUNDPS
// Supported forms : (4 forms)
//
//    * VROUNDPS imm8, xmm, xmm     [AVX]
//    * VROUNDPS imm8, m128, xmm    [AVX]
//    * VROUNDPS imm8, ymm, ymm     [AVX]
//    * VROUNDPS imm8, m256, ymm    [AVX]
//
// It panics if the operand combination matches none of the supported forms.
// The encodings mirror VROUNDPD with opcode 0x08 for single precision.
func (self *Program) VROUNDPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VROUNDPS", 3, Operands { v0, v1, v2 })
    // VROUNDPS imm8, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX (prefix 0xc4), 128-bit register form.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79)
            m.emit(0x08)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VROUNDPS imm8, m128, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit memory form; mrsd scale 1 — VEX has no disp8*N compression.
            m.vex3(0xc4, 0b11, 0x01, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x08)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VROUNDPS imm8, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form: 0x7d (VEX.L set) instead of 0x79.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[2]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x7d)
            m.emit(0x08)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VROUNDPS imm8, m256, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form (0x05 selects the 256-bit variant in vex3).
            m.vex3(0xc4, 0b11, 0x05, hcode(v[2]), addr(v[1]), 0)
            m.emit(0x08)
            m.mrsd(lcode(v[2]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VROUNDPS")
    }
    return p
}
 92279  
// VROUNDSD performs "Round Scalar Double Precision Floating-Point Values".
//
// Mnemonic        : VROUNDSD
// Supported forms : (2 forms)
//
//    * VROUNDSD imm8, xmm, xmm, xmm    [AVX]
//    * VROUNDSD imm8, m64, xmm, xmm    [AVX]
//
// It panics if the operand combination matches none of the supported forms.
func (self *Program) VROUNDSD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VROUNDSD", 4, Operands { v0, v1, v2, v3 })
    // VROUNDSD imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX (prefix 0xc4): the second source v[2]
            // is XOR-folded into the vvvv field (stored inverted), then
            // opcode 0x0b, register-direct ModRM, and the imm8.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x0b)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VROUNDSD imm8, m64, xmm, xmm
    if isImm8(v0) && isM64(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form via vex3; hlcode(v[2]) supplies the vvvv source
            // register, mrsd scale 1 — VEX has no disp8*N compression.
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x0b)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VROUNDSD")
    }
    return p
}
 92319  
// VROUNDSS performs "Round Scalar Single Precision Floating-Point Values".
//
// Mnemonic        : VROUNDSS
// Supported forms : (2 forms)
//
//    * VROUNDSS imm8, xmm, xmm, xmm    [AVX]
//    * VROUNDSS imm8, m32, xmm, xmm    [AVX]
//
// It panics if the operand combination matches none of the supported forms.
// The encodings mirror VROUNDSD with opcode 0x0a for single precision.
func (self *Program) VROUNDSS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    p := self.alloc("VROUNDSS", 4, Operands { v0, v1, v2, v3 })
    // VROUNDSS imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 3-byte VEX (prefix 0xc4): the second source v[2]
            // is XOR-folded into the vvvv field (stored inverted), then
            // opcode 0x0a, register-direct ModRM, and the imm8.
            m.emit(0xc4)
            m.emit(0xe3 ^ (hcode(v[3]) << 7) ^ (hcode(v[1]) << 5))
            m.emit(0x79 ^ (hlcode(v[2]) << 3))
            m.emit(0x0a)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VROUNDSS imm8, m32, xmm, xmm
    if isImm8(v0) && isM32(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form via vex3; hlcode(v[2]) supplies the vvvv source
            // register, mrsd scale 1 — VEX has no disp8*N compression.
            m.vex3(0xc4, 0b11, 0x01, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0x0a)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VROUNDSS")
    }
    return p
}
 92359  
// VRSQRT14PD performs "Compute Approximate Reciprocals of Square Roots of Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VRSQRT14PD
// Supported forms : (6 forms)
//
//    * VRSQRT14PD m512/m64bcst, zmm{k}{z}    [AVX512F]
//    * VRSQRT14PD zmm, zmm{k}{z}             [AVX512F]
//    * VRSQRT14PD m128/m64bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VRSQRT14PD m256/m64bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VRSQRT14PD xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VRSQRT14PD ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Each matching form below registers one encoder closure on the instruction;
// memory forms build the EVEX prefix via m.evex, while register forms emit
// the 4-byte EVEX prefix by hand (0x62 escape followed by three bit-packed
// bytes). Opcode byte for this instruction is 0x4e in every form.
func (self *Program) VRSQRT14PD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VRSQRT14PD", 2, Operands { v0, v1 })
    // VRSQRT14PD m512/m64bcst, zmm{k}{z}
    if isM512M64bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // The trailing mrsd argument (64) is the disp8*N compression
            // scale for a full 512-bit memory operand.
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x4e)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VRSQRT14PD zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register-register form: the 0x48 constant selects the 512-bit
            // length (compare 0x08 for xmm and 0x28 for ymm in the sibling
            // forms below); the final byte is ModRM with mod=11.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x4e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VRSQRT14PD m128/m64bcst, xmm{k}{z}
    if isM128M64bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit memory form: disp8 scale 16.
            m.evex(0b10, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x4e)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VRSQRT14PD m256/m64bcst, ymm{k}{z}
    if isM256M64bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form: disp8 scale 32.
            m.evex(0b10, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x4e)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VRSQRT14PD xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x4e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VRSQRT14PD ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x4e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types: reject at build time.
    if p.len == 0 {
        panic("invalid operands for VRSQRT14PD")
    }
    return p
}
 92448  
// VRSQRT14PS performs "Compute Approximate Reciprocals of Square Roots of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VRSQRT14PS
// Supported forms : (6 forms)
//
//    * VRSQRT14PS m512/m32bcst, zmm{k}{z}    [AVX512F]
//    * VRSQRT14PS zmm, zmm{k}{z}             [AVX512F]
//    * VRSQRT14PS m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VRSQRT14PS m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VRSQRT14PS xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VRSQRT14PS ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Single-precision twin of VRSQRT14PD: same opcode byte (0x4e), but the
// evex prefix selector is 0x05 instead of 0x85 and the hand-rolled register
// forms emit 0x7d instead of 0xfd (presumably the EVEX.W bit for 32- vs
// 64-bit elements — NOTE(review): inferred from the PS/PD pairing).
func (self *Program) VRSQRT14PS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VRSQRT14PS", 2, Operands { v0, v1 })
    // VRSQRT14PS m512/m32bcst, zmm{k}{z}
    if isM512M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 512-bit memory form: disp8 scale 64.
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x4e)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VRSQRT14PS zmm, zmm{k}{z}
    if isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0x48 selects the 512-bit length (0x08 = xmm, 0x28 = ymm below).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0x4e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VRSQRT14PS m128/m32bcst, xmm{k}{z}
    if isM128M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x4e)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)
        })
    }
    // VRSQRT14PS m256/m32bcst, ymm{k}{z}
    if isM256M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x4e)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)
        })
    }
    // VRSQRT14PS xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
            m.emit(0x4e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VRSQRT14PS ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
            m.emit(0x4e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types: reject at build time.
    if p.len == 0 {
        panic("invalid operands for VRSQRT14PS")
    }
    return p
}
 92537  
// VRSQRT14SD performs "Compute Approximate Reciprocal of a Square Root of a Scalar Double-Precision Floating-Point Value".
//
// Mnemonic        : VRSQRT14SD
// Supported forms : (2 forms)
//
//    * VRSQRT14SD xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VRSQRT14SD m64, xmm, xmm{k}{z}    [AVX512F]
//
// Scalar variant: takes a second source (v1) that is folded into the EVEX
// "vvvv" field (hlcode/vcode below); opcode byte is 0x4f.
func (self *Program) VRSQRT14SD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VRSQRT14SD", 3, Operands { v0, v1, v2 })
    // VRSQRT14SD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix: v1 contributes the non-destructive
            // source register bits via hlcode/ecode; final byte is ModRM
            // (mod=11) pairing destination v2 with source v0.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x4f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VRSQRT14SD m64, xmm, xmm{k}{z}
    if isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: disp8 scale 8 for a 64-bit scalar operand.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x4f)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // No form matched the supplied operand types: reject at build time.
    if p.len == 0 {
        panic("invalid operands for VRSQRT14SD")
    }
    return p
}
 92576  
// VRSQRT14SS performs "Compute Approximate Reciprocal of a Square Root of a Scalar Single-Precision Floating-Point Value".
//
// Mnemonic        : VRSQRT14SS
// Supported forms : (2 forms)
//
//    * VRSQRT14SS xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VRSQRT14SS m32, xmm, xmm{k}{z}    [AVX512F]
//
// Single-precision twin of VRSQRT14SD: same opcode (0x4f), but 0x7d/0x05
// prefix constants and a 4-byte (m32) memory operand instead of 0xfd/0x85
// and m64.
func (self *Program) VRSQRT14SS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VRSQRT14SS", 3, Operands { v0, v1, v2 })
    // VRSQRT14SS xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled EVEX prefix; v1 is the non-destructive source
            // folded into the vvvv field via hlcode/ecode.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x4f)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VRSQRT14SS m32, xmm, xmm{k}{z}
    if isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: disp8 scale 4 for a 32-bit scalar operand.
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x4f)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // No form matched the supplied operand types: reject at build time.
    if p.len == 0 {
        panic("invalid operands for VRSQRT14SS")
    }
    return p
}
 92615  
// VRSQRT28PD performs "Approximation to the Reciprocal Square Root of Packed Double-Precision Floating-Point Values with Less Than 2^-28 Relative Error".
//
// Mnemonic        : VRSQRT28PD
// Supported forms : (3 forms)
//
//    * VRSQRT28PD m512/m64bcst, zmm{k}{z}    [AVX512ER]
//    * VRSQRT28PD {sae}, zmm, zmm{k}{z}      [AVX512ER]
//    * VRSQRT28PD zmm, zmm{k}{z}             [AVX512ER]
//
// The optional leading {sae} operand makes this a 2-or-3 operand
// instruction, hence the variadic tail; opcode byte is 0xcc.
func (self *Program) VRSQRT28PD(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate with the actual operand count; any other arity is a caller bug.
    switch len(vv) {
        case 0  : p = self.alloc("VRSQRT28PD", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VRSQRT28PD", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VRSQRT28PD takes 2 or 3 operands")
    }
    // VRSQRT28PD m512/m64bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 512-bit memory form: disp8 scale 64.
            m.evex(0b10, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xcc)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VRSQRT28PD {sae}, zmm, zmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // SAE form uses the 0x18 constant where the plain zmm form below
            // uses 0x48 — presumably the suppress-all-exceptions bit
            // replacing the vector-length bits (NOTE(review): confirm
            // against the EVEX spec).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x18)
            m.emit(0xcc)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VRSQRT28PD zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0xcc)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types: reject at build time.
    if p.len == 0 {
        panic("invalid operands for VRSQRT28PD")
    }
    return p
}
 92673  
// VRSQRT28PS performs "Approximation to the Reciprocal Square Root of Packed Single-Precision Floating-Point Values with Less Than 2^-28 Relative Error".
//
// Mnemonic        : VRSQRT28PS
// Supported forms : (3 forms)
//
//    * VRSQRT28PS m512/m32bcst, zmm{k}{z}    [AVX512ER]
//    * VRSQRT28PS {sae}, zmm, zmm{k}{z}      [AVX512ER]
//    * VRSQRT28PS zmm, zmm{k}{z}             [AVX512ER]
//
// Single-precision twin of VRSQRT28PD: same opcode (0xcc), but 0x7d/0x05
// prefix constants instead of 0xfd/0x85.
func (self *Program) VRSQRT28PS(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate with the actual operand count; any other arity is a caller bug.
    switch len(vv) {
        case 0  : p = self.alloc("VRSQRT28PS", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VRSQRT28PS", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VRSQRT28PS takes 2 or 3 operands")
    }
    // VRSQRT28PS m512/m32bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 512-bit memory form: disp8 scale 64.
            m.evex(0b10, 0x05, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0xcc)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)
        })
    }
    // VRSQRT28PS {sae}, zmm, zmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0x18 here vs 0x48 in the non-SAE form below — the
            // suppress-all-exceptions variant of the prefix byte.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[2]) << 7) | kcode(v[2]) | 0x18)
            m.emit(0xcc)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VRSQRT28PS zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7d)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
            m.emit(0xcc)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types: reject at build time.
    if p.len == 0 {
        panic("invalid operands for VRSQRT28PS")
    }
    return p
}
 92731  
// VRSQRT28SD performs "Approximation to the Reciprocal Square Root of a Scalar Double-Precision Floating-Point Value with Less Than 2^-28 Relative Error".
//
// Mnemonic        : VRSQRT28SD
// Supported forms : (3 forms)
//
//    * VRSQRT28SD m64, xmm, xmm{k}{z}           [AVX512ER]
//    * VRSQRT28SD {sae}, xmm, xmm, xmm{k}{z}    [AVX512ER]
//    * VRSQRT28SD xmm, xmm, xmm{k}{z}           [AVX512ER]
//
// Scalar variant with an optional leading {sae} operand (3 or 4 operands);
// opcode byte is 0xcd, and the extra source register rides in the EVEX
// vvvv field (hlcode/vcode).
func (self *Program) VRSQRT28SD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate with the actual operand count; any other arity is a caller bug.
    switch len(vv) {
        case 0  : p = self.alloc("VRSQRT28SD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VRSQRT28SD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VRSQRT28SD takes 3 or 4 operands")
    }
    // VRSQRT28SD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: disp8 scale 8 for a 64-bit scalar operand.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xcd)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VRSQRT28SD {sae}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // SAE form adds 0x10 where the non-SAE form below uses 0x40.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xcd)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VRSQRT28SD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xcd)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types: reject at build time.
    if p.len == 0 {
        panic("invalid operands for VRSQRT28SD")
    }
    return p
}
 92789  
// VRSQRT28SS performs "Approximation to the Reciprocal Square Root of a Scalar Single-Precision Floating-Point Value with Less Than 2^-28 Relative Error".
//
// Mnemonic        : VRSQRT28SS
// Supported forms : (3 forms)
//
//    * VRSQRT28SS m32, xmm, xmm{k}{z}           [AVX512ER]
//    * VRSQRT28SS {sae}, xmm, xmm, xmm{k}{z}    [AVX512ER]
//    * VRSQRT28SS xmm, xmm, xmm{k}{z}           [AVX512ER]
//
// Single-precision twin of VRSQRT28SD: same opcode (0xcd), but 0x7d/0x05
// prefix constants and a 4-byte (m32) memory operand.
func (self *Program) VRSQRT28SS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate with the actual operand count; any other arity is a caller bug.
    switch len(vv) {
        case 0  : p = self.alloc("VRSQRT28SS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VRSQRT28SS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VRSQRT28SS takes 3 or 4 operands")
    }
    // VRSQRT28SS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: disp8 scale 4 for a 32-bit scalar operand.
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0xcd)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VRSQRT28SS {sae}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // SAE form adds 0x10 where the non-SAE form below uses 0x40.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0xcd)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VRSQRT28SS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512ER)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0xcd)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types: reject at build time.
    if p.len == 0 {
        panic("invalid operands for VRSQRT28SS")
    }
    return p
}
 92847  
// VRSQRTPS performs "Compute Reciprocals of Square Roots of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VRSQRTPS
// Supported forms : (4 forms)
//
//    * VRSQRTPS xmm, xmm     [AVX]
//    * VRSQRTPS m128, xmm    [AVX]
//    * VRSQRTPS ymm, ymm     [AVX]
//    * VRSQRTPS m256, ymm    [AVX]
//
// Legacy AVX (VEX-encoded, not EVEX) instruction: no masking/broadcast, so
// the prefix is built by m.vex2 — first argument 0 for the 128-bit forms
// and 4 for the 256-bit forms; opcode byte is 0x52.
func (self *Program) VRSQRTPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VRSQRTPS", 2, Operands { v0, v1 })
    // VRSQRTPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[1]), v[0], 0)
            m.emit(0x52)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VRSQRTPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX memory operands use disp8 scale 1 (no EVEX compression).
            m.vex2(0, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x52)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VRSQRTPS ymm, ymm
    if isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[1]), v[0], 0)
            m.emit(0x52)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VRSQRTPS m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x52)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types: reject at build time.
    if p.len == 0 {
        panic("invalid operands for VRSQRTPS")
    }
    return p
}
 92905  
// VRSQRTSS performs "Compute Reciprocal of Square Root of Scalar Single-Precision Floating-Point Value".
//
// Mnemonic        : VRSQRTSS
// Supported forms : (2 forms)
//
//    * VRSQRTSS xmm, xmm, xmm    [AVX]
//    * VRSQRTSS m32, xmm, xmm    [AVX]
//
// Legacy AVX (VEX-encoded) scalar form: v1 is the non-destructive source
// carried in the VEX vvvv field (hlcode); opcode byte is 0x52.
func (self *Program) VRSQRTSS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VRSQRTSS", 3, Operands { v0, v1, v2 })
    // VRSQRTSS xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x52)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VRSQRTSS m32, xmm, xmm
    if isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX memory operands use disp8 scale 1 (no EVEX compression).
            m.vex2(2, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x52)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types: reject at build time.
    if p.len == 0 {
        panic("invalid operands for VRSQRTSS")
    }
    return p
}
 92941  
// VSCALEFPD performs "Scale Packed Double-Precision Floating-Point Values With Double-Precision Floating-Point Values".
//
// Mnemonic        : VSCALEFPD
// Supported forms : (7 forms)
//
//    * VSCALEFPD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VSCALEFPD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VSCALEFPD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VSCALEFPD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VSCALEFPD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VSCALEFPD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VSCALEFPD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Three-operand EVEX instruction with an optional leading {er} rounding
// operand (3 or 4 operands); opcode byte is 0x2c and the second source
// rides in the EVEX vvvv field (hlcode/vcode).
func (self *Program) VSCALEFPD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate with the actual operand count; any other arity is a caller bug.
    switch len(vv) {
        case 0  : p = self.alloc("VSCALEFPD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VSCALEFPD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VSCALEFPD takes 3 or 4 operands")
    }
    // VSCALEFPD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 512-bit memory form: disp8 scale 64.
            m.evex(0b10, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x2c)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VSCALEFPD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Embedded-rounding form: the rounding-mode bits from v0 are
            // shifted into bits 5-6 (vcode(v[0]) << 5) alongside the 0x10
            // constant, replacing the 0x40 vector-length selector of the
            // plain zmm form below.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VSCALEFPD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0x40 selects the 512-bit length (0x00 = xmm, 0x20 = ymm below).
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VSCALEFPD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x2c)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VSCALEFPD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VSCALEFPD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x2c)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VSCALEFPD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types: reject at build time.
    if p.len == 0 {
        panic("invalid operands for VSCALEFPD")
    }
    return p
}
 93049  
// VSCALEFPS performs "Scale Packed Single-Precision Floating-Point Values With Single-Precision Floating-Point Values".
//
// Mnemonic        : VSCALEFPS
// Supported forms : (7 forms)
//
//    * VSCALEFPS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VSCALEFPS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VSCALEFPS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VSCALEFPS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VSCALEFPS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VSCALEFPS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VSCALEFPS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VSCALEFPS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // The optional 4th operand is only used by the {er} (embedded-rounding) form.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VSCALEFPS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VSCALEFPS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VSCALEFPS takes 3 or 4 operands")
    }
    // Each matcher below registers one candidate encoding via p.add; for a valid
    // operand combination exactly one of the (mutually exclusive) predicates fires.
    // VSCALEFPS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: m.evex builds the EVEX prefix, mrsd emits ModRM/SIB with
            // a disp scale of 64 (the full 512-bit vector width).
            m.evex(0b10, 0x05, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x2c)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VSCALEFPS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Embedded-rounding form: the 4-byte EVEX prefix is built by hand.
            // NOTE(review): 0x10 sets EVEX.b and vcode(v[0])<<5 places the rounding
            // mode into L'L, per the EVEX static-rounding encoding — confirm against
            // the mkasm generator.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VSCALEFPS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form; trailing 0x40 selects the 512-bit vector length,
            // 0x00/0x20 in the XMM/YMM forms below select 128/256 bits.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VSCALEFPS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x2c)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VSCALEFPS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VSCALEFPS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x2c)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VSCALEFPS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x2c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: generated matchers panic rather than return an error.
    if p.len == 0 {
        panic("invalid operands for VSCALEFPS")
    }
    return p
}
 93157  
// VSCALEFSD performs "Scale Scalar Double-Precision Floating-Point Value With a Double-Precision Floating-Point Value".
//
// Mnemonic        : VSCALEFSD
// Supported forms : (3 forms)
//
//    * VSCALEFSD m64, xmm, xmm{k}{z}          [AVX512F]
//    * VSCALEFSD {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VSCALEFSD xmm, xmm, xmm{k}{z}          [AVX512F]
//
func (self *Program) VSCALEFSD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // The optional 4th operand is only used by the {er} (embedded-rounding) form.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VSCALEFSD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VSCALEFSD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VSCALEFSD takes 3 or 4 operands")
    }
    // VSCALEFSD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Scalar memory form: disp scale 8 matches the m64 operand size.
            m.evex(0b10, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x2d)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)
        })
    }
    // VSCALEFSD {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built EVEX prefix; 0x10 sets EVEX.b and vcode(v[0])<<5 carries
            // the rounding mode of the {er} pseudo-operand.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VSCALEFSD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: generated matchers panic rather than return an error.
    if p.len == 0 {
        panic("invalid operands for VSCALEFSD")
    }
    return p
}
 93215  
// VSCALEFSS performs "Scale Scalar Single-Precision Floating-Point Value With a Single-Precision Floating-Point Value".
//
// Mnemonic        : VSCALEFSS
// Supported forms : (3 forms)
//
//    * VSCALEFSS m32, xmm, xmm{k}{z}          [AVX512F]
//    * VSCALEFSS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VSCALEFSS xmm, xmm, xmm{k}{z}          [AVX512F]
//
func (self *Program) VSCALEFSS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    // The optional 4th operand is only used by the {er} (embedded-rounding) form.
    var p *Instruction
    switch len(vv) {
        case 0  : p = self.alloc("VSCALEFSS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VSCALEFSS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VSCALEFSS takes 3 or 4 operands")
    }
    // VSCALEFSS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Scalar memory form: disp scale 4 matches the m32 operand size.
            m.evex(0b10, 0x05, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x2d)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VSCALEFSS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-built EVEX prefix; 0x10 sets EVEX.b and vcode(v[0])<<5 carries
            // the rounding mode of the {er} pseudo-operand.
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VSCALEFSS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf2 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7d ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x2d)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched: generated matchers panic rather than return an error.
    if p.len == 0 {
        panic("invalid operands for VSCALEFSS")
    }
    return p
}
 93273  
// VSCATTERDPD performs "Scatter Packed Double-Precision Floating-Point Values with Signed Doubleword Indices".
//
// Mnemonic        : VSCATTERDPD
// Supported forms : (3 forms)
//
//    * VSCATTERDPD zmm, vm32y{k}    [AVX512F]
//    * VSCATTERDPD xmm, vm32x{k}    [AVX512F,AVX512VL]
//    * VSCATTERDPD ymm, vm32x{k}    [AVX512F,AVX512VL]
//
func (self *Program) VSCATTERDPD(v0 interface{}, v1 interface{}) *Instruction {
    // v0 is the source vector, v1 the vm32* destination (VSIB addressing with a
    // mandatory write-mask). Scatter forms never use z-masking or broadcast,
    // hence the zeroed trailing m.evex arguments.
    p := self.alloc("VSCATTERDPD", 2, Operands { v0, v1 })
    // VSCATTERDPD zmm, vm32y{k}
    if isZMM(v0) && isVMYk(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Disp scale 8 matches the 64-bit element size.
            m.evex(0b10, 0x85, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa2)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VSCATTERDPD xmm, vm32x{k}
    if isEVEXXMM(v0) && isVMXk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa2)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VSCATTERDPD ymm, vm32x{k}
    if isEVEXYMM(v0) && isVMXk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa2)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // No form matched: generated matchers panic rather than return an error.
    if p.len == 0 {
        panic("invalid operands for VSCATTERDPD")
    }
    return p
}
 93320  
// VSCATTERDPS performs "Scatter Packed Single-Precision Floating-Point Values with Signed Doubleword Indices".
//
// Mnemonic        : VSCATTERDPS
// Supported forms : (3 forms)
//
//    * VSCATTERDPS zmm, vm32z{k}    [AVX512F]
//    * VSCATTERDPS xmm, vm32x{k}    [AVX512F,AVX512VL]
//    * VSCATTERDPS ymm, vm32y{k}    [AVX512F,AVX512VL]
//
func (self *Program) VSCATTERDPS(v0 interface{}, v1 interface{}) *Instruction {
    // v0 is the source vector, v1 the vm32* destination (VSIB addressing with a
    // mandatory write-mask). Index width equals element width here, so the
    // index register class matches the data register class in every form.
    p := self.alloc("VSCATTERDPS", 2, Operands { v0, v1 })
    // VSCATTERDPS zmm, vm32z{k}
    if isZMM(v0) && isVMZk(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Disp scale 4 matches the 32-bit element size.
            m.evex(0b10, 0x05, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa2)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // VSCATTERDPS xmm, vm32x{k}
    if isEVEXXMM(v0) && isVMXk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa2)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // VSCATTERDPS ymm, vm32y{k}
    if isEVEXYMM(v0) && isVMYk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa2)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // No form matched: generated matchers panic rather than return an error.
    if p.len == 0 {
        panic("invalid operands for VSCATTERDPS")
    }
    return p
}
 93367  
// VSCATTERPF0DPD performs "Sparse Prefetch Packed Double-Precision Floating-Point Data Values with Signed Doubleword Indices Using T0 Hint with Intent to Write".
//
// Mnemonic        : VSCATTERPF0DPD
// Supported forms : (1 form)
//
//    * VSCATTERPF0DPD vm32y{k}    [AVX512PF]
//
func (self *Program) VSCATTERPF0DPD(v0 interface{}) *Instruction {
    // Prefetch-only: the single operand is the masked VSIB address; no data
    // register is involved, so ModRM.reg carries an opcode extension instead.
    p := self.alloc("VSCATTERPF0DPD", 1, Operands { v0 })
    // VSCATTERPF0DPD vm32y{k}
    if isVMYk(v0) {
        self.require(ISA_AVX512PF)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // ModRM.reg = 5 selects the T0-hint variant (the VSCATTERPF1* forms
            // use 6); disp scale 8 matches the 64-bit element size.
            m.evex(0b10, 0x85, 0b10, 0, addr(v[0]), 0, kcode(v[0]), 0, 0)
            m.emit(0xc6)
            m.mrsd(5, addr(v[0]), 8)
        })
    }
    // No form matched: generated matchers panic rather than return an error.
    if p.len == 0 {
        panic("invalid operands for VSCATTERPF0DPD")
    }
    return p
}
 93392  
// VSCATTERPF0DPS performs "Sparse Prefetch Packed Single-Precision Floating-Point Data Values with Signed Doubleword Indices Using T0 Hint with Intent to Write".
//
// Mnemonic        : VSCATTERPF0DPS
// Supported forms : (1 form)
//
//    * VSCATTERPF0DPS vm32z{k}    [AVX512PF]
//
func (self *Program) VSCATTERPF0DPS(v0 interface{}) *Instruction {
    // Prefetch-only: the single operand is the masked VSIB address; no data
    // register is involved, so ModRM.reg carries an opcode extension instead.
    p := self.alloc("VSCATTERPF0DPS", 1, Operands { v0 })
    // VSCATTERPF0DPS vm32z{k}
    if isVMZk(v0) {
        self.require(ISA_AVX512PF)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // ModRM.reg = 5 selects the T0-hint variant (the VSCATTERPF1* forms
            // use 6); disp scale 4 matches the 32-bit element size.
            m.evex(0b10, 0x05, 0b10, 0, addr(v[0]), 0, kcode(v[0]), 0, 0)
            m.emit(0xc6)
            m.mrsd(5, addr(v[0]), 4)
        })
    }
    // No form matched: generated matchers panic rather than return an error.
    if p.len == 0 {
        panic("invalid operands for VSCATTERPF0DPS")
    }
    return p
}
 93417  
// VSCATTERPF0QPD performs "Sparse Prefetch Packed Double-Precision Floating-Point Data Values with Signed Quadword Indices Using T0 Hint with Intent to Write".
//
// Mnemonic        : VSCATTERPF0QPD
// Supported forms : (1 form)
//
//    * VSCATTERPF0QPD vm64z{k}    [AVX512PF]
//
func (self *Program) VSCATTERPF0QPD(v0 interface{}) *Instruction {
    // Prefetch-only: the single operand is the masked VSIB address; no data
    // register is involved, so ModRM.reg carries an opcode extension instead.
    p := self.alloc("VSCATTERPF0QPD", 1, Operands { v0 })
    // VSCATTERPF0QPD vm64z{k}
    if isVMZk(v0) {
        self.require(ISA_AVX512PF)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Opcode 0xc7 = quadword-index variant (0xc6 in the D-index forms);
            // ModRM.reg = 5 selects the T0 hint; disp scale 8 matches m64 elements.
            m.evex(0b10, 0x85, 0b10, 0, addr(v[0]), 0, kcode(v[0]), 0, 0)
            m.emit(0xc7)
            m.mrsd(5, addr(v[0]), 8)
        })
    }
    // No form matched: generated matchers panic rather than return an error.
    if p.len == 0 {
        panic("invalid operands for VSCATTERPF0QPD")
    }
    return p
}
 93442  
// VSCATTERPF0QPS performs "Sparse Prefetch Packed Single-Precision Floating-Point Data Values with Signed Quadword Indices Using T0 Hint with Intent to Write".
//
// Mnemonic        : VSCATTERPF0QPS
// Supported forms : (1 form)
//
//    * VSCATTERPF0QPS vm64z{k}    [AVX512PF]
//
func (self *Program) VSCATTERPF0QPS(v0 interface{}) *Instruction {
    // Prefetch-only: the single operand is the masked VSIB address; no data
    // register is involved, so ModRM.reg carries an opcode extension instead.
    p := self.alloc("VSCATTERPF0QPS", 1, Operands { v0 })
    // VSCATTERPF0QPS vm64z{k}
    if isVMZk(v0) {
        self.require(ISA_AVX512PF)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // ModRM.reg = 5 selects the T0 hint; disp scale 4 matches m32 elements.
            m.evex(0b10, 0x05, 0b10, 0, addr(v[0]), 0, kcode(v[0]), 0, 0)
            m.emit(0xc7)
            m.mrsd(5, addr(v[0]), 4)
        })
    }
    // No form matched: generated matchers panic rather than return an error.
    if p.len == 0 {
        panic("invalid operands for VSCATTERPF0QPS")
    }
    return p
}
 93467  
// VSCATTERPF1DPD performs "Sparse Prefetch Packed Double-Precision Floating-Point Data Values with Signed Doubleword Indices Using T1 Hint with Intent to Write".
//
// Mnemonic        : VSCATTERPF1DPD
// Supported forms : (1 form)
//
//    * VSCATTERPF1DPD vm32y{k}    [AVX512PF]
//
func (self *Program) VSCATTERPF1DPD(v0 interface{}) *Instruction {
    // Prefetch-only: the single operand is the masked VSIB address; no data
    // register is involved, so ModRM.reg carries an opcode extension instead.
    p := self.alloc("VSCATTERPF1DPD", 1, Operands { v0 })
    // VSCATTERPF1DPD vm32y{k}
    if isVMYk(v0) {
        self.require(ISA_AVX512PF)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // ModRM.reg = 6 selects the T1-hint variant (the VSCATTERPF0* forms
            // use 5); disp scale 8 matches the 64-bit element size.
            m.evex(0b10, 0x85, 0b10, 0, addr(v[0]), 0, kcode(v[0]), 0, 0)
            m.emit(0xc6)
            m.mrsd(6, addr(v[0]), 8)
        })
    }
    // No form matched: generated matchers panic rather than return an error.
    if p.len == 0 {
        panic("invalid operands for VSCATTERPF1DPD")
    }
    return p
}
 93492  
// VSCATTERPF1DPS performs "Sparse Prefetch Packed Single-Precision Floating-Point Data Values with Signed Doubleword Indices Using T1 Hint with Intent to Write".
//
// Mnemonic        : VSCATTERPF1DPS
// Supported forms : (1 form)
//
//    * VSCATTERPF1DPS vm32z{k}    [AVX512PF]
//
func (self *Program) VSCATTERPF1DPS(v0 interface{}) *Instruction {
    // Prefetch-only: the single operand is the masked VSIB address; no data
    // register is involved, so ModRM.reg carries an opcode extension instead.
    p := self.alloc("VSCATTERPF1DPS", 1, Operands { v0 })
    // VSCATTERPF1DPS vm32z{k}
    if isVMZk(v0) {
        self.require(ISA_AVX512PF)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // ModRM.reg = 6 selects the T1 hint; disp scale 4 matches m32 elements.
            m.evex(0b10, 0x05, 0b10, 0, addr(v[0]), 0, kcode(v[0]), 0, 0)
            m.emit(0xc6)
            m.mrsd(6, addr(v[0]), 4)
        })
    }
    // No form matched: generated matchers panic rather than return an error.
    if p.len == 0 {
        panic("invalid operands for VSCATTERPF1DPS")
    }
    return p
}
 93517  
// VSCATTERPF1QPD performs "Sparse Prefetch Packed Double-Precision Floating-Point Data Values with Signed Quadword Indices Using T1 Hint with Intent to Write".
//
// Mnemonic        : VSCATTERPF1QPD
// Supported forms : (1 form)
//
//    * VSCATTERPF1QPD vm64z{k}    [AVX512PF]
//
func (self *Program) VSCATTERPF1QPD(v0 interface{}) *Instruction {
    // Prefetch-only: the single operand is the masked VSIB address; no data
    // register is involved, so ModRM.reg carries an opcode extension instead.
    p := self.alloc("VSCATTERPF1QPD", 1, Operands { v0 })
    // VSCATTERPF1QPD vm64z{k}
    if isVMZk(v0) {
        self.require(ISA_AVX512PF)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Opcode 0xc7 = quadword-index variant; ModRM.reg = 6 selects the
            // T1 hint; disp scale 8 matches m64 elements.
            m.evex(0b10, 0x85, 0b10, 0, addr(v[0]), 0, kcode(v[0]), 0, 0)
            m.emit(0xc7)
            m.mrsd(6, addr(v[0]), 8)
        })
    }
    // No form matched: generated matchers panic rather than return an error.
    if p.len == 0 {
        panic("invalid operands for VSCATTERPF1QPD")
    }
    return p
}
 93542  
// VSCATTERPF1QPS performs "Sparse Prefetch Packed Single-Precision Floating-Point Data Values with Signed Quadword Indices Using T1 Hint with Intent to Write".
//
// Mnemonic        : VSCATTERPF1QPS
// Supported forms : (1 form)
//
//    * VSCATTERPF1QPS vm64z{k}    [AVX512PF]
//
func (self *Program) VSCATTERPF1QPS(v0 interface{}) *Instruction {
    // Prefetch-only: the single operand is the masked VSIB address; no data
    // register is involved, so ModRM.reg carries an opcode extension instead.
    p := self.alloc("VSCATTERPF1QPS", 1, Operands { v0 })
    // VSCATTERPF1QPS vm64z{k}
    if isVMZk(v0) {
        self.require(ISA_AVX512PF)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // ModRM.reg = 6 selects the T1 hint; disp scale 4 matches m32 elements.
            m.evex(0b10, 0x05, 0b10, 0, addr(v[0]), 0, kcode(v[0]), 0, 0)
            m.emit(0xc7)
            m.mrsd(6, addr(v[0]), 4)
        })
    }
    // No form matched: generated matchers panic rather than return an error.
    if p.len == 0 {
        panic("invalid operands for VSCATTERPF1QPS")
    }
    return p
}
 93567  
// VSCATTERQPD performs "Scatter Packed Double-Precision Floating-Point Values with Signed Quadword Indices".
//
// Mnemonic        : VSCATTERQPD
// Supported forms : (3 forms)
//
//    * VSCATTERQPD zmm, vm64z{k}    [AVX512F]
//    * VSCATTERQPD xmm, vm64x{k}    [AVX512F,AVX512VL]
//    * VSCATTERQPD ymm, vm64y{k}    [AVX512F,AVX512VL]
//
func (self *Program) VSCATTERQPD(v0 interface{}, v1 interface{}) *Instruction {
    // v0 is the source vector, v1 the vm64* destination (VSIB addressing with a
    // mandatory write-mask). Index width equals element width, so the index
    // register class matches the data register class in every form.
    p := self.alloc("VSCATTERQPD", 2, Operands { v0, v1 })
    // VSCATTERQPD zmm, vm64z{k}
    if isZMM(v0) && isVMZk(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Disp scale 8 matches the 64-bit element size.
            m.evex(0b10, 0x85, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa3)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VSCATTERQPD xmm, vm64x{k}
    if isEVEXXMM(v0) && isVMXk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa3)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // VSCATTERQPD ymm, vm64y{k}
    if isEVEXYMM(v0) && isVMYk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x85, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa3)
            m.mrsd(lcode(v[0]), addr(v[1]), 8)
        })
    }
    // No form matched: generated matchers panic rather than return an error.
    if p.len == 0 {
        panic("invalid operands for VSCATTERQPD")
    }
    return p
}
 93614  
// VSCATTERQPS performs "Scatter Packed Single-Precision Floating-Point Values with Signed Quadword Indices".
//
// Mnemonic        : VSCATTERQPS
// Supported forms : (3 forms)
//
//    * VSCATTERQPS ymm, vm64z{k}    [AVX512F]
//    * VSCATTERQPS xmm, vm64x{k}    [AVX512F,AVX512VL]
//    * VSCATTERQPS xmm, vm64y{k}    [AVX512F,AVX512VL]
//
func (self *Program) VSCATTERQPS(v0 interface{}, v1 interface{}) *Instruction {
    // v0 is the source vector, v1 the vm64* destination. The 64-bit indices are
    // twice as wide as the 32-bit elements, so the data register is one size
    // class below the index register (ymm data with vm64z, xmm with vm64y).
    p := self.alloc("VSCATTERQPS", 2, Operands { v0, v1 })
    // VSCATTERQPS ymm, vm64z{k}
    if isEVEXYMM(v0) && isVMZk(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Disp scale 4 matches the 32-bit element size.
            m.evex(0b10, 0x05, 0b10, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa3)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // VSCATTERQPS xmm, vm64x{k}
    if isEVEXXMM(v0) && isVMXk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b00, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa3)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // VSCATTERQPS xmm, vm64y{k}
    if isEVEXXMM(v0) && isVMYk(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b10, 0x05, 0b01, ehcode(v[0]), addr(v[1]), 0, kcode(v[1]), 0, 0)
            m.emit(0xa3)
            m.mrsd(lcode(v[0]), addr(v[1]), 4)
        })
    }
    // No form matched: generated matchers panic rather than return an error.
    if p.len == 0 {
        panic("invalid operands for VSCATTERQPS")
    }
    return p
}
 93661  
// VSHUFF32X4 performs "Shuffle 128-Bit Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VSHUFF32X4
// Supported forms : (4 forms)
//
//    * VSHUFF32X4 imm8, m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VSHUFF32X4 imm8, zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VSHUFF32X4 imm8, m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VSHUFF32X4 imm8, ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VSHUFF32X4(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Operand order follows AT&T convention: v0 is the imm8 selector, v1 the
    // second source (reg or mem), v2 the first source, v3 the destination.
    // The immediate is always emitted last, after ModRM/SIB.
    p := self.alloc("VSHUFF32X4", 4, Operands { v0, v1, v2, v3 })
    // VSHUFF32X4 imm8, m512/m32bcst, zmm, zmm{k}{z}
    if isImm8(v0) && isM512M32bcst(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: m.evex builds the EVEX prefix, mrsd emits ModRM/SIB
            // with a disp scale of 64 (the full 512-bit vector width).
            m.evex(0b11, 0x05, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x23)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFF32X4 imm8, zmm, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form; trailing 0x40 selects the 512-bit vector length
            // (0x20 in the YMM form below selects 256 bits).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x23)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFF32X4 imm8, m256/m32bcst, ymm, ymm{k}{z}
    if isImm8(v0) && isM256M32bcst(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b11, 0x05, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x23)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFF32X4 imm8, ymm, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x23)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No form matched: generated matchers panic rather than return an error.
    if p.len == 0 {
        panic("invalid operands for VSHUFF32X4")
    }
    return p
}
 93729  
// VSHUFF64X2 performs "Shuffle 128-Bit Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VSHUFF64X2
// Supported forms : (4 forms)
//
//    * VSHUFF64X2 imm8, m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VSHUFF64X2 imm8, zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VSHUFF64X2 imm8, m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VSHUFF64X2 imm8, ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VSHUFF64X2(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Operand roles (AT&T-style order, per the forms above): v0 = imm8 lane
    // selector, v1 = r/m source (register or memory/broadcast), v2 = the
    // source register encoded in EVEX.vvvv, v3 = destination with optional
    // {k}{z} masking. Every matching form below registers one encoder.
    p := self.alloc("VSHUFF64X2", 4, Operands { v0, v1, v2, v3 })
    // VSHUFF64X2 imm8, m512/m64bcst, zmm, zmm{k}{z}
    if isImm8(v0) && isM512M64bcst(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Memory form: evex() assembles the 4-byte EVEX prefix (vector
            // length 0b10 = 512-bit), then opcode 0x23, ModRM/SIB with a
            // 64-byte disp8 compression scale, and the trailing imm8.
            m.evex(0b11, 0x85, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x23)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFF64X2 imm8, zmm, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Register form with a hand-rolled EVEX prefix: 0x62 escape, then
            // three payload bytes folding in the registers' extension bits
            // (hcode/ehcode/ecode), the vvvv register (hlcode), opmask
            // (kcode), zeroing (zcode), and the vector-length bits (0x40
            // here = 512-bit; the ymm form below uses 0x20).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x23)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFF64X2 imm8, m256/m64bcst, ymm, ymm{k}{z}
    if isImm8(v0) && isM256M64bcst(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same as the 512-bit memory form, but with vector length 0b01
            // (256-bit) and a 32-byte disp8 compression scale.
            m.evex(0b11, 0x85, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x23)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFF64X2 imm8, ymm, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same as the 512-bit register form, with the 256-bit length bit (0x20).
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x23)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encodable form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VSHUFF64X2")
    }
    return p
}
 93797  
// VSHUFI32X4 performs "Shuffle 128-Bit Packed Doubleword Integer Values".
//
// Mnemonic        : VSHUFI32X4
// Supported forms : (4 forms)
//
//    * VSHUFI32X4 imm8, m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VSHUFI32X4 imm8, zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VSHUFI32X4 imm8, m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VSHUFI32X4 imm8, ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VSHUFI32X4(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Operand roles: v0 = imm8 lane selector, v1 = r/m source, v2 = source
    // register in EVEX.vvvv, v3 = destination with optional {k}{z} masking.
    // Structure mirrors VSHUFF64X2 with opcode 0x43 and 32-bit elements
    // (note the 0x05/0x7d selectors here versus 0x85/0xfd in the 64-bit
    // shuffles — presumably the EVEX.W bit; m32bcst broadcast accordingly).
    p := self.alloc("VSHUFI32X4", 4, Operands { v0, v1, v2, v3 })
    // VSHUFI32X4 imm8, m512/m32bcst, zmm, zmm{k}{z}
    if isImm8(v0) && isM512M32bcst(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 512-bit memory form: EVEX prefix via helper, opcode, ModRM/SIB
            // with 64-byte disp8 scale, then the imm8 selector.
            m.evex(0b11, 0x05, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x43)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFI32X4 imm8, zmm, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 512-bit register form: hand-rolled EVEX prefix (0x40 = 512-bit
            // vector length), opcode, ModRM (0xc0 | reg<<3 | rm), imm8.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x43)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFI32X4 imm8, m256/m32bcst, ymm, ymm{k}{z}
    if isImm8(v0) && isM256M32bcst(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form: vector length 0b01, 32-byte disp8 scale.
            m.evex(0b11, 0x05, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x43)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFI32X4 imm8, ymm, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form: as above with the 0x20 length bit.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7d ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x43)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encodable form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VSHUFI32X4")
    }
    return p
}
 93865  
// VSHUFI64X2 performs "Shuffle 128-Bit Packed Quadword Integer Values".
//
// Mnemonic        : VSHUFI64X2
// Supported forms : (4 forms)
//
//    * VSHUFI64X2 imm8, m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VSHUFI64X2 imm8, zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VSHUFI64X2 imm8, m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VSHUFI64X2 imm8, ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VSHUFI64X2(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Operand roles: v0 = imm8 lane selector, v1 = r/m source, v2 = source
    // register in EVEX.vvvv, v3 = destination with optional {k}{z} masking.
    // Identical structure to VSHUFI32X4 (opcode 0x43) but with the 64-bit
    // element selectors (0x85/0xfd) and m64bcst broadcast forms.
    p := self.alloc("VSHUFI64X2", 4, Operands { v0, v1, v2, v3 })
    // VSHUFI64X2 imm8, m512/m64bcst, zmm, zmm{k}{z}
    if isImm8(v0) && isM512M64bcst(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 512-bit memory form: EVEX prefix via helper, opcode, ModRM/SIB
            // with 64-byte disp8 scale, then the imm8 selector.
            m.evex(0b11, 0x85, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x43)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFI64X2 imm8, zmm, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 512-bit register form: hand-rolled EVEX prefix (0x40 = 512-bit
            // vector length), opcode, ModRM (0xc0 | reg<<3 | rm), imm8.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0x43)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFI64X2 imm8, m256/m64bcst, ymm, ymm{k}{z}
    if isImm8(v0) && isM256M64bcst(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form: vector length 0b01, 32-byte disp8 scale.
            m.evex(0b11, 0x85, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0x43)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFI64X2 imm8, ymm, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit register form: as above with the 0x20 length bit.
            m.emit(0x62)
            m.emit(0xf3 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0x43)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encodable form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VSHUFI64X2")
    }
    return p
}
 93933  
// VSHUFPD performs "Shuffle Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VSHUFPD
// Supported forms : (10 forms)
//
//    * VSHUFPD imm8, xmm, xmm, xmm                   [AVX]
//    * VSHUFPD imm8, m128, xmm, xmm                  [AVX]
//    * VSHUFPD imm8, ymm, ymm, ymm                   [AVX]
//    * VSHUFPD imm8, m256, ymm, ymm                  [AVX]
//    * VSHUFPD imm8, m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VSHUFPD imm8, zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VSHUFPD imm8, m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VSHUFPD imm8, xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VSHUFPD imm8, m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VSHUFPD imm8, ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VSHUFPD(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Operand roles: v0 = imm8 shuffle selector, v1 = r/m source, v2 = source
    // register in VEX/EVEX.vvvv, v3 = destination. The first four forms are
    // VEX-encoded (AVX, opcode 0xc6); the rest are EVEX-encoded (AVX-512,
    // with optional {k}{z} masking and m64bcst broadcast).
    p := self.alloc("VSHUFPD", 4, Operands { v0, v1, v2, v3 })
    // VSHUFPD imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX 128-bit register form; vex2's first argument is 1 here and
            // 5 in the ymm forms (presumably prefix selection plus the
            // 256-bit length bit 0x04).
            m.vex2(1, hcode(v[3]), v[1], hlcode(v[2]))
            m.emit(0xc6)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFPD imm8, m128, xmm, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX 128-bit memory form; disp8 scale 1 (no EVEX compression).
            m.vex2(1, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xc6)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFPD imm8, ymm, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX 256-bit register form.
            m.vex2(5, hcode(v[3]), v[1], hlcode(v[2]))
            m.emit(0xc6)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFPD imm8, m256, ymm, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX 256-bit memory form.
            m.vex2(5, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xc6)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFPD imm8, m512/m64bcst, zmm, zmm{k}{z}
    if isImm8(v0) && isM512M64bcst(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 512-bit memory form: vector length 0b10, 64-byte disp8
            // compression scale, broadcast bit from the memory operand.
            m.evex(0b01, 0x85, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0xc6)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFPD imm8, zmm, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 512-bit register form, hand-rolled prefix: 0x62 escape,
            // register extension bits, vvvv (hlcode), opmask (kcode),
            // zeroing (zcode), and vector-length bits (0x40 = 512-bit;
            // the xmm/ymm forms below use 0x00/0x20).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0xc6)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFPD imm8, m128/m64bcst, xmm, xmm{k}{z}
    if isImm8(v0) && isM128M64bcst(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 128-bit memory form: vector length 0b00, 16-byte disp8 scale.
            m.evex(0b01, 0x85, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0xc6)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFPD imm8, xmm, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 128-bit register form (length bits 0x00).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0xc6)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFPD imm8, m256/m64bcst, ymm, ymm{k}{z}
    if isImm8(v0) && isM256M64bcst(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 256-bit memory form: vector length 0b01, 32-byte disp8 scale.
            m.evex(0b01, 0x85, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0xc6)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFPD imm8, ymm, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 256-bit register form (length bit 0x20).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0xfd ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0xc6)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encodable form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VSHUFPD")
    }
    return p
}
 94076  
// VSHUFPS performs "Shuffle Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VSHUFPS
// Supported forms : (10 forms)
//
//    * VSHUFPS imm8, xmm, xmm, xmm                   [AVX]
//    * VSHUFPS imm8, m128, xmm, xmm                  [AVX]
//    * VSHUFPS imm8, ymm, ymm, ymm                   [AVX]
//    * VSHUFPS imm8, m256, ymm, ymm                  [AVX]
//    * VSHUFPS imm8, m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VSHUFPS imm8, zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VSHUFPS imm8, m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VSHUFPS imm8, xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VSHUFPS imm8, m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VSHUFPS imm8, ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
func (self *Program) VSHUFPS(v0 interface{}, v1 interface{}, v2 interface{}, v3 interface{}) *Instruction {
    // Operand roles: v0 = imm8 shuffle selector, v1 = r/m source, v2 = source
    // register in VEX/EVEX.vvvv, v3 = destination. Structure mirrors VSHUFPD
    // (opcode 0xc6) but for 32-bit elements: vex2 selectors 0/4 instead of
    // 1/5, EVEX selectors 0x04/0x7c instead of 0x85/0xfd, m32bcst broadcast.
    p := self.alloc("VSHUFPS", 4, Operands { v0, v1, v2, v3 })
    // VSHUFPS imm8, xmm, xmm, xmm
    if isImm8(v0) && isXMM(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX 128-bit register form (ymm forms pass 4, presumably the
            // 256-bit length bit).
            m.vex2(0, hcode(v[3]), v[1], hlcode(v[2]))
            m.emit(0xc6)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFPS imm8, m128, xmm, xmm
    if isImm8(v0) && isM128(v1) && isXMM(v2) && isXMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX 128-bit memory form; disp8 scale 1 (no EVEX compression).
            m.vex2(0, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xc6)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFPS imm8, ymm, ymm, ymm
    if isImm8(v0) && isYMM(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX 256-bit register form.
            m.vex2(4, hcode(v[3]), v[1], hlcode(v[2]))
            m.emit(0xc6)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFPS imm8, m256, ymm, ymm
    if isImm8(v0) && isM256(v1) && isYMM(v2) && isYMM(v3) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX 256-bit memory form.
            m.vex2(4, hcode(v[3]), addr(v[1]), hlcode(v[2]))
            m.emit(0xc6)
            m.mrsd(lcode(v[3]), addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFPS imm8, m512/m32bcst, zmm, zmm{k}{z}
    if isImm8(v0) && isM512M32bcst(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 512-bit memory form: vector length 0b10, 64-byte disp8
            // compression scale, broadcast bit from the memory operand.
            m.evex(0b01, 0x04, 0b10, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0xc6)
            m.mrsd(lcode(v[3]), addr(v[1]), 64)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFPS imm8, zmm, zmm, zmm{k}{z}
    if isImm8(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(v3) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 512-bit register form, hand-rolled prefix: 0x62 escape,
            // register extension bits, vvvv (hlcode), opmask (kcode),
            // zeroing (zcode), vector-length bits (0x40 = 512-bit).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7c ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x40)
            m.emit(0xc6)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFPS imm8, m128/m32bcst, xmm, xmm{k}{z}
    if isImm8(v0) && isM128M32bcst(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 128-bit memory form: vector length 0b00, 16-byte disp8 scale.
            m.evex(0b01, 0x04, 0b00, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0xc6)
            m.mrsd(lcode(v[3]), addr(v[1]), 16)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFPS imm8, xmm, xmm, xmm{k}{z}
    if isImm8(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 128-bit register form (length bits 0x00).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7c ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x00)
            m.emit(0xc6)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFPS imm8, m256/m32bcst, ymm, ymm{k}{z}
    if isImm8(v0) && isM256M32bcst(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 256-bit memory form: vector length 0b01, 32-byte disp8 scale.
            m.evex(0b01, 0x04, 0b01, ehcode(v[3]), addr(v[1]), vcode(v[2]), kcode(v[3]), zcode(v[3]), bcode(v[1]))
            m.emit(0xc6)
            m.mrsd(lcode(v[3]), addr(v[1]), 32)
            m.imm1(toImmAny(v[0]))
        })
    }
    // VSHUFPS imm8, ymm, ymm, ymm{k}{z}
    if isImm8(v0) && isEVEXYMM(v1) && isEVEXYMM(v2) && isYMMkz(v3) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 256-bit register form (length bit 0x20).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7c ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x20)
            m.emit(0xc6)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // No encodable form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VSHUFPS")
    }
    return p
}
 94219  
 94220  // VSQRTPD performs "Compute Square Roots of Packed Double-Precision Floating-Point Values".
 94221  //
 94222  // Mnemonic        : VSQRTPD
 94223  // Supported forms : (11 forms)
 94224  //
 94225  //    * VSQRTPD xmm, xmm                   [AVX]
 94226  //    * VSQRTPD m128, xmm                  [AVX]
 94227  //    * VSQRTPD ymm, ymm                   [AVX]
 94228  //    * VSQRTPD m256, ymm                  [AVX]
 94229  //    * VSQRTPD m512/m64bcst, zmm{k}{z}    [AVX512F]
 94230  //    * VSQRTPD {er}, zmm, zmm{k}{z}       [AVX512F]
 94231  //    * VSQRTPD zmm, zmm{k}{z}             [AVX512F]
 94232  //    * VSQRTPD m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
 94233  //    * VSQRTPD m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
 94234  //    * VSQRTPD xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 94235  //    * VSQRTPD ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 94236  //
 94237  func (self *Program) VSQRTPD(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
 94238      var p *Instruction
 94239      switch len(vv) {
 94240          case 0  : p = self.alloc("VSQRTPD", 2, Operands { v0, v1 })
 94241          case 1  : p = self.alloc("VSQRTPD", 3, Operands { v0, v1, vv[0] })
 94242          default : panic("instruction VSQRTPD takes 2 or 3 operands")
 94243      }
 94244      // VSQRTPD xmm, xmm
 94245      if len(vv) == 0 && isXMM(v0) && isXMM(v1) {
 94246          self.require(ISA_AVX)
 94247          p.domain = DomainAVX
 94248          p.add(0, func(m *_Encoding, v []interface{}) {
 94249              m.vex2(1, hcode(v[1]), v[0], 0)
 94250              m.emit(0x51)
 94251              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 94252          })
 94253      }
 94254      // VSQRTPD m128, xmm
 94255      if len(vv) == 0 && isM128(v0) && isXMM(v1) {
 94256          self.require(ISA_AVX)
 94257          p.domain = DomainAVX
 94258          p.add(0, func(m *_Encoding, v []interface{}) {
 94259              m.vex2(1, hcode(v[1]), addr(v[0]), 0)
 94260              m.emit(0x51)
 94261              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 94262          })
 94263      }
 94264      // VSQRTPD ymm, ymm
 94265      if len(vv) == 0 && isYMM(v0) && isYMM(v1) {
 94266          self.require(ISA_AVX)
 94267          p.domain = DomainAVX
 94268          p.add(0, func(m *_Encoding, v []interface{}) {
 94269              m.vex2(5, hcode(v[1]), v[0], 0)
 94270              m.emit(0x51)
 94271              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 94272          })
 94273      }
 94274      // VSQRTPD m256, ymm
 94275      if len(vv) == 0 && isM256(v0) && isYMM(v1) {
 94276          self.require(ISA_AVX)
 94277          p.domain = DomainAVX
 94278          p.add(0, func(m *_Encoding, v []interface{}) {
 94279              m.vex2(5, hcode(v[1]), addr(v[0]), 0)
 94280              m.emit(0x51)
 94281              m.mrsd(lcode(v[1]), addr(v[0]), 1)
 94282          })
 94283      }
 94284      // VSQRTPD m512/m64bcst, zmm{k}{z}
 94285      if len(vv) == 0 && isM512M64bcst(v0) && isZMMkz(v1) {
 94286          self.require(ISA_AVX512F)
 94287          p.domain = DomainAVX
 94288          p.add(0, func(m *_Encoding, v []interface{}) {
 94289              m.evex(0b01, 0x85, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
 94290              m.emit(0x51)
 94291              m.mrsd(lcode(v[1]), addr(v[0]), 64)
 94292          })
 94293      }
 94294      // VSQRTPD {er}, zmm, zmm{k}{z}
 94295      if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMMkz(vv[0]) {
 94296          self.require(ISA_AVX512F)
 94297          p.domain = DomainAVX
 94298          p.add(0, func(m *_Encoding, v []interface{}) {
 94299              m.emit(0x62)
 94300              m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
 94301              m.emit(0xfd)
 94302              m.emit((zcode(v[2]) << 7) | (vcode(v[0]) << 5) | kcode(v[2]) | 0x18)
 94303              m.emit(0x51)
 94304              m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
 94305          })
 94306      }
 94307      // VSQRTPD zmm, zmm{k}{z}
 94308      if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
 94309          self.require(ISA_AVX512F)
 94310          p.domain = DomainAVX
 94311          p.add(0, func(m *_Encoding, v []interface{}) {
 94312              m.emit(0x62)
 94313              m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
 94314              m.emit(0xfd)
 94315              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)
 94316              m.emit(0x51)
 94317              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 94318          })
 94319      }
 94320      // VSQRTPD m128/m32bcst, xmm{k}{z}
 94321      if len(vv) == 0 && isM128M32bcst(v0) && isXMMkz(v1) {
 94322          self.require(ISA_AVX512VL | ISA_AVX512F)
 94323          p.domain = DomainAVX
 94324          p.add(0, func(m *_Encoding, v []interface{}) {
 94325              m.evex(0b01, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
 94326              m.emit(0x51)
 94327              m.mrsd(lcode(v[1]), addr(v[0]), 16)
 94328          })
 94329      }
 94330      // VSQRTPD m256/m32bcst, ymm{k}{z}
 94331      if len(vv) == 0 && isM256M32bcst(v0) && isYMMkz(v1) {
 94332          self.require(ISA_AVX512VL | ISA_AVX512F)
 94333          p.domain = DomainAVX
 94334          p.add(0, func(m *_Encoding, v []interface{}) {
 94335              m.evex(0b01, 0x85, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
 94336              m.emit(0x51)
 94337              m.mrsd(lcode(v[1]), addr(v[0]), 32)
 94338          })
 94339      }
 94340      // VSQRTPD xmm, xmm{k}{z}
 94341      if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
 94342          self.require(ISA_AVX512VL | ISA_AVX512F)
 94343          p.domain = DomainAVX
 94344          p.add(0, func(m *_Encoding, v []interface{}) {
 94345              m.emit(0x62)
 94346              m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
 94347              m.emit(0xfd)
 94348              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)
 94349              m.emit(0x51)
 94350              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 94351          })
 94352      }
 94353      // VSQRTPD ymm, ymm{k}{z}
 94354      if len(vv) == 0 && isEVEXYMM(v0) && isYMMkz(v1) {
 94355          self.require(ISA_AVX512VL | ISA_AVX512F)
 94356          p.domain = DomainAVX
 94357          p.add(0, func(m *_Encoding, v []interface{}) {
 94358              m.emit(0x62)
 94359              m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
 94360              m.emit(0xfd)
 94361              m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)
 94362              m.emit(0x51)
 94363              m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
 94364          })
 94365      }
 94366      if p.len == 0 {
 94367          panic("invalid operands for VSQRTPD")
 94368      }
 94369      return p
 94370  }
 94371  
// VSQRTPS performs "Compute Square Roots of Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VSQRTPS
// Supported forms : (11 forms)
//
//    * VSQRTPS xmm, xmm                   [AVX]
//    * VSQRTPS m128, xmm                  [AVX]
//    * VSQRTPS ymm, ymm                   [AVX]
//    * VSQRTPS m256, ymm                  [AVX]
//    * VSQRTPS m512/m32bcst, zmm{k}{z}    [AVX512F]
//    * VSQRTPS {er}, zmm, zmm{k}{z}       [AVX512F]
//    * VSQRTPS zmm, zmm{k}{z}             [AVX512F]
//    * VSQRTPS m128/m32bcst, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VSQRTPS m256/m32bcst, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VSQRTPS xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VSQRTPS ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Each operand-form check below that matches registers one more encoder
// closure with p.add; if none matches, p.len stays 0 and the function panics.
func (self *Program) VSQRTPS(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional trailing operand selects the 3-operand {er} (embedded
    // rounding) form; all other forms take exactly 2 operands.
    switch len(vv) {
        case 0  : p = self.alloc("VSQRTPS", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VSQRTPS", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VSQRTPS takes 2 or 3 operands")
    }
    // VSQRTPS xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[1]), v[0], 0)                // 2-byte VEX prefix: 128-bit, no SIMD prefix, vvvv unused
            m.emit(0x51)                                   // opcode
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))  // ModRM: register-direct, reg=dst, rm=src
        })
    }
    // VSQRTPS m128, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x51)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)             // ModRM+SIB for the memory operand, disp scale 1 (no disp8*N compression)
        })
    }
    // VSQRTPS ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[1]), v[0], 0)                // VEX with 256-bit vector length selected
            m.emit(0x51)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VSQRTPS m256, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x51)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VSQRTPS m512/m32bcst, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix built by helper: map 0F, no SIMD prefix, 512-bit, mask/zeroing/broadcast bits from the operands.
            m.evex(0b01, 0x04, 0b10, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x51)
            m.mrsd(lcode(v[1]), addr(v[0]), 64)            // disp8 compressed by N=64 (full 512-bit memory operand)
        })
    }
    // VSQRTPS {er}, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (escape, P0, P1, P2).
            m.emit(0x62)                                                                  // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4))) // P0: inverted R/X/B/R' register-extension bits, map = 0F
            m.emit(0x7c)                                                                  // P1: W=0, vvvv=1111 (unused), pp=00 (no SIMD prefix)
            m.emit((zcode(v[2]) << 7) | (vcode(v[0]) << 5) | kcode(v[2]) | 0x18)          // P2: z, rounding mode in L'L, b=1 (embedded rounding), aaa mask
            m.emit(0x51)                                                                  // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))                                 // ModRM: register-direct, reg=dst, rm=src
        })
    }
    // VSQRTPS zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMMkz(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)                                                                  // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4))) // P0: inverted register-extension bits, map = 0F
            m.emit(0x7c)                                                                  // P1: W=0, vvvv unused, pp=00
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x48)                               // P2: z, aaa mask; 0x48 selects L'L=10 (512-bit)
            m.emit(0x51)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VSQRTPS m128/m32bcst, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x51)
            m.mrsd(lcode(v[1]), addr(v[0]), 16)            // disp8*N with N=16 (128-bit memory operand)
        })
    }
    // VSQRTPS m256/m32bcst, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[1]), addr(v[0]), 0, kcode(v[1]), zcode(v[1]), bcode(v[0]))
            m.emit(0x51)
            m.mrsd(lcode(v[1]), addr(v[0]), 32)            // disp8*N with N=32 (256-bit memory operand)
        })
    }
    // VSQRTPS xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isXMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x08)                               // P2: 0x08 selects L'L=00 (128-bit)
            m.emit(0x51)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VSQRTPS ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isYMMkz(v1) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit((zcode(v[1]) << 7) | kcode(v[1]) | 0x28)                               // P2: 0x28 selects L'L=01 (256-bit)
            m.emit(0x51)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VSQRTPS")
    }
    return p
}
 94523  
// VSQRTSD performs "Compute Square Root of Scalar Double-Precision Floating-Point Value".
//
// Mnemonic        : VSQRTSD
// Supported forms : (5 forms)
//
//    * VSQRTSD xmm, xmm, xmm                [AVX]
//    * VSQRTSD m64, xmm, xmm                [AVX]
//    * VSQRTSD m64, xmm, xmm{k}{z}          [AVX512F]
//    * VSQRTSD {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VSQRTSD xmm, xmm, xmm{k}{z}          [AVX512F]
//
// Each matching operand form registers one encoder closure via p.add; if no
// form matches, p.len stays 0 and the function panics.
func (self *Program) VSQRTSD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional trailing operand selects the 4-operand {er} (embedded
    // rounding) form; all other forms take exactly 3 operands.
    switch len(vv) {
        case 0  : p = self.alloc("VSQRTSD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VSQRTSD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VSQRTSD takes 3 or 4 operands")
    }
    // VSQRTSD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), v[0], hlcode(v[1]))     // 2-byte VEX prefix; vvvv carries the first source register
            m.emit(0x51)                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))  // ModRM: register-direct, reg=dst, rm=second source
        })
    }
    // VSQRTSD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x51)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)             // ModRM+SIB for the memory operand, no disp8*N compression
        })
    }
    // VSQRTSD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix built by helper: map 0F, F2 prefix + W=1, scalar form (no broadcast).
            m.evex(0b01, 0x87, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x51)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)             // disp8*N with N=8 (64-bit scalar memory operand)
        })
    }
    // VSQRTSD {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (escape, P0, P1, P2).
            m.emit(0x62)                                                                  // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4))) // P0: inverted R/X/B/R' register-extension bits, map = 0F
            m.emit(0xff ^ (hlcode(v[2]) << 3))                                            // P1: W=1, vvvv = inverted first source, pp=11 (F2)
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10) // P2: z, rounding in L'L, b=1 ({er}), V' of src1, aaa mask
            m.emit(0x51)                                                                  // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                 // ModRM: register-direct, reg=dst, rm=second source
        })
    }
    // VSQRTSD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xff ^ (hlcode(v[1]) << 3))                                            // P1: W=1, vvvv = inverted first source, pp=11 (F2)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40) // P2: z, V' of src1, aaa mask
            m.emit(0x51)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VSQRTSD")
    }
    return p
}
 94603  
// VSQRTSS performs "Compute Square Root of Scalar Single-Precision Floating-Point Value".
//
// Mnemonic        : VSQRTSS
// Supported forms : (5 forms)
//
//    * VSQRTSS xmm, xmm, xmm                [AVX]
//    * VSQRTSS m32, xmm, xmm                [AVX]
//    * VSQRTSS m32, xmm, xmm{k}{z}          [AVX512F]
//    * VSQRTSS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VSQRTSS xmm, xmm, xmm{k}{z}          [AVX512F]
//
// Each matching operand form registers one encoder closure via p.add; if no
// form matches, p.len stays 0 and the function panics.
func (self *Program) VSQRTSS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional trailing operand selects the 4-operand {er} (embedded
    // rounding) form; all other forms take exactly 3 operands.
    switch len(vv) {
        case 0  : p = self.alloc("VSQRTSS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VSQRTSS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VSQRTSS takes 3 or 4 operands")
    }
    // VSQRTSS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[2]), v[0], hlcode(v[1]))     // 2-byte VEX prefix; vvvv carries the first source register
            m.emit(0x51)                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))  // ModRM: register-direct, reg=dst, rm=second source
        })
    }
    // VSQRTSS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(2, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x51)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)             // ModRM+SIB for the memory operand, no disp8*N compression
        })
    }
    // VSQRTSS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix built by helper: map 0F, F3 prefix, scalar form (no broadcast).
            m.evex(0b01, 0x06, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x51)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)             // disp8*N with N=4 (32-bit scalar memory operand)
        })
    }
    // VSQRTSS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (escape, P0, P1, P2).
            m.emit(0x62)                                                                  // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4))) // P0: inverted R/X/B/R' register-extension bits, map = 0F
            m.emit(0x7e ^ (hlcode(v[2]) << 3))                                            // P1: W=0, vvvv = inverted first source, pp=10 (F3)
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10) // P2: z, rounding in L'L, b=1 ({er}), V' of src1, aaa mask
            m.emit(0x51)                                                                  // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                 // ModRM: register-direct, reg=dst, rm=second source
        })
    }
    // VSQRTSS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e ^ (hlcode(v[1]) << 3))                                            // P1: W=0, vvvv = inverted first source, pp=10 (F3)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40) // P2: z, V' of src1, aaa mask
            m.emit(0x51)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VSQRTSS")
    }
    return p
}
 94683  
// VSTMXCSR performs "Store MXCSR Register State".
//
// Mnemonic        : VSTMXCSR
// Supported forms : (1 form)
//
//    * VSTMXCSR m32    [AVX]
//
// The single supported form stores MXCSR to a 32-bit memory location; any
// other operand kind causes a panic.
func (self *Program) VSTMXCSR(v0 interface{}) *Instruction {
    p := self.alloc("VSTMXCSR", 1, Operands { v0 })
    // VSTMXCSR m32
    if isM32(v0) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, 0, addr(v[0]), 0)      // 2-byte VEX prefix, no SIMD prefix, vvvv unused
            m.emit(0xae)                     // opcode
            m.mrsd(3, addr(v[0]), 1)         // ModRM+SIB with reg field = 3 (opcode extension), no disp8*N compression
        })
    }
    // No encoder matched: the operand is not a 32-bit memory location.
    if p.len == 0 {
        panic("invalid operands for VSTMXCSR")
    }
    return p
}
 94708  
// VSUBPD performs "Subtract Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VSUBPD
// Supported forms : (11 forms)
//
//    * VSUBPD xmm, xmm, xmm                   [AVX]
//    * VSUBPD m128, xmm, xmm                  [AVX]
//    * VSUBPD ymm, ymm, ymm                   [AVX]
//    * VSUBPD m256, ymm, ymm                  [AVX]
//    * VSUBPD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VSUBPD {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VSUBPD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VSUBPD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VSUBPD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VSUBPD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VSUBPD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Each matching operand form registers one encoder closure via p.add; if no
// form matches, p.len stays 0 and the function panics.
func (self *Program) VSUBPD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional trailing operand selects the 4-operand {er} (embedded
    // rounding) form; all other forms take exactly 3 operands.
    switch len(vv) {
        case 0  : p = self.alloc("VSUBPD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VSUBPD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VSUBPD takes 3 or 4 operands")
    }
    // VSUBPD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))     // 2-byte VEX prefix, 128-bit; vvvv carries the first source register
            m.emit(0x5c)                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))  // ModRM: register-direct, reg=dst, rm=second source
        })
    }
    // VSUBPD m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)             // ModRM+SIB for the memory operand, no disp8*N compression
        })
    }
    // VSUBPD ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))     // VEX with 256-bit vector length selected
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VSUBPD m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VSUBPD m512/m64bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix built by helper: map 0F, 66 prefix + W=1, 512-bit, mask/zeroing/broadcast from the operands.
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5c)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)            // disp8*N with N=64 (full 512-bit memory operand)
        })
    }
    // VSUBPD {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (escape, P0, P1, P2).
            m.emit(0x62)                                                                  // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4))) // P0: inverted R/X/B/R' register-extension bits, map = 0F
            m.emit(0xfd ^ (hlcode(v[2]) << 3))                                            // P1: W=1, vvvv = inverted first source, pp=01 (66)
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10) // P2: z, rounding in L'L, b=1 ({er}), V' of src1, aaa mask
            m.emit(0x5c)                                                                  // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                 // ModRM: register-direct, reg=dst, rm=second source
        })
    }
    // VSUBPD zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40) // P2: 0x40 selects L'L=10 (512-bit)
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VSUBPD m128/m64bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5c)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)            // disp8*N with N=16 (128-bit memory operand)
        })
    }
    // VSUBPD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00) // P2: 0x00 selects L'L=00 (128-bit)
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VSUBPD m256/m64bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5c)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)            // disp8*N with N=32 (256-bit memory operand)
        })
    }
    // VSUBPD ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20) // P2: 0x20 selects L'L=01 (256-bit)
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VSUBPD")
    }
    return p
}
 94860  
// VSUBPS performs "Subtract Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VSUBPS
// Supported forms : (11 forms)
//
//    * VSUBPS xmm, xmm, xmm                   [AVX]
//    * VSUBPS m128, xmm, xmm                  [AVX]
//    * VSUBPS ymm, ymm, ymm                   [AVX]
//    * VSUBPS m256, ymm, ymm                  [AVX]
//    * VSUBPS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VSUBPS {er}, zmm, zmm, zmm{k}{z}       [AVX512F]
//    * VSUBPS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VSUBPS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VSUBPS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VSUBPS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VSUBPS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Each matching operand form registers one encoder closure via p.add; if no
// form matches, p.len stays 0 and the function panics.
func (self *Program) VSUBPS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional trailing operand selects the 4-operand {er} (embedded
    // rounding) form; all other forms take exactly 3 operands.
    switch len(vv) {
        case 0  : p = self.alloc("VSUBPS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VSUBPS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VSUBPS takes 3 or 4 operands")
    }
    // VSUBPS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), v[0], hlcode(v[1]))     // 2-byte VEX prefix, 128-bit, no SIMD prefix; vvvv carries the first source
            m.emit(0x5c)                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))  // ModRM: register-direct, reg=dst, rm=second source
        })
    }
    // VSUBPS m128, xmm, xmm
    if len(vv) == 0 && isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)             // ModRM+SIB for the memory operand, no disp8*N compression
        })
    }
    // VSUBPS ymm, ymm, ymm
    if len(vv) == 0 && isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), v[0], hlcode(v[1]))     // VEX with 256-bit vector length selected
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VSUBPS m256, ymm, ymm
    if len(vv) == 0 && isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VSUBPS m512/m32bcst, zmm, zmm{k}{z}
    if len(vv) == 0 && isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix built by helper: map 0F, no SIMD prefix, 512-bit, mask/zeroing/broadcast from the operands.
            m.evex(0b01, 0x04, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5c)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)            // disp8*N with N=64 (full 512-bit memory operand)
        })
    }
    // VSUBPS {er}, zmm, zmm, zmm{k}{z}
    if len(vv) == 1 && isER(v0) && isZMM(v1) && isZMM(v2) && isZMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (escape, P0, P1, P2).
            m.emit(0x62)                                                                  // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4))) // P0: inverted R/X/B/R' register-extension bits, map = 0F
            m.emit(0x7c ^ (hlcode(v[2]) << 3))                                            // P1: W=0, vvvv = inverted first source, pp=00
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10) // P2: z, rounding in L'L, b=1 ({er}), V' of src1, aaa mask
            m.emit(0x5c)                                                                  // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                 // ModRM: register-direct, reg=dst, rm=second source
        })
    }
    // VSUBPS zmm, zmm, zmm{k}{z}
    if len(vv) == 0 && isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40) // P2: 0x40 selects L'L=10 (512-bit)
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VSUBPS m128/m32bcst, xmm, xmm{k}{z}
    if len(vv) == 0 && isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5c)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)            // disp8*N with N=16 (128-bit memory operand)
        })
    }
    // VSUBPS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00) // P2: 0x00 selects L'L=00 (128-bit)
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VSUBPS m256/m32bcst, ymm, ymm{k}{z}
    if len(vv) == 0 && isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x5c)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)            // disp8*N with N=32 (256-bit memory operand)
        })
    }
    // VSUBPS ymm, ymm, ymm{k}{z}
    if len(vv) == 0 && isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20) // P2: 0x20 selects L'L=01 (256-bit)
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VSUBPS")
    }
    return p
}
 95012  
// VSUBSD performs "Subtract Scalar Double-Precision Floating-Point Values".
//
// Mnemonic        : VSUBSD
// Supported forms : (5 forms)
//
//    * VSUBSD xmm, xmm, xmm                [AVX]
//    * VSUBSD m64, xmm, xmm                [AVX]
//    * VSUBSD m64, xmm, xmm{k}{z}          [AVX512F]
//    * VSUBSD {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VSUBSD xmm, xmm, xmm{k}{z}          [AVX512F]
//
// Each matching operand form registers one encoder closure via p.add; if no
// form matches, p.len stays 0 and the function panics.
func (self *Program) VSUBSD(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // The optional trailing operand selects the 4-operand {er} (embedded
    // rounding) form; all other forms take exactly 3 operands.
    switch len(vv) {
        case 0  : p = self.alloc("VSUBSD", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VSUBSD", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VSUBSD takes 3 or 4 operands")
    }
    // VSUBSD xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), v[0], hlcode(v[1]))     // 2-byte VEX prefix; vvvv carries the first source register
            m.emit(0x5c)                                   // opcode
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))  // ModRM: register-direct, reg=dst, rm=second source
        })
    }
    // VSUBSD m64, xmm, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(3, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)             // ModRM+SIB for the memory operand, no disp8*N compression
        })
    }
    // VSUBSD m64, xmm, xmm{k}{z}
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX prefix built by helper: map 0F, F2 prefix + W=1, scalar form (no broadcast).
            m.evex(0b01, 0x87, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x5c)
            m.mrsd(lcode(v[2]), addr(v[0]), 8)             // disp8*N with N=8 (64-bit scalar memory operand)
        })
    }
    // VSUBSD {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-rolled 4-byte EVEX prefix (escape, P0, P1, P2).
            m.emit(0x62)                                                                  // EVEX escape byte
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4))) // P0: inverted R/X/B/R' register-extension bits, map = 0F
            m.emit(0xff ^ (hlcode(v[2]) << 3))                                            // P1: W=1, vvvv = inverted first source, pp=11 (F2)
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10) // P2: z, rounding in L'L, b=1 ({er}), V' of src1, aaa mask
            m.emit(0x5c)                                                                  // opcode
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))                                 // ModRM: register-direct, reg=dst, rm=second source
        })
    }
    // VSUBSD xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xff ^ (hlcode(v[1]) << 3))                                            // P1: W=1, vvvv = inverted first source, pp=11 (F2)
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40) // P2: z, V' of src1, aaa mask
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operands.
    if p.len == 0 {
        panic("invalid operands for VSUBSD")
    }
    return p
}
 95092  
// VSUBSS performs "Subtract Scalar Single-Precision Floating-Point Values".
//
// Mnemonic        : VSUBSS
// Supported forms : (5 forms)
//
//    * VSUBSS xmm, xmm, xmm                [AVX]
//    * VSUBSS m32, xmm, xmm                [AVX]
//    * VSUBSS m32, xmm, xmm{k}{z}          [AVX512F]
//    * VSUBSS {er}, xmm, xmm, xmm{k}{z}    [AVX512F]
//    * VSUBSS xmm, xmm, xmm{k}{z}          [AVX512F]
//
// Takes 3 operands, plus an optional 4th (via vv) for the {er}
// rounding-control form. Every form whose operand types match gets an
// encoder registered on the instruction; if none match, it panics.
func (self *Program) VSUBSS(v0 interface{}, v1 interface{}, v2 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate the instruction record with the actual operand count.
    switch len(vv) {
        case 0  : p = self.alloc("VSUBSS", 3, Operands { v0, v1, v2 })
        case 1  : p = self.alloc("VSUBSS", 4, Operands { v0, v1, v2, vv[0] })
        default : panic("instruction VSUBSS takes 3 or 4 operands")
    }
    // VSUBSS xmm, xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix, opcode 0x5c, register-direct ModRM.
            m.vex2(2, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VSUBSS m32, xmm, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX memory form; mrsd emits ModRM/SIB/displacement for addr(v[0]).
            m.vex2(2, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x5c)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VSUBSS m32, xmm, xmm{k}{z}
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; the final mrsd argument (4) is presumably the
            // disp8 compression scale matching the 32-bit operand — per Intel SDM.
            m.evex(0b01, 0x06, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), 0)
            m.emit(0x5c)
            m.mrsd(lcode(v[2]), addr(v[0]), 4)
        })
    }
    // VSUBSS {er}, xmm, xmm, xmm{k}{z}
    if len(vv) == 1 && isER(v0) && isEVEXXMM(v1) && isEVEXXMM(v2) && isXMMkz(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled 4-byte EVEX prefix (0x62 + three payload bytes);
            // the rounding-control operand v[0] is folded in via vcode(v[0]).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[3]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[3]) << 4)))
            m.emit(0x7e ^ (hlcode(v[2]) << 3))
            m.emit((zcode(v[3]) << 7) | (vcode(v[0]) << 5) | (0x08 ^ (ecode(v[2]) << 3)) | kcode(v[3]) | 0x10)
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[3]) << 3 | lcode(v[1]))
        })
    }
    // VSUBSS xmm, xmm, xmm{k}{z}
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix, register-direct form with masking.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7e ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x5c)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VSUBSS")
    }
    return p
}
 95172  
// VTESTPD performs "Packed Double-Precision Floating-Point Bit Test".
//
// Mnemonic        : VTESTPD
// Supported forms : (4 forms)
//
//    * VTESTPD xmm, xmm     [AVX]
//    * VTESTPD m128, xmm    [AVX]
//    * VTESTPD ymm, ymm     [AVX]
//    * VTESTPD m256, ymm    [AVX]
//
// All forms are AVX-only (no EVEX variant exists). Register forms emit a
// hand-assembled 3-byte VEX prefix; memory forms go through the vex3 helper.
func (self *Program) VTESTPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VTESTPD", 2, Operands { v0, v1 })
    // VTESTPD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX (0xc4) with 128-bit vector length, opcode 0x0f.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VTESTPD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX3 helper for the memory form; mrsd emits ModRM/SIB/disp.
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VTESTPD ymm, ymm
    if isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same as the xmm form but with the 256-bit length bit set (0x7d).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)
            m.emit(0x0f)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VTESTPD m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form (0x05 selects the wider vector length).
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x0f)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VTESTPD")
    }
    return p
}
 95234  
// VTESTPS performs "Packed Single-Precision Floating-Point Bit Test".
//
// Mnemonic        : VTESTPS
// Supported forms : (4 forms)
//
//    * VTESTPS xmm, xmm     [AVX]
//    * VTESTPS m128, xmm    [AVX]
//    * VTESTPS ymm, ymm     [AVX]
//    * VTESTPS m256, ymm    [AVX]
//
// Mirrors VTESTPD byte-for-byte except for the opcode (0x0e instead of 0x0f).
// All forms are AVX-only; no EVEX variant exists.
func (self *Program) VTESTPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("VTESTPS", 2, Operands { v0, v1 })
    // VTESTPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 3-byte VEX (0xc4) with 128-bit vector length, opcode 0x0e.
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x79)
            m.emit(0x0e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VTESTPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX3 helper for the memory form; mrsd emits ModRM/SIB/disp.
            m.vex3(0xc4, 0b10, 0x01, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x0e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VTESTPS ymm, ymm
    if isYMM(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Same as the xmm form but with the 256-bit length bit set (0x7d).
            m.emit(0xc4)
            m.emit(0xe2 ^ (hcode(v[1]) << 7) ^ (hcode(v[0]) << 5))
            m.emit(0x7d)
            m.emit(0x0e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VTESTPS m256, ymm
    if isM256(v0) && isYMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit memory form (0x05 selects the wider vector length).
            m.vex3(0xc4, 0b10, 0x05, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x0e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VTESTPS")
    }
    return p
}
 95296  
// VUCOMISD performs "Unordered Compare Scalar Double-Precision Floating-Point Values and Set EFLAGS".
//
// Mnemonic        : VUCOMISD
// Supported forms : (5 forms)
//
//    * VUCOMISD xmm, xmm           [AVX]
//    * VUCOMISD m64, xmm           [AVX]
//    * VUCOMISD m64, xmm           [AVX512F]
//    * VUCOMISD {sae}, xmm, xmm    [AVX512F]
//    * VUCOMISD xmm, xmm           [AVX512F]
//
// Takes 2 operands, plus an optional 3rd (via vv) for the {sae}
// suppress-all-exceptions form. Every form whose operand types match gets
// an encoder registered; if none match, it panics.
func (self *Program) VUCOMISD(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate the instruction record with the actual operand count.
    switch len(vv) {
        case 0  : p = self.alloc("VUCOMISD", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VUCOMISD", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VUCOMISD takes 2 or 3 operands")
    }
    // VUCOMISD xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix, opcode 0x2e, register-direct ModRM.
            m.vex2(1, hcode(v[1]), v[0], 0)
            m.emit(0x2e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VUCOMISD m64, xmm
    if len(vv) == 0 && isM64(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX memory form; mrsd emits ModRM/SIB/displacement.
            m.vex2(1, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x2e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VUCOMISD m64, xmm
    if len(vv) == 0 && isM64(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; the final mrsd argument (8) is presumably the
            // disp8 compression scale for the 64-bit operand — per Intel SDM.
            m.evex(0b01, 0x85, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x2e)
            m.mrsd(lcode(v[1]), addr(v[0]), 8)
        })
    }
    // VUCOMISD {sae}, xmm, xmm
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isEVEXXMM(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix; 0x18 sets the SAE (b) bit.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd)
            m.emit(0x18)
            m.emit(0x2e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VUCOMISD xmm, xmm
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix, register-direct form (no SAE).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0xfd)
            m.emit(0x48)
            m.emit(0x2e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VUCOMISD")
    }
    return p
}
 95376  
// VUCOMISS performs "Unordered Compare Scalar Single-Precision Floating-Point Values and Set EFLAGS".
//
// Mnemonic        : VUCOMISS
// Supported forms : (5 forms)
//
//    * VUCOMISS xmm, xmm           [AVX]
//    * VUCOMISS m32, xmm           [AVX]
//    * VUCOMISS m32, xmm           [AVX512F]
//    * VUCOMISS {sae}, xmm, xmm    [AVX512F]
//    * VUCOMISS xmm, xmm           [AVX512F]
//
// Mirrors VUCOMISD except for the prefix selection bytes and the 32-bit
// memory-operand scaling. Takes 2 operands plus an optional 3rd (via vv)
// for the {sae} form; panics when no form matches.
func (self *Program) VUCOMISS(v0 interface{}, v1 interface{}, vv ...interface{}) *Instruction {
    var p *Instruction
    // Allocate the instruction record with the actual operand count.
    switch len(vv) {
        case 0  : p = self.alloc("VUCOMISS", 2, Operands { v0, v1 })
        case 1  : p = self.alloc("VUCOMISS", 3, Operands { v0, v1, vv[0] })
        default : panic("instruction VUCOMISS takes 2 or 3 operands")
    }
    // VUCOMISS xmm, xmm
    if len(vv) == 0 && isXMM(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 2-byte VEX prefix, opcode 0x2e, register-direct ModRM.
            m.vex2(0, hcode(v[1]), v[0], 0)
            m.emit(0x2e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // VUCOMISS m32, xmm
    if len(vv) == 0 && isM32(v0) && isXMM(v1) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // VEX memory form; mrsd emits ModRM/SIB/displacement.
            m.vex2(0, hcode(v[1]), addr(v[0]), 0)
            m.emit(0x2e)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // VUCOMISS m32, xmm
    if len(vv) == 0 && isM32(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX memory form; the final mrsd argument (4) is presumably the
            // disp8 compression scale for the 32-bit operand — per Intel SDM.
            m.evex(0b01, 0x04, 0b00, ehcode(v[1]), addr(v[0]), 0, 0, 0, 0)
            m.emit(0x2e)
            m.mrsd(lcode(v[1]), addr(v[0]), 4)
        })
    }
    // VUCOMISS {sae}, xmm, xmm
    if len(vv) == 1 && isSAE(v0) && isEVEXXMM(v1) && isEVEXXMM(vv[0]) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix; 0x18 sets the SAE (b) bit.
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[1]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c)
            m.emit(0x18)
            m.emit(0x2e)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[1]))
        })
    }
    // VUCOMISS xmm, xmm
    if len(vv) == 0 && isEVEXXMM(v0) && isEVEXXMM(v1) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix, register-direct form (no SAE).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[1]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[1]) << 4)))
            m.emit(0x7c)
            m.emit(0x48)
            m.emit(0x2e)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VUCOMISS")
    }
    return p
}
 95456  
// VUNPCKHPD performs "Unpack and Interleave High Packed Double-Precision Floating-Point Values".
//
// Mnemonic        : VUNPCKHPD
// Supported forms : (10 forms)
//
//    * VUNPCKHPD xmm, xmm, xmm                   [AVX]
//    * VUNPCKHPD m128, xmm, xmm                  [AVX]
//    * VUNPCKHPD ymm, ymm, ymm                   [AVX]
//    * VUNPCKHPD m256, ymm, ymm                  [AVX]
//    * VUNPCKHPD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VUNPCKHPD zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VUNPCKHPD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VUNPCKHPD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VUNPCKHPD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VUNPCKHPD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// All forms share opcode 0x15; they differ only in prefix encoding
// (VEX2 vs. EVEX), vector length, masking and broadcast support.
// Panics when no form matches the operand types.
func (self *Program) VUNPCKHPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VUNPCKHPD", 3, Operands { v0, v1, v2 })
    // VUNPCKHPD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit VEX register form.
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VUNPCKHPD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit VEX memory form; mrsd emits ModRM/SIB/disp.
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x15)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VUNPCKHPD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit VEX register form (first vex2 argument selects L=1).
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VUNPCKHPD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit VEX memory form.
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x15)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VUNPCKHPD m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 512-bit memory form with broadcast/mask support; the final
            // mrsd argument (64) is presumably the disp8 compression scale.
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x15)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VUNPCKHPD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix, 512-bit register form (| 0x40).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VUNPCKHPD m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 128-bit memory form (AVX512VL).
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x15)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VUNPCKHPD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix, 128-bit register form (| 0x00).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VUNPCKHPD m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 256-bit memory form (AVX512VL).
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x15)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VUNPCKHPD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix, 256-bit register form (| 0x20).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VUNPCKHPD")
    }
    return p
}
 95589  
// VUNPCKHPS performs "Unpack and Interleave High Packed Single-Precision Floating-Point Values".
//
// Mnemonic        : VUNPCKHPS
// Supported forms : (10 forms)
//
//    * VUNPCKHPS xmm, xmm, xmm                   [AVX]
//    * VUNPCKHPS m128, xmm, xmm                  [AVX]
//    * VUNPCKHPS ymm, ymm, ymm                   [AVX]
//    * VUNPCKHPS m256, ymm, ymm                  [AVX]
//    * VUNPCKHPS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
//    * VUNPCKHPS zmm, zmm, zmm{k}{z}             [AVX512F]
//    * VUNPCKHPS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
//    * VUNPCKHPS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
//    * VUNPCKHPS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
//    * VUNPCKHPS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
//
// Mirrors VUNPCKHPD (shared opcode 0x15) with the single-precision prefix
// selection and 32-bit broadcast element size. Panics when no form matches.
func (self *Program) VUNPCKHPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VUNPCKHPS", 3, Operands { v0, v1, v2 })
    // VUNPCKHPS xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit VEX register form.
            m.vex2(0, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VUNPCKHPS m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 128-bit VEX memory form; mrsd emits ModRM/SIB/disp.
            m.vex2(0, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x15)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VUNPCKHPS ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit VEX register form (first vex2 argument selects L=1).
            m.vex2(4, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VUNPCKHPS m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 256-bit VEX memory form.
            m.vex2(4, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x15)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VUNPCKHPS m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 512-bit memory form with broadcast/mask support; the final
            // mrsd argument (64) is presumably the disp8 compression scale.
            m.evex(0b01, 0x04, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x15)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VUNPCKHPS zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix, 512-bit register form (| 0x40).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VUNPCKHPS m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 128-bit memory form (AVX512VL).
            m.evex(0b01, 0x04, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x15)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VUNPCKHPS xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix, 128-bit register form (| 0x00).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VUNPCKHPS m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // EVEX 256-bit memory form (AVX512VL).
            m.evex(0b01, 0x04, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x15)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VUNPCKHPS ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Hand-assembled EVEX prefix, 256-bit register form (| 0x20).
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x15)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VUNPCKHPS")
    }
    return p
}
 95722  
 95723  // VUNPCKLPD performs "Unpack and Interleave Low Packed Double-Precision Floating-Point Values".
 95724  //
 95725  // Mnemonic        : VUNPCKLPD
 95726  // Supported forms : (10 forms)
 95727  //
 95728  //    * VUNPCKLPD xmm, xmm, xmm                   [AVX]
 95729  //    * VUNPCKLPD m128, xmm, xmm                  [AVX]
 95730  //    * VUNPCKLPD ymm, ymm, ymm                   [AVX]
 95731  //    * VUNPCKLPD m256, ymm, ymm                  [AVX]
 95732  //    * VUNPCKLPD m512/m64bcst, zmm, zmm{k}{z}    [AVX512F]
 95733  //    * VUNPCKLPD zmm, zmm, zmm{k}{z}             [AVX512F]
 95734  //    * VUNPCKLPD m128/m64bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 95735  //    * VUNPCKLPD xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 95736  //    * VUNPCKLPD m256/m64bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 95737  //    * VUNPCKLPD ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 95738  //
func (self *Program) VUNPCKLPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VUNPCKLPD", 3, Operands { v0, v1, v2 })
    // VUNPCKLPD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX reg-reg form: 2-byte VEX prefix, opcode 0x14, then a ModRM byte
        // with mod=11, reg=v[2] (destination), rm=v[0] (source).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VUNPCKLPD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX memory form: mrsd emits the ModRM/SIB/displacement bytes for the
        // memory operand (scale 1 = displacement used as-is, no compression).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x14)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VUNPCKLPD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // Same as the xmm reg-reg form but with the 256-bit flag set in the
        // vex2 selector (5 instead of 1; bit 2 appears to be VEX.L).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VUNPCKLPD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x14)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VUNPCKLPD m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX memory form: evex() packs the 4-byte EVEX prefix (opmask kcode,
        // zeroing zcode, broadcast bcode); mrsd scale 64 = disp8*N compression
        // for a 64-byte memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x14)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VUNPCKLPD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX reg-reg form: the four prefix bytes (0x62 + P0/P1/P2) are
        // assembled inline; the final 0x40/0x20/0x00 term selects vector length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VUNPCKLPD m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x14)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VUNPCKLPD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VUNPCKLPD m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x14)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VUNPCKLPD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No candidate form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VUNPCKLPD")
    }
    return p
}
 95855  
 95856  // VUNPCKLPS performs "Unpack and Interleave Low Packed Single-Precision Floating-Point Values".
 95857  //
 95858  // Mnemonic        : VUNPCKLPS
 95859  // Supported forms : (10 forms)
 95860  //
 95861  //    * VUNPCKLPS xmm, xmm, xmm                   [AVX]
 95862  //    * VUNPCKLPS m128, xmm, xmm                  [AVX]
 95863  //    * VUNPCKLPS ymm, ymm, ymm                   [AVX]
 95864  //    * VUNPCKLPS m256, ymm, ymm                  [AVX]
 95865  //    * VUNPCKLPS m512/m32bcst, zmm, zmm{k}{z}    [AVX512F]
 95866  //    * VUNPCKLPS zmm, zmm, zmm{k}{z}             [AVX512F]
 95867  //    * VUNPCKLPS m128/m32bcst, xmm, xmm{k}{z}    [AVX512F,AVX512VL]
 95868  //    * VUNPCKLPS xmm, xmm, xmm{k}{z}             [AVX512F,AVX512VL]
 95869  //    * VUNPCKLPS m256/m32bcst, ymm, ymm{k}{z}    [AVX512F,AVX512VL]
 95870  //    * VUNPCKLPS ymm, ymm, ymm{k}{z}             [AVX512F,AVX512VL]
 95871  //
func (self *Program) VUNPCKLPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VUNPCKLPS", 3, Operands { v0, v1, v2 })
    // VUNPCKLPS xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX reg-reg form: note the vex2 selector is 0 here (PS variant, no
        // SIMD prefix) versus 1 for the PD variant; opcode 0x14, then ModRM
        // with mod=11, reg=v[2], rm=v[0].
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VUNPCKLPS m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX memory form: mrsd emits ModRM/SIB/displacement for the memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x14)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VUNPCKLPS ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // 256-bit variant (vex2 selector 4 instead of 0).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VUNPCKLPS m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x14)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VUNPCKLPS m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX memory form: evex() packs the 4-byte prefix (opmask kcode,
        // zeroing zcode, broadcast bcode); mrsd scale 64 = disp8*N compression.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x14)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VUNPCKLPS zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512F)
        p.domain = DomainAVX
        // EVEX reg-reg form: 0x62 + three prefix bytes assembled inline; the
        // trailing 0x40/0x20/0x00 term selects vector length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VUNPCKLPS m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x14)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VUNPCKLPS xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VUNPCKLPS m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x14)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VUNPCKLPS ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512F)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x14)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No candidate form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VUNPCKLPS")
    }
    return p
}
 95988  
 95989  // VXORPD performs "Bitwise Logical XOR for Double-Precision Floating-Point Values".
 95990  //
 95991  // Mnemonic        : VXORPD
 95992  // Supported forms : (10 forms)
 95993  //
 95994  //    * VXORPD xmm, xmm, xmm                   [AVX]
 95995  //    * VXORPD m128, xmm, xmm                  [AVX]
 95996  //    * VXORPD ymm, ymm, ymm                   [AVX]
 95997  //    * VXORPD m256, ymm, ymm                  [AVX]
 95998  //    * VXORPD m512/m64bcst, zmm, zmm{k}{z}    [AVX512DQ]
 95999  //    * VXORPD zmm, zmm, zmm{k}{z}             [AVX512DQ]
 96000  //    * VXORPD m128/m64bcst, xmm, xmm{k}{z}    [AVX512DQ,AVX512VL]
 96001  //    * VXORPD xmm, xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
 96002  //    * VXORPD m256/m64bcst, ymm, ymm{k}{z}    [AVX512DQ,AVX512VL]
 96003  //    * VXORPD ymm, ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
 96004  //
<doc_update>
func (self *Program) VXORPD(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VXORPD", 3, Operands { v0, v1, v2 })
    // VXORPD xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX reg-reg form: 2-byte VEX prefix, opcode 0x57, then ModRM with
        // mod=11, reg=v[2] (destination), rm=v[0] (source).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x57)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VXORPD m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX memory form: mrsd emits ModRM/SIB/displacement for the memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(1, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x57)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VXORPD ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // 256-bit variant (vex2 selector 5 instead of 1).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x57)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VXORPD m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(5, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x57)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VXORPD m512/m64bcst, zmm, zmm{k}{z}
    if isM512M64bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        // EVEX memory form: evex() packs the 4-byte prefix (opmask kcode,
        // zeroing zcode, broadcast bcode); mrsd scale 64 = disp8*N compression.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x57)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VXORPD zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        // EVEX reg-reg form: 0x62 + three prefix bytes assembled inline; the
        // trailing 0x40/0x20/0x00 term selects vector length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x57)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VXORPD m128/m64bcst, xmm, xmm{k}{z}
    if isM128M64bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x57)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VXORPD xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x57)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VXORPD m256/m64bcst, ymm, ymm{k}{z}
    if isM256M64bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x85, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x57)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VXORPD ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0xfd ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x57)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No candidate form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VXORPD")
    }
    return p
}
 96121  
 96122  // VXORPS performs "Bitwise Logical XOR for Single-Precision Floating-Point Values".
 96123  //
 96124  // Mnemonic        : VXORPS
 96125  // Supported forms : (10 forms)
 96126  //
 96127  //    * VXORPS xmm, xmm, xmm                   [AVX]
 96128  //    * VXORPS m128, xmm, xmm                  [AVX]
 96129  //    * VXORPS ymm, ymm, ymm                   [AVX]
 96130  //    * VXORPS m256, ymm, ymm                  [AVX]
 96131  //    * VXORPS m512/m32bcst, zmm, zmm{k}{z}    [AVX512DQ]
 96132  //    * VXORPS zmm, zmm, zmm{k}{z}             [AVX512DQ]
 96133  //    * VXORPS m128/m32bcst, xmm, xmm{k}{z}    [AVX512DQ,AVX512VL]
 96134  //    * VXORPS xmm, xmm, xmm{k}{z}             [AVX512DQ,AVX512VL]
 96135  //    * VXORPS m256/m32bcst, ymm, ymm{k}{z}    [AVX512DQ,AVX512VL]
 96136  //    * VXORPS ymm, ymm, ymm{k}{z}             [AVX512DQ,AVX512VL]
 96137  //
func (self *Program) VXORPS(v0 interface{}, v1 interface{}, v2 interface{}) *Instruction {
    p := self.alloc("VXORPS", 3, Operands { v0, v1, v2 })
    // VXORPS xmm, xmm, xmm
    if isXMM(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX reg-reg form: vex2 selector 0 (PS variant, no SIMD prefix),
        // opcode 0x57, then ModRM with mod=11, reg=v[2], rm=v[0].
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x57)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VXORPS m128, xmm, xmm
    if isM128(v0) && isXMM(v1) && isXMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // VEX memory form: mrsd emits ModRM/SIB/displacement for the memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(0, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x57)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VXORPS ymm, ymm, ymm
    if isYMM(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        // 256-bit variant (vex2 selector 4 instead of 0).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), v[0], hlcode(v[1]))
            m.emit(0x57)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VXORPS m256, ymm, ymm
    if isM256(v0) && isYMM(v1) && isYMM(v2) {
        self.require(ISA_AVX)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.vex2(4, hcode(v[2]), addr(v[0]), hlcode(v[1]))
            m.emit(0x57)
            m.mrsd(lcode(v[2]), addr(v[0]), 1)
        })
    }
    // VXORPS m512/m32bcst, zmm, zmm{k}{z}
    if isM512M32bcst(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        // EVEX memory form: evex() packs the 4-byte prefix (opmask kcode,
        // zeroing zcode, broadcast bcode); mrsd scale 64 = disp8*N compression.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b10, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x57)
            m.mrsd(lcode(v[2]), addr(v[0]), 64)
        })
    }
    // VXORPS zmm, zmm, zmm{k}{z}
    if isZMM(v0) && isZMM(v1) && isZMMkz(v2) {
        self.require(ISA_AVX512DQ)
        p.domain = DomainAVX
        // EVEX reg-reg form: 0x62 + three prefix bytes assembled inline; the
        // trailing 0x40/0x20/0x00 term selects vector length.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x40)
            m.emit(0x57)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VXORPS m128/m32bcst, xmm, xmm{k}{z}
    if isM128M32bcst(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b00, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x57)
            m.mrsd(lcode(v[2]), addr(v[0]), 16)
        })
    }
    // VXORPS xmm, xmm, xmm{k}{z}
    if isEVEXXMM(v0) && isEVEXXMM(v1) && isXMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x00)
            m.emit(0x57)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // VXORPS m256/m32bcst, ymm, ymm{k}{z}
    if isM256M32bcst(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.evex(0b01, 0x04, 0b01, ehcode(v[2]), addr(v[0]), vcode(v[1]), kcode(v[2]), zcode(v[2]), bcode(v[0]))
            m.emit(0x57)
            m.mrsd(lcode(v[2]), addr(v[0]), 32)
        })
    }
    // VXORPS ymm, ymm, ymm{k}{z}
    if isEVEXYMM(v0) && isEVEXYMM(v1) && isYMMkz(v2) {
        self.require(ISA_AVX512VL | ISA_AVX512DQ)
        p.domain = DomainAVX
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x62)
            m.emit(0xf1 ^ ((hcode(v[2]) << 7) | (ehcode(v[0]) << 5) | (ecode(v[2]) << 4)))
            m.emit(0x7c ^ (hlcode(v[1]) << 3))
            m.emit((zcode(v[2]) << 7) | (0x08 ^ (ecode(v[1]) << 3)) | kcode(v[2]) | 0x20)
            m.emit(0x57)
            m.emit(0xc0 | lcode(v[2]) << 3 | lcode(v[0]))
        })
    }
    // No candidate form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for VXORPS")
    }
    return p
}
 96254  
 96255  // VZEROALL performs "Zero All YMM Registers".
 96256  //
 96257  // Mnemonic        : VZEROALL
 96258  // Supported forms : (1 form)
 96259  //
 96260  //    * VZEROALL    [AVX]
 96261  //
func (self *Program) VZEROALL() *Instruction {
    p := self.alloc("VZEROALL", 0, Operands {  })
    // VZEROALL
    self.require(ISA_AVX)
    p.domain = DomainAVX
    // Single fixed encoding: VEX prefix with the 256-bit flag set
    // (vex2 selector 4; VZEROUPPER uses 0) followed by opcode 0x77.
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.vex2(4, 0, nil, 0)
        m.emit(0x77)
    })
    return p
}
 96273  
 96274  // VZEROUPPER performs "Zero Upper Bits of YMM Registers".
 96275  //
 96276  // Mnemonic        : VZEROUPPER
 96277  // Supported forms : (1 form)
 96278  //
 96279  //    * VZEROUPPER    [AVX]
 96280  //
func (self *Program) VZEROUPPER() *Instruction {
    p := self.alloc("VZEROUPPER", 0, Operands {  })
    // VZEROUPPER
    self.require(ISA_AVX)
    p.domain = DomainAVX
    // Single fixed encoding: VEX prefix with the 256-bit flag clear
    // (vex2 selector 0; VZEROALL uses 4) followed by opcode 0x77.
    p.add(0, func(m *_Encoding, v []interface{}) {
        m.vex2(0, 0, nil, 0)
        m.emit(0x77)
    })
    return p
}
 96292  
 96293  // XADDB performs "Exchange and Add".
 96294  //
 96295  // Mnemonic        : XADD
 96296  // Supported forms : (2 forms)
 96297  //
 96298  //    * XADDB r8, r8
 96299  //    * XADDB r8, m8
 96300  //
func (self *Program) XADDB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("XADDB", 2, Operands { v0, v1 })
    // XADDB r8, r8
    if isReg8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        // 0F C0 /r: optional REX prefix (forced when either operand is a
        // byte register only reachable with REX, e.g. SPL/DIL), then ModRM
        // with mod=11, reg=v[0] (source), rm=v[1] (destination).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], isReg8REX(v[0]) || isReg8REX(v[1]))
            m.emit(0x0f)
            m.emit(0xc0)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // XADDB r8, m8
    if isReg8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        // Memory destination: mrsd emits ModRM/SIB/displacement for v[1].
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), isReg8REX(v[0]))
            m.emit(0x0f)
            m.emit(0xc0)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No candidate form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for XADDB")
    }
    return p
}
 96328  
 96329  // XADDL performs "Exchange and Add".
 96330  //
 96331  // Mnemonic        : XADD
 96332  // Supported forms : (2 forms)
 96333  //
 96334  //    * XADDL r32, r32
 96335  //    * XADDL r32, m32
 96336  //
func (self *Program) XADDL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("XADDL", 2, Operands { v0, v1 })
    // XADDL r32, r32
    if isReg32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        // 0F C1 /r: optional REX prefix (only when extended registers need
        // it), then ModRM with mod=11, reg=v[0] (source), rm=v[1] (destination).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0xc1)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // XADDL r32, m32
    if isReg32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        // Memory destination: mrsd emits ModRM/SIB/displacement for v[1].
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xc1)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No candidate form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for XADDL")
    }
    return p
}
 96364  
 96365  // XADDQ performs "Exchange and Add".
 96366  //
 96367  // Mnemonic        : XADD
 96368  // Supported forms : (2 forms)
 96369  //
 96370  //    * XADDQ r64, r64
 96371  //    * XADDQ r64, m64
 96372  //
func (self *Program) XADDQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("XADDQ", 2, Operands { v0, v1 })
    // XADDQ r64, r64
    if isReg64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        // Mandatory REX.W (0x48) for the 64-bit operand size; hcode bits are
        // folded into REX.R (<<2) and REX.B, then 0F C1 /r with mod=11,
        // reg=v[0] (source), rm=v[1] (destination).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1]))
            m.emit(0x0f)
            m.emit(0xc1)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // XADDQ r64, m64
    if isReg64(v0) && isM64(v1) {
        p.domain = DomainGeneric
        // Memory destination: rexm builds the REX prefix (W=1) for a memory
        // operand, then mrsd emits ModRM/SIB/displacement for v[1].
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[0]), addr(v[1]))
            m.emit(0x0f)
            m.emit(0xc1)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No candidate form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for XADDQ")
    }
    return p
}
 96400  
 96401  // XADDW performs "Exchange and Add".
 96402  //
 96403  // Mnemonic        : XADD
 96404  // Supported forms : (2 forms)
 96405  //
 96406  //    * XADDW r16, r16
 96407  //    * XADDW r16, m16
 96408  //
func (self *Program) XADDW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("XADDW", 2, Operands { v0, v1 })
    // XADDW r16, r16
    if isReg16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        // 66-prefixed (16-bit operand-size override) 0F C1 /r with mod=11,
        // reg=v[0] (source), rm=v[1] (destination).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x0f)
            m.emit(0xc1)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
    }
    // XADDW r16, m16
    if isReg16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        // Memory destination: mrsd emits ModRM/SIB/displacement for v[1].
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x0f)
            m.emit(0xc1)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No candidate form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for XADDW")
    }
    return p
}
 96438  
 96439  // XCHGB performs "Exchange Register/Memory with Register".
 96440  //
 96441  // Mnemonic        : XCHG
 96442  // Supported forms : (3 forms)
 96443  //
 96444  //    * XCHGB r8, r8
 96445  //    * XCHGB m8, r8
 96446  //    * XCHGB r8, m8
 96447  //
func (self *Program) XCHGB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("XCHGB", 2, Operands { v0, v1 })
    // XCHGB r8, r8
    if isReg8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        // XCHG is symmetric for reg-reg, so two alternative encodings of
        // 0x86 /r are registered with the operand roles swapped; the REX
        // prefix is forced when either side is a REX-only byte register.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], isReg8REX(v[0]) || isReg8REX(v[1]))
            m.emit(0x86)
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], isReg8REX(v[0]) || isReg8REX(v[1]))
            m.emit(0x86)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // XCHGB m8, r8
    if isM8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        // Memory form: mrsd emits ModRM/SIB/displacement for the memory operand.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), isReg8REX(v[1]))
            m.emit(0x86)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // XCHGB r8, m8
    if isReg8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), isReg8REX(v[0]))
            m.emit(0x86)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No candidate form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for XCHGB")
    }
    return p
}
 96487  
 96488  // XCHGL performs "Exchange Register/Memory with Register".
 96489  //
 96490  // Mnemonic        : XCHG
 96491  // Supported forms : (5 forms)
 96492  //
 96493  //    * XCHGL r32, eax
 96494  //    * XCHGL eax, r32
 96495  //    * XCHGL r32, r32
 96496  //    * XCHGL m32, r32
 96497  //    * XCHGL r32, m32
 96498  //
func (self *Program) XCHGL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("XCHGL", 2, Operands { v0, v1 })
    // XCHGL r32, eax
    if isReg32(v0) && v1 == EAX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[0], false)
            // Short form: single-byte opcode 0x90+r (XCHG r32 with eax).
            m.emit(0x90 | lcode(v[0]))
        })
    }
    // XCHGL eax, r32
    if v0 == EAX && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            m.emit(0x90 | lcode(v[1]))
        })
    }
    // XCHGL r32, r32
    if isReg32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        // XCHG is symmetric: two equivalent 0x87 encodings are registered
        // with the reg/rm roles swapped.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x87)
            // ModRM byte: mod=11 (register-direct).
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x87)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // XCHGL m32, r32
    if isM32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x87)
            // ModRM/SIB/displacement for the memory operand.
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // XCHGL r32, m32
    if isReg32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x87)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for XCHGL")
    }
    return p
}
 96554  
 96555  // XCHGQ performs "Exchange Register/Memory with Register".
 96556  //
 96557  // Mnemonic        : XCHG
 96558  // Supported forms : (5 forms)
 96559  //
 96560  //    * XCHGQ r64, rax
 96561  //    * XCHGQ rax, r64
 96562  //    * XCHGQ r64, r64
 96563  //    * XCHGQ m64, r64
 96564  //    * XCHGQ r64, m64
 96565  //
func (self *Program) XCHGQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("XCHGQ", 2, Operands { v0, v1 })
    // XCHGQ r64, rax
    if isReg64(v0) && v1 == RAX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // REX.W (0x48) with the register's high bit folded in,
            // then the short form 0x90+r.
            m.emit(0x48 | hcode(v[0]))
            m.emit(0x90 | lcode(v[0]))
        })
    }
    // XCHGQ rax, r64
    if v0 == RAX && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            m.emit(0x90 | lcode(v[1]))
        })
    }
    // XCHGQ r64, r64
    if isReg64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        // XCHG is symmetric: two equivalent 0x87 encodings are registered
        // with the reg/rm roles swapped.
        p.add(0, func(m *_Encoding, v []interface{}) {
            // REX.W plus the high bits of both registers (REX.R / REX.B).
            m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1]))
            m.emit(0x87)
            // ModRM byte: mod=11 (register-direct).
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
            m.emit(0x87)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // XCHGQ m64, r64
    if isM64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // REX prefix for a memory operand; first argument 1 selects
            // the 64-bit (REX.W) form.
            m.rexm(1, hcode(v[1]), addr(v[0]))
            m.emit(0x87)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // XCHGQ r64, m64
    if isReg64(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[0]), addr(v[1]))
            m.emit(0x87)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for XCHGQ")
    }
    return p
}
 96621  
 96622  // XCHGW performs "Exchange Register/Memory with Register".
 96623  //
 96624  // Mnemonic        : XCHG
 96625  // Supported forms : (5 forms)
 96626  //
 96627  //    * XCHGW r16, ax
 96628  //    * XCHGW ax, r16
 96629  //    * XCHGW r16, r16
 96630  //    * XCHGW m16, r16
 96631  //    * XCHGW r16, m16
 96632  //
func (self *Program) XCHGW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("XCHGW", 2, Operands { v0, v1 })
    // XCHGW r16, ax
    if isReg16(v0) && v1 == AX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0x66 operand-size prefix selects the 16-bit operation.
            m.emit(0x66)
            m.rexo(0, v[0], false)
            // Short form: single-byte opcode 0x90+r (XCHG r16 with ax).
            m.emit(0x90 | lcode(v[0]))
        })
    }
    // XCHGW ax, r16
    if v0 == AX && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            m.emit(0x90 | lcode(v[1]))
        })
    }
    // XCHGW r16, r16
    if isReg16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        // XCHG is symmetric: two equivalent 0x87 encodings are registered
        // with the reg/rm roles swapped.
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x87)
            // ModRM byte: mod=11 (register-direct).
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x87)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // XCHGW m16, r16
    if isM16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x87)
            // ModRM/SIB/displacement for the memory operand.
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // XCHGW r16, m16
    if isReg16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x87)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for XCHGW")
    }
    return p
}
 96694  
 96695  // XGETBV performs "Get Value of Extended Control Register".
 96696  //
 96697  // Mnemonic        : XGETBV
 96698  // Supported forms : (1 form)
 96699  //
 96700  //    * XGETBV
 96701  //
 96702  func (self *Program) XGETBV() *Instruction {
 96703      p := self.alloc("XGETBV", 0, Operands {  })
 96704      // XGETBV
 96705      p.domain = DomainGeneric
 96706      p.add(0, func(m *_Encoding, v []interface{}) {
 96707          m.emit(0x0f)
 96708          m.emit(0x01)
 96709          m.emit(0xd0)
 96710      })
 96711      return p
 96712  }
 96713  
 96714  // XLATB performs "Table Look-up Translation".
 96715  //
 96716  // Mnemonic        : XLATB
 96717  // Supported forms : (2 forms)
 96718  //
 96719  //    * XLATB
 96720  //    * XLATB
 96721  //
 96722  func (self *Program) XLATB() *Instruction {
 96723      p := self.alloc("XLATB", 0, Operands {  })
 96724      // XLATB
 96725      p.domain = DomainMisc
 96726      p.add(0, func(m *_Encoding, v []interface{}) {
 96727          m.emit(0xd7)
 96728      })
 96729      // XLATB
 96730      p.domain = DomainMisc
 96731      p.add(0, func(m *_Encoding, v []interface{}) {
 96732          m.emit(0x48)
 96733          m.emit(0xd7)
 96734      })
 96735      return p
 96736  }
 96737  
 96738  // XORB performs "Logical Exclusive OR".
 96739  //
 96740  // Mnemonic        : XOR
 96741  // Supported forms : (6 forms)
 96742  //
 96743  //    * XORB imm8, al
 96744  //    * XORB imm8, r8
 96745  //    * XORB r8, r8
 96746  //    * XORB m8, r8
 96747  //    * XORB imm8, m8
 96748  //    * XORB r8, m8
 96749  //
func (self *Program) XORB(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("XORB", 2, Operands { v0, v1 })
    // XORB imm8, al
    if isImm8(v0) && v1 == AL {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Short AL form: opcode 0x34 followed by the imm8.
            m.emit(0x34)
            m.imm1(toImmAny(v[0]))
        })
    }
    // XORB imm8, r8
    if isImm8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // REX prefix is forced for byte registers that are only
            // addressable with REX (isReg8REX).
            m.rexo(0, v[1], isReg8REX(v[1]))
            m.emit(0x80)
            // ModRM: mod=11, reg=/6 opcode extension (0xf0 = 0xc0|6<<3).
            m.emit(0xf0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // XORB r8, r8
    if isReg8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        // Two equivalent encodings: 0x30 (XOR r/m8, r8) and its
        // direction-swapped counterpart 0x32 (XOR r8, r/m8).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], isReg8REX(v[0]) || isReg8REX(v[1]))
            m.emit(0x30)
            // ModRM byte: mod=11 (register-direct).
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], isReg8REX(v[0]) || isReg8REX(v[1]))
            m.emit(0x32)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // XORB m8, r8
    if isM8(v0) && isReg8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), isReg8REX(v[1]))
            m.emit(0x32)
            // ModRM/SIB/displacement for the memory operand.
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // XORB imm8, m8
    if isImm8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0x80)
            // reg field 6 is the XOR opcode extension (/6).
            m.mrsd(6, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // XORB r8, m8
    if isReg8(v0) && isM8(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), isReg8REX(v[0]))
            m.emit(0x30)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for XORB")
    }
    return p
}
 96817  
 96818  // XORL performs "Logical Exclusive OR".
 96819  //
 96820  // Mnemonic        : XOR
 96821  // Supported forms : (8 forms)
 96822  //
 96823  //    * XORL imm32, eax
 96824  //    * XORL imm8, r32
 96825  //    * XORL imm32, r32
 96826  //    * XORL r32, r32
 96827  //    * XORL m32, r32
 96828  //    * XORL imm8, m32
 96829  //    * XORL imm32, m32
 96830  //    * XORL r32, m32
 96831  //
func (self *Program) XORL(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("XORL", 2, Operands { v0, v1 })
    // XORL imm32, eax
    if isImm32(v0) && v1 == EAX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Short EAX form: opcode 0x35 followed by the imm32.
            m.emit(0x35)
            m.imm4(toImmAny(v[0]))
        })
    }
    // XORL imm8, r32
    if isImm8Ext(v0, 4) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            // 0x83: sign-extended imm8 group opcode.
            m.emit(0x83)
            // ModRM: mod=11, reg=/6 opcode extension (0xf0 = 0xc0|6<<3).
            m.emit(0xf0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // XORL imm32, r32
    if isImm32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, v[1], false)
            // 0x81: full imm32 group opcode.
            m.emit(0x81)
            m.emit(0xf0 | lcode(v[1]))
            m.imm4(toImmAny(v[0]))
        })
    }
    // XORL r32, r32
    if isReg32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        // Two equivalent encodings: 0x31 (XOR r/m32, r32) and its
        // direction-swapped counterpart 0x33 (XOR r32, r/m32).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x31)
            // ModRM byte: mod=11 (register-direct).
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x33)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // XORL m32, r32
    if isM32(v0) && isReg32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x33)
            // ModRM/SIB/displacement for the memory operand.
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // XORL imm8, m32
    if isImm8Ext(v0, 4) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0x83)
            // reg field 6 is the XOR opcode extension (/6).
            m.mrsd(6, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // XORL imm32, m32
    if isImm32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(0, addr(v[1]), false)
            m.emit(0x81)
            m.mrsd(6, addr(v[1]), 1)
            m.imm4(toImmAny(v[0]))
        })
    }
    // XORL r32, m32
    if isReg32(v0) && isM32(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x31)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for XORL")
    }
    return p
}
 96919  
 96920  // XORPD performs "Bitwise Logical XOR for Double-Precision Floating-Point Values".
 96921  //
 96922  // Mnemonic        : XORPD
 96923  // Supported forms : (2 forms)
 96924  //
 96925  //    * XORPD xmm, xmm     [SSE2]
 96926  //    * XORPD m128, xmm    [SSE2]
 96927  //
func (self *Program) XORPD(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("XORPD", 2, Operands { v0, v1 })
    // XORPD xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        // Both forms need SSE2 support on the target program.
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: 66 0F 57 /r — the 0x66 prefix distinguishes
            // XORPD from XORPS (plain 0F 57).
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x57)
            // ModRM byte: mod=11, reg=destination, rm=source.
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // XORPD m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE2)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x57)
            // ModRM/SIB/displacement for the memory operand.
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for XORPD")
    }
    return p
}
 96959  
 96960  // XORPS performs "Bitwise Logical XOR for Single-Precision Floating-Point Values".
 96961  //
 96962  // Mnemonic        : XORPS
 96963  // Supported forms : (2 forms)
 96964  //
 96965  //    * XORPS xmm, xmm     [SSE]
 96966  //    * XORPS m128, xmm    [SSE]
 96967  //
func (self *Program) XORPS(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("XORPS", 2, Operands { v0, v1 })
    // XORPS xmm, xmm
    if isXMM(v0) && isXMM(v1) {
        // Both forms need SSE support on the target program.
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            // Encoding: 0F 57 /r (no 0x66 prefix, unlike XORPD).
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x0f)
            m.emit(0x57)
            // ModRM byte: mod=11, reg=destination, rm=source.
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // XORPS m128, xmm
    if isM128(v0) && isXMM(v1) {
        self.require(ISA_SSE)
        p.domain = DomainMMXSSE
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x0f)
            m.emit(0x57)
            // ModRM/SIB/displacement for the memory operand.
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for XORPS")
    }
    return p
}
 96997  
 96998  // XORQ performs "Logical Exclusive OR".
 96999  //
 97000  // Mnemonic        : XOR
 97001  // Supported forms : (8 forms)
 97002  //
 97003  //    * XORQ imm32, rax
 97004  //    * XORQ imm8, r64
 97005  //    * XORQ imm32, r64
 97006  //    * XORQ r64, r64
 97007  //    * XORQ m64, r64
 97008  //    * XORQ imm8, m64
 97009  //    * XORQ imm32, m64
 97010  //    * XORQ r64, m64
 97011  //
func (self *Program) XORQ(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("XORQ", 2, Operands { v0, v1 })
    // XORQ imm32, rax
    if isImm32(v0) && v1 == RAX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // REX.W (0x48) then the short RAX form 0x35 + imm32.
            m.emit(0x48)
            m.emit(0x35)
            m.imm4(toImmAny(v[0]))
        })
    }
    // XORQ imm8, r64
    if isImm8Ext(v0, 8) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // REX.W with the register's high bit folded in.
            m.emit(0x48 | hcode(v[1]))
            // 0x83: sign-extended imm8 group opcode.
            m.emit(0x83)
            // ModRM: mod=11, reg=/6 opcode extension (0xf0 = 0xc0|6<<3).
            m.emit(0xf0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // XORQ imm32, r64
    if isImm32Ext(v0, 8) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]))
            // 0x81: full imm32 group opcode.
            m.emit(0x81)
            m.emit(0xf0 | lcode(v[1]))
            m.imm4(toImmAny(v[0]))
        })
    }
    // XORQ r64, r64
    if isReg64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        // Two equivalent encodings: 0x31 (XOR r/m64, r64) and its
        // direction-swapped counterpart 0x33 (XOR r64, r/m64).
        p.add(0, func(m *_Encoding, v []interface{}) {
            // REX.W plus the high bits of both registers (REX.R / REX.B).
            m.emit(0x48 | hcode(v[0]) << 2 | hcode(v[1]))
            m.emit(0x31)
            // ModRM byte: mod=11 (register-direct).
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x48 | hcode(v[1]) << 2 | hcode(v[0]))
            m.emit(0x33)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // XORQ m64, r64
    if isM64(v0) && isReg64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // REX prefix for a memory operand; first argument 1 selects
            // the 64-bit (REX.W) form.
            m.rexm(1, hcode(v[1]), addr(v[0]))
            m.emit(0x33)
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // XORQ imm8, m64
    if isImm8Ext(v0, 8) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0x83)
            // reg field 6 is the XOR opcode extension (/6).
            m.mrsd(6, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // XORQ imm32, m64
    if isImm32Ext(v0, 8) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, 0, addr(v[1]))
            m.emit(0x81)
            m.mrsd(6, addr(v[1]), 1)
            m.imm4(toImmAny(v[0]))
        })
    }
    // XORQ r64, m64
    if isReg64(v0) && isM64(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.rexm(1, hcode(v[0]), addr(v[1]))
            m.emit(0x31)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for XORQ")
    }
    return p
}
 97100  
 97101  // XORW performs "Logical Exclusive OR".
 97102  //
 97103  // Mnemonic        : XOR
 97104  // Supported forms : (8 forms)
 97105  //
 97106  //    * XORW imm16, ax
 97107  //    * XORW imm8, r16
 97108  //    * XORW imm16, r16
 97109  //    * XORW r16, r16
 97110  //    * XORW m16, r16
 97111  //    * XORW imm8, m16
 97112  //    * XORW imm16, m16
 97113  //    * XORW r16, m16
 97114  //
func (self *Program) XORW(v0 interface{}, v1 interface{}) *Instruction {
    p := self.alloc("XORW", 2, Operands { v0, v1 })
    // XORW imm16, ax
    if isImm16(v0) && v1 == AX {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            // 0x66 operand-size prefix selects the 16-bit operation,
            // then the short AX form 0x35 + imm16.
            m.emit(0x66)
            m.emit(0x35)
            m.imm2(toImmAny(v[0]))
        })
    }
    // XORW imm8, r16
    if isImm8Ext(v0, 2) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            // 0x83: sign-extended imm8 group opcode.
            m.emit(0x83)
            // ModRM: mod=11, reg=/6 opcode extension (0xf0 = 0xc0|6<<3).
            m.emit(0xf0 | lcode(v[1]))
            m.imm1(toImmAny(v[0]))
        })
    }
    // XORW imm16, r16
    if isImm16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, v[1], false)
            // 0x81: full imm16 group opcode (imm size set by 0x66).
            m.emit(0x81)
            m.emit(0xf0 | lcode(v[1]))
            m.imm2(toImmAny(v[0]))
        })
    }
    // XORW r16, r16
    if isReg16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        // Two equivalent encodings: 0x31 (XOR r/m16, r16) and its
        // direction-swapped counterpart 0x33 (XOR r16, r/m16).
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), v[1], false)
            m.emit(0x31)
            // ModRM byte: mod=11 (register-direct).
            m.emit(0xc0 | lcode(v[0]) << 3 | lcode(v[1]))
        })
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), v[0], false)
            m.emit(0x33)
            m.emit(0xc0 | lcode(v[1]) << 3 | lcode(v[0]))
        })
    }
    // XORW m16, r16
    if isM16(v0) && isReg16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[1]), addr(v[0]), false)
            m.emit(0x33)
            // ModRM/SIB/displacement for the memory operand.
            m.mrsd(lcode(v[1]), addr(v[0]), 1)
        })
    }
    // XORW imm8, m16
    if isImm8Ext(v0, 2) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0x83)
            // reg field 6 is the XOR opcode extension (/6).
            m.mrsd(6, addr(v[1]), 1)
            m.imm1(toImmAny(v[0]))
        })
    }
    // XORW imm16, m16
    if isImm16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(0, addr(v[1]), false)
            m.emit(0x81)
            m.mrsd(6, addr(v[1]), 1)
            m.imm2(toImmAny(v[0]))
        })
    }
    // XORW r16, m16
    if isReg16(v0) && isM16(v1) {
        p.domain = DomainGeneric
        p.add(0, func(m *_Encoding, v []interface{}) {
            m.emit(0x66)
            m.rexo(hcode(v[0]), addr(v[1]), false)
            m.emit(0x31)
            m.mrsd(lcode(v[0]), addr(v[1]), 1)
        })
    }
    // No form matched the supplied operand types.
    if p.len == 0 {
        panic("invalid operands for XORW")
    }
    return p
}