github.com/qichengzx/mattermost-server@v4.5.1-0.20180604164826-2c75247c97d0+incompatible/plugin/rpcplugin/sandbox/seccomp_linux.go

// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See License.txt for license information.

package sandbox

import (
	"syscall"
	"unsafe"

	"github.com/pkg/errors"
	"golang.org/x/net/bpf"
	"golang.org/x/sys/unix"
)

// Seccomp return values: allow the syscall, or fail it with an errno.
const (
	SECCOMP_RET_ALLOW = 0x7fff0000
	SECCOMP_RET_ERRNO = 0x00050000
)

const (
	EM_X86_64 = 62

	__AUDIT_ARCH_64BIT = 0x80000000
	__AUDIT_ARCH_LE    = 0x40000000

	AUDIT_ARCH_X86_64 = EM_X86_64 | __AUDIT_ARCH_64BIT | __AUDIT_ARCH_LE

	// Field offsets into struct seccomp_data: int nr; __u32 arch;
	// __u64 instruction_pointer; __u64 args[6].
	nrSize     = 4
	archOffset = nrSize
	ipOffset   = archOffset + 4
	argsOffset = ipOffset + 8
)

// SeccompCondition is a condition on a syscall's arguments. Filter returns
// BPF instructions that fall through when the condition holds; jumps tagged
// with skipFalseSentinel are rewritten by SeccompFilter to skip past the
// enclosing condition group when the condition does not hold.
type SeccompCondition interface {
	Filter(littleEndian bool, skipFalseSentinel uint8) []bpf.Instruction
}

// seccompArgLowWord returns the offset of the low 32 bits of a syscall
// argument within struct seccomp_data.
func seccompArgLowWord(arg int, littleEndian bool) uint32 {
	offset := uint32(argsOffset + arg*8)
	if !littleEndian {
		offset += 4
	}
	return offset
}

// seccompArgHighWord returns the offset of the high 32 bits of a syscall
// argument within struct seccomp_data.
func seccompArgHighWord(arg int, littleEndian bool) uint32 {
	offset := uint32(argsOffset + arg*8)
	if littleEndian {
		offset += 4
	}
	return offset
}

// SeccompArgHasNoBits holds when none of the bits in Mask are set in the
// given syscall argument.
type SeccompArgHasNoBits struct {
	Arg  int
	Mask uint64
}

func (c SeccompArgHasNoBits) Filter(littleEndian bool, skipFalseSentinel uint8) []bpf.Instruction {
	return []bpf.Instruction{
		bpf.LoadAbsolute{Off: seccompArgHighWord(c.Arg, littleEndian), Size: 4},
		bpf.JumpIf{Cond: bpf.JumpBitsSet, Val: uint32(c.Mask >> 32), SkipTrue: skipFalseSentinel},
		bpf.LoadAbsolute{Off: seccompArgLowWord(c.Arg, littleEndian), Size: 4},
		bpf.JumpIf{Cond: bpf.JumpBitsSet, Val: uint32(c.Mask), SkipTrue: skipFalseSentinel},
	}
}

// SeccompArgHasAnyBit holds when at least one bit in Mask is set in the
// given syscall argument.
type SeccompArgHasAnyBit struct {
	Arg  int
	Mask uint64
}

func (c SeccompArgHasAnyBit) Filter(littleEndian bool, skipFalseSentinel uint8) []bpf.Instruction {
	return []bpf.Instruction{
		bpf.LoadAbsolute{Off: seccompArgHighWord(c.Arg, littleEndian), Size: 4},
		bpf.JumpIf{Cond: bpf.JumpBitsSet, Val: uint32(c.Mask >> 32), SkipTrue: 2},
		bpf.LoadAbsolute{Off: seccompArgLowWord(c.Arg, littleEndian), Size: 4},
		bpf.JumpIf{Cond: bpf.JumpBitsSet, Val: uint32(c.Mask), SkipFalse: skipFalseSentinel},
	}
}

// SeccompArgEquals holds when the given syscall argument equals Value.
type SeccompArgEquals struct {
	Arg   int
	Value uint64
}

func (c SeccompArgEquals) Filter(littleEndian bool, skipFalseSentinel uint8) []bpf.Instruction {
	return []bpf.Instruction{
		bpf.LoadAbsolute{Off: seccompArgHighWord(c.Arg, littleEndian), Size: 4},
		bpf.JumpIf{Cond: bpf.JumpEqual, Val: uint32(c.Value >> 32), SkipFalse: skipFalseSentinel},
		bpf.LoadAbsolute{Off: seccompArgLowWord(c.Arg, littleEndian), Size: 4},
		bpf.JumpIf{Cond: bpf.JumpEqual, Val: uint32(c.Value), SkipFalse: skipFalseSentinel},
	}
}

// SeccompConditions is a set of conditions that must all hold.
type SeccompConditions struct {
	All []SeccompCondition
}

// SeccompSyscall allows a syscall if any one of the condition sets in Any is
// fully satisfied. A nil Any allows the syscall unconditionally.
type SeccompSyscall struct {
	Syscall uint32
	Any     []SeccompConditions
}

// SeccompFilter builds a BPF program for the given audit architecture that
// allows only the listed syscalls; anything else fails with EPERM.
func SeccompFilter(arch uint32, allowedSyscalls []SeccompSyscall) (filter []bpf.Instruction) {
	// Reject syscalls made for a different architecture.
	filter = append(filter,
		bpf.LoadAbsolute{Off: archOffset, Size: 4},
		bpf.JumpIf{Cond: bpf.JumpEqual, Val: arch, SkipTrue: 1},
		bpf.RetConstant{Val: uint32(SECCOMP_RET_ERRNO | unix.EPERM)},
	)

	// Load the syscall number and compare it against each allowed syscall.
	filter = append(filter, bpf.LoadAbsolute{Off: 0, Size: nrSize})
	for _, s := range allowedSyscalls {
		if s.Any != nil {
			// Reserve a slot for the jump that skips this syscall's
			// condition checks when the number doesn't match.
			syscallStart := len(filter)
			filter = append(filter, bpf.Instruction(nil))
			for _, cs := range s.Any {
				anyStart := len(filter)
				// Emit each condition with a sentinel jump target (255).
				for _, c := range cs.All {
					filter = append(filter, c.Filter((arch&__AUDIT_ARCH_LE) != 0, 255)...)
				}
				filter = append(filter, bpf.RetConstant{Val: SECCOMP_RET_ALLOW})
				// Rewrite sentinel jumps to point just past this condition group.
				for i := anyStart; i < len(filter); i++ {
					if jump, ok := filter[i].(bpf.JumpIf); ok {
						if len(filter)-i-1 > 255 {
							panic("condition too long")
						}
						if jump.SkipFalse == 255 {
							jump.SkipFalse = uint8(len(filter) - i - 1)
						}
						if jump.SkipTrue == 255 {
							jump.SkipTrue = uint8(len(filter) - i - 1)
						}
						filter[i] = jump
					}
				}
			}
			// No condition group matched: fail with EPERM.
			filter = append(filter, bpf.RetConstant{Val: uint32(SECCOMP_RET_ERRNO | unix.EPERM)})
			if len(filter)-syscallStart-1 > 255 {
				panic("conditions too long")
			}
			filter[syscallStart] = bpf.JumpIf{Cond: bpf.JumpEqual, Val: uint32(s.Syscall), SkipFalse: uint8(len(filter) - syscallStart - 1)}
		} else {
			// Unconditionally allowed syscall.
			filter = append(filter,
				bpf.JumpIf{Cond: bpf.JumpEqual, Val: uint32(s.Syscall), SkipFalse: 1},
				bpf.RetConstant{Val: SECCOMP_RET_ALLOW},
			)
		}
	}

	// Syscall not in the allow list: fail with EPERM.
	return append(filter, bpf.RetConstant{Val: uint32(SECCOMP_RET_ERRNO | unix.EPERM)})
}

// EnableSeccompFilter assembles the BPF program and installs it for the
// current process via prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER).
func EnableSeccompFilter(filter []bpf.Instruction) error {
	assembled, err := bpf.Assemble(filter)
	if err != nil {
		return errors.Wrapf(err, "unable to assemble filter")
	}

	sockFilter := make([]unix.SockFilter, len(filter))
	for i, instruction := range assembled {
		sockFilter[i].Code = instruction.Op
		sockFilter[i].Jt = instruction.Jt
		sockFilter[i].Jf = instruction.Jf
		sockFilter[i].K = instruction.K
	}

	prog := unix.SockFprog{
		Len:    uint16(len(sockFilter)),
		Filter: &sockFilter[0],
	}

	if _, _, errno := syscall.Syscall(syscall.SYS_PRCTL, unix.PR_SET_SECCOMP, unix.SECCOMP_MODE_FILTER, uintptr(unsafe.Pointer(&prog))); errno != 0 {
		return errors.Wrapf(syscall.Errno(errno), "syscall error")
	}

	return nil
}
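
For reference, below is a minimal, hypothetical sketch of how these helpers could be wired together: it builds an allow-list with one argument-conditioned entry and installs the resulting filter. The syscall selection, the import path for this package, and the PR_SET_NO_NEW_PRIVS call are assumptions for illustration only; they are not taken from this file.

// Hypothetical usage sketch (not part of seccomp_linux.go).
package main

import (
	"log"

	"github.com/qichengzx/mattermost-server/plugin/rpcplugin/sandbox" // assumed import path for this package
	"golang.org/x/sys/unix"
)

func main() {
	allowed := []sandbox.SeccompSyscall{
		// Unconditionally allowed syscalls (illustrative, not a complete set).
		{Syscall: unix.SYS_READ},
		{Syscall: unix.SYS_WRITE},
		{Syscall: unix.SYS_EXIT_GROUP},
		// openat is allowed only when its flags argument (arg 2) carries no
		// write bits, i.e. read-only opens.
		{
			Syscall: unix.SYS_OPENAT,
			Any: []sandbox.SeccompConditions{
				{All: []sandbox.SeccompCondition{
					sandbox.SeccompArgHasNoBits{Arg: 2, Mask: unix.O_WRONLY | unix.O_RDWR},
				}},
			},
		},
	}

	filter := sandbox.SeccompFilter(sandbox.AUDIT_ARCH_X86_64, allowed)

	// Installing a filter without CAP_SYS_ADMIN requires the no_new_privs bit.
	if err := unix.Prctl(unix.PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); err != nil {
		log.Fatal(err)
	}
	if err := sandbox.EnableSeccompFilter(filter); err != nil {
		log.Fatal(err)
	}
}

Anything not on the allow list fails with EPERM rather than killing the process, matching the SECCOMP_RET_ERRNO return value used throughout the filter above.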