github.com/usbarmory/tamago@v0.0.0-20240508072735-8612bbe1e454/soc/nxp/enet/dma.go

// NXP 10/100-Mbps Ethernet MAC (ENET)
// https://github.com/usbarmory/tamago
//
// Copyright (c) WithSecure Corporation
// https://foundry.withsecure.com
//
// Use of this source code is governed by the license
// that can be found in the LICENSE file.

package enet

import (
	"bytes"
	"encoding/binary"

	"github.com/usbarmory/tamago/dma"
	"github.com/usbarmory/tamago/internal/reg"
)

const (
	MTU               = 1518
	minFrameSizeBytes = 42
	defaultRingSize   = 16
	bufferAlign       = 64
)

// Common buffer descriptor fields
const (
	BD_ST_W = 13 // Wrap
	BD_ST_L = 11 // Last
)

// p1014, Table 22-35. Receive buffer descriptor field definitions, IMX6ULLRM
const (
	BD_RX_ST_E  = 15 // Empty
	BD_RX_ST_LG = 5  // Frame length violation
	BD_RX_ST_NO = 4  // Non-octet aligned frame
	BD_RX_ST_CR = 2  // CRC or frame error
	BD_RX_ST_OV = 1  // Overrun
	BD_RX_ST_TR = 0  // Frame truncated

	frameErrorMask = 1<<BD_RX_ST_CR | 1<<BD_RX_ST_LG | 1<<BD_RX_ST_NO | 1<<BD_RX_ST_OV | 1<<BD_RX_ST_TR
)

// p1017, Table 22-37. Enhanced transmit buffer descriptor field definitions, IMX6ULLRM
const (
	BD_TX_ST_R  = 15 // Ready
	BD_TX_ST_TC = 10 // Transmit CRC
)

// bufferDescriptor represents a legacy FEC receive/transmit buffer descriptor
// (p1012, 22.6.13 Legacy buffer descriptors, IMX6ULLRM).
type bufferDescriptor struct {
	Length uint16
	Status uint16
	Addr   uint32

	stats *Stats

	// DMA buffers
	desc []byte
	data []byte
}

// Bytes returns the descriptor in its in-memory representation.
func (bd *bufferDescriptor) Bytes() []byte {
	buf := new(bytes.Buffer)

	binary.Write(buf, binary.LittleEndian, bd.Length)
	binary.Write(buf, binary.LittleEndian, bd.Status)
	binary.Write(buf, binary.LittleEndian, bd.Addr)

	return buf.Bytes()
}

// Data returns a copy of the received frame, excluding the trailing 4-byte
// frame check sequence (FCS).
func (bd *bufferDescriptor) Data() (buf []byte) {
	buf = make([]byte, bd.Length-4)
	copy(buf, bd.data)
	return
}

// Valid reports whether the descriptor holds a complete, error-free frame of
// acceptable size, updating receive statistics for rejected frames.
func (bd *bufferDescriptor) Valid() bool {
	s := uint32(bd.Status)

	switch {
	case s&(1<<BD_ST_L) == 0:
		return false
	case s&frameErrorMask != 0:
		if (s>>BD_RX_ST_OV)&1 == 1 {
			bd.stats.Overrun += 1
		} else {
			bd.stats.FrameLengthViolation += (s >> BD_RX_ST_LG) & 1
			bd.stats.NonOctetAlignedFrame += (s >> BD_RX_ST_NO) & 1
			bd.stats.CRCOrFrameError += (s >> BD_RX_ST_CR) & 1
		}

		return false
	case bd.Length < minFrameSizeBytes:
		bd.stats.FrameTooSmall += 1
		return false
	case bd.Length > MTU:
		bd.stats.FrameTooLarge += 1
		return false
	}

	return true
}

// bufferDescriptorRing represents a circular ring of receive or transmit
// buffer descriptors.
type bufferDescriptorRing struct {
	bds   []*bufferDescriptor
	index int
	size  int
	stats *Stats
}

// init allocates the ring descriptors and their data buffers within the DMA
// region and returns the physical address of the first descriptor.
func (ring *bufferDescriptorRing) init(rx bool, n int, s *Stats) uint32 {
	ring.bds = make([]*bufferDescriptor, n)
	ring.size = n

	// To avoid excessive DMA region fragmentation, a single allocation
	// reserves all descriptors and another one all data buffers, which are
	// then sliced up for the individual ring entries.
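	//
	// As an illustration (numbers follow from the constants above, not
	// from the reference manual): a descriptor serializes to 8 bytes
	// (Length, Status, Addr), and each data buffer is the 1518-byte MTU
	// rounded up to the 64-byte bufferAlign boundary, i.e. 1536 bytes.
	// With the defaultRingSize of 16 a ring therefore reserves
	// 16*8 = 128 bytes of descriptors and 16*1536 = 24576 bytes of frame
	// buffers from the DMA region.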

	descSize := len((&bufferDescriptor{}).Bytes())
	ptr, desc := dma.Reserve(n*descSize, bufferAlign)

	dataSize := MTU + (bufferAlign - (MTU % bufferAlign))
	addr, data := dma.Reserve(n*dataSize, bufferAlign)

	for i := 0; i < n; i++ {
		off := dataSize * i

		bd := &bufferDescriptor{
			Addr:  uint32(addr) + uint32(off),
			data:  data[off : off+dataSize],
			stats: s,
		}

		if rx {
			bd.Status |= 1 << BD_RX_ST_E
		}

		if i == n-1 {
			bd.Status |= 1 << BD_ST_W
		}

		off = descSize * i
		bd.desc = desc[off : off+descSize]
		copy(bd.desc, bd.Bytes())

		ring.bds[i] = bd
	}

	return uint32(ptr)
}

// next advances the ring index, returning true when wrapping back to the
// first descriptor.
func (ring *bufferDescriptorRing) next() (wrap bool) {
	wrap = ring.index == (ring.size - 1)

	if wrap {
		ring.index = 0
	} else {
		ring.index += 1
	}

	return
}

// pop returns the frame held by the current receive descriptor, if complete
// and valid, and hands the descriptor back to the MAC by setting its Empty
// bit.
func (ring *bufferDescriptorRing) pop() (data []byte) {
	bd := ring.bds[ring.index]

	bd.Length = uint16(bd.desc[0])
	bd.Length |= uint16(bd.desc[1]) << 8

	bd.Status = uint16(bd.desc[2])
	bd.Status |= uint16(bd.desc[3]) << 8

	if bd.Status&(1<<BD_RX_ST_E) != 0 {
		return
	}

	ring.next()

	if bd.Valid() {
		data = bd.Data()
	}

	// set empty
	bd.desc[3] |= (1 << BD_RX_ST_E) >> 8

	return
}

// push stores a frame in the current transmit descriptor and marks it ready
// for transmission by the MAC.
func (ring *bufferDescriptorRing) push(data []byte) {
	bd := ring.bds[ring.index]

	// a set Ready bit means the previous frame has not been sent yet
	if (uint16(bd.desc[3])<<8)&(1<<BD_TX_ST_R) != 0 {
		print("enet: frame not sent\n")
	}

	bd.Length = uint16(len(data))
	bd.Status = (1 << BD_ST_L) | (1 << BD_TX_ST_TC)

	bd.desc[0] = byte(bd.Length & 0xff)
	bd.desc[1] = byte((bd.Length & 0xff00) >> 8)

	bd.desc[2] = byte(bd.Status & 0xff)
	bd.desc[3] = byte((bd.Status & 0xff00) >> 8)

	copy(bd.data, data)

	if ring.next() {
		bd.desc[3] |= (1 << BD_ST_W) >> 8
	}

	// set ready
	bd.desc[3] |= (1 << BD_TX_ST_R) >> 8
}

// Rx receives a single Ethernet frame, excluding the trailing frame check
// sequence (FCS), from the MAC controller ring buffer.
func (hw *ENET) Rx() (buf []byte) {
	hw.Lock()
	defer hw.Unlock()

	buf = hw.rx.pop()
	reg.Set(hw.rdar, RDAR_ACTIVE)

	return
}

// Tx transmits a single Ethernet frame; the frame check sequence (FCS) is
// appended automatically and must not be included in the buffer.
func (hw *ENET) Tx(buf []byte) {
	hw.Lock()
	defer hw.Unlock()

	if len(buf) > MTU {
		return
	}

	hw.tx.push(buf)
	reg.Set(hw.tdar, TDAR_ACTIVE)
}
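
// The following is an illustrative usage sketch, not part of the driver: it
// assumes the ENET controller has already been instantiated and initialized
// elsewhere (MAC address and PHY setup included), and that the hypothetical
// rxHandler and nextOutboundFrame functions connect it to an upper network
// stack. Rx is polled and returns nil when no complete frame is pending; Tx
// expects a full Ethernet frame without FCS, no larger than MTU.
//
//	for {
//		// Rx returns nil when no valid frame is available.
//		if frame := hw.Rx(); frame != nil {
//			rxHandler(frame)
//		}
//
//		// queue any pending outbound frame for transmission
//		if frame := nextOutboundFrame(); frame != nil {
//			hw.Tx(frame)
//		}
//	}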