github.com/df-mc/dragonfly@v0.9.13/server/session/chunk.go

package session

import (
	"bytes"
	"github.com/cespare/xxhash/v2"
	"github.com/df-mc/dragonfly/server/block/cube"
	"github.com/df-mc/dragonfly/server/world"
	"github.com/df-mc/dragonfly/server/world/chunk"
	"github.com/sandertv/gophertunnel/minecraft/nbt"
	"github.com/sandertv/gophertunnel/minecraft/protocol"
	"github.com/sandertv/gophertunnel/minecraft/protocol/packet"
)

// subChunkRequests is set to true to enable the sub-chunk request system. This can (likely) cause unexpected issues,
// but also solves issues with block entities such as item frames and lecterns as of v1.19.10.
const subChunkRequests = true

// ViewChunk ...
func (s *Session) ViewChunk(pos world.ChunkPos, c *chunk.Chunk, blockEntities map[cube.Pos]world.Block) {
	if !s.conn.ClientCacheEnabled() {
		s.sendNetworkChunk(pos, c, blockEntities)
		return
	}
	s.sendBlobHashes(pos, c, blockEntities)
}

// ViewSubChunks ...
func (s *Session) ViewSubChunks(center world.SubChunkPos, offsets []protocol.SubChunkOffset) {
	w := s.c.World()
	r := w.Range()

	entries := make([]protocol.SubChunkEntry, 0, len(offsets))
	transaction := make(map[uint64]struct{})
	for _, offset := range offsets {
		ind := int16(center.Y()) + int16(offset[1]) - int16(r[0])>>4
		if ind < 0 || ind > int16(r.Height()>>4) {
			entries = append(entries, protocol.SubChunkEntry{Result: protocol.SubChunkResultIndexOutOfBounds, Offset: offset})
			continue
		}
		col, ok := s.chunkLoader.Chunk(world.ChunkPos{
			center.X() + int32(offset[0]),
			center.Z() + int32(offset[2]),
		})
		if !ok {
			entries = append(entries, protocol.SubChunkEntry{Result: protocol.SubChunkResultChunkNotFound, Offset: offset})
			continue
		}
		col.Lock()
		entries = append(entries, s.subChunkEntry(offset, ind, col, transaction))
		col.Unlock()
	}
	if s.conn.ClientCacheEnabled() && len(transaction) > 0 {
		s.blobMu.Lock()
		s.openChunkTransactions = append(s.openChunkTransactions, transaction)
		s.blobMu.Unlock()
	}
	dim, _ := world.DimensionID(w.Dimension())
	s.writePacket(&packet.SubChunk{
		Dimension:       int32(dim),
		Position:        protocol.SubChunkPos(center),
		CacheEnabled:    s.conn.ClientCacheEnabled(),
		SubChunkEntries: entries,
	})
}

func (s *Session) subChunkEntry(offset protocol.SubChunkOffset, ind int16, col *world.Column, transaction map[uint64]struct{}) protocol.SubChunkEntry {
	chunkMap := col.Chunk.HeightMap()
	subMapType, subMap := byte(protocol.HeightMapDataHasData), make([]int8, 256)
	higher, lower := true, true
	for x := uint8(0); x < 16; x++ {
		for z := uint8(0); z < 16; z++ {
			y, i := chunkMap.At(x, z), (uint16(z)<<4)|uint16(x)
			otherInd := col.Chunk.SubIndex(y)
			if otherInd > ind {
				subMap[i], lower = 16, false
			} else if otherInd < ind {
				subMap[i], higher = -1, false
			} else {
				subMap[i], lower, higher = int8(y-col.Chunk.SubY(otherInd)), false, false
			}
		}
	}
	if higher {
		subMapType, subMap = protocol.HeightMapDataTooHigh, nil
	} else if lower {
		subMapType, subMap = protocol.HeightMapDataTooLow, nil
	}

	sub := col.Chunk.Sub()[ind]
	if sub.Empty() {
		return protocol.SubChunkEntry{
			Result:        protocol.SubChunkResultSuccessAllAir,
			HeightMapType: subMapType,
			HeightMapData: subMap,
			Offset:        offset,
		}
	}

	serialisedSubChunk := chunk.EncodeSubChunk(col.Chunk, chunk.NetworkEncoding, int(ind))

	blockEntityBuf := bytes.NewBuffer(nil)
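	// Block entities that sit within this sub chunk are appended to the serialised sub chunk payload as
	// network little endian NBT below.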
	enc := nbt.NewEncoderWithEncoding(blockEntityBuf, nbt.NetworkLittleEndian)
	for pos, b := range col.BlockEntities {
		if n, ok := b.(world.NBTer); ok && col.Chunk.SubIndex(int16(pos.Y())) == ind {
			d := n.EncodeNBT()
			d["x"], d["y"], d["z"] = int32(pos[0]), int32(pos[1]), int32(pos[2])
			_ = enc.Encode(d)
		}
	}

	entry := protocol.SubChunkEntry{
		Result:        protocol.SubChunkResultSuccess,
		RawPayload:    append(serialisedSubChunk, blockEntityBuf.Bytes()...),
		HeightMapType: subMapType,
		HeightMapData: subMap,
		Offset:        offset,
	}
	if s.conn.ClientCacheEnabled() {
		if hash := xxhash.Sum64(serialisedSubChunk); s.trackBlob(hash, serialisedSubChunk) {
			transaction[hash] = struct{}{}

			entry.BlobHash = hash
			entry.RawPayload = blockEntityBuf.Bytes()
		}
	}
	return entry
}

// sendBlobHashes sends chunk blob hashes of the data of the chunk and stores the data in a map of blobs. Only
// data that the client doesn't yet have will be sent over the network.
func (s *Session) sendBlobHashes(pos world.ChunkPos, c *chunk.Chunk, blockEntities map[cube.Pos]world.Block) {
	if subChunkRequests {
		biomes := chunk.EncodeBiomes(c, chunk.NetworkEncoding)
		if hash := xxhash.Sum64(biomes); s.trackBlob(hash, biomes) {
			s.writePacket(&packet.LevelChunk{
				SubChunkCount:   protocol.SubChunkRequestModeLimited,
				Position:        protocol.ChunkPos(pos),
				HighestSubChunk: c.HighestFilledSubChunk(),
				BlobHashes:      []uint64{hash},
				RawPayload:      []byte{0},
				CacheEnabled:    true,
			})
			return
		}
	}

	var (
		data   = chunk.Encode(c, chunk.NetworkEncoding)
		count  = uint32(len(data.SubChunks))
		blobs  = append(data.SubChunks, data.Biomes)
		hashes = make([]uint64, len(blobs))
		m      = make(map[uint64]struct{}, len(blobs))
	)
	for i, blob := range blobs {
		h := xxhash.Sum64(blob)
		hashes[i], m[h] = h, struct{}{}
	}

	s.blobMu.Lock()
	s.openChunkTransactions = append(s.openChunkTransactions, m)
	if l := len(s.blobs); l > 4096 {
		s.blobMu.Unlock()
		s.log.Errorf("player %v has too many blobs pending %v: disconnecting", s.c.Name(), l)
		_ = s.c.Close()
		return
	}
	for i := range hashes {
		s.blobs[hashes[i]] = blobs[i]
	}
	s.blobMu.Unlock()

	// Length of 1 byte for the border block count.
	raw := bytes.NewBuffer(make([]byte, 1, 32))
	enc := nbt.NewEncoderWithEncoding(raw, nbt.NetworkLittleEndian)
	for bp, b := range blockEntities {
		if n, ok := b.(world.NBTer); ok {
			d := n.EncodeNBT()
			d["x"], d["y"], d["z"] = int32(bp[0]), int32(bp[1]), int32(bp[2])
			_ = enc.Encode(d)
		}
	}

	s.writePacket(&packet.LevelChunk{
		Position:      protocol.ChunkPos{pos.X(), pos.Z()},
		SubChunkCount: count,
		CacheEnabled:  true,
		BlobHashes:    hashes,
		RawPayload:    raw.Bytes(),
	})
}

// sendNetworkChunk sends a network encoded chunk to the client.
func (s *Session) sendNetworkChunk(pos world.ChunkPos, c *chunk.Chunk, blockEntities map[cube.Pos]world.Block) {
	if subChunkRequests {
		s.writePacket(&packet.LevelChunk{
			SubChunkCount:   protocol.SubChunkRequestModeLimited,
			Position:        protocol.ChunkPos(pos),
			HighestSubChunk: c.HighestFilledSubChunk(),
			RawPayload:      append(chunk.EncodeBiomes(c, chunk.NetworkEncoding), 0),
		})
		return
	}

	data := chunk.Encode(c, chunk.NetworkEncoding)
	chunkBuf := bytes.NewBuffer(nil)
	for _, s := range data.SubChunks {
		_, _ = chunkBuf.Write(s)
	}
	_, _ = chunkBuf.Write(data.Biomes)

	// Length of 1 byte for the border block count.
	chunkBuf.WriteByte(0)

	enc := nbt.NewEncoderWithEncoding(chunkBuf, nbt.NetworkLittleEndian)
	for bp, b := range blockEntities {
		if n, ok := b.(world.NBTer); ok {
			d := n.EncodeNBT()
			d["x"], d["y"], d["z"] = int32(bp[0]), int32(bp[1]), int32(bp[2])
			_ = enc.Encode(d)
		}
	}

	s.writePacket(&packet.LevelChunk{
		Position:      protocol.ChunkPos{pos.X(), pos.Z()},
		SubChunkCount: uint32(len(data.SubChunks)),
		RawPayload:    append([]byte(nil), chunkBuf.Bytes()...),
	})
}

// trackBlob attempts to track the given blob. If the player has too many pending blobs, it returns false and closes the
// connection.
func (s *Session) trackBlob(hash uint64, blob []byte) bool {
	s.blobMu.Lock()
	if l := len(s.blobs); l > 4096 {
		s.blobMu.Unlock()
		s.log.Errorf("player %v has too many blobs pending %v: disconnecting", s.c.Name(), l)
		_ = s.c.Close()
		return false
	}
	s.blobs[hash] = blob
	s.blobMu.Unlock()
	return true
}
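
The blobs recorded by trackBlob and sendBlobHashes are only resolved once the client reports which hashes it already holds. The sketch below illustrates that other half of the cache exchange under the gophertunnel ClientCacheBlobStatus/ClientCacheMissResponse packets; it is not the session package's actual handler, and the handleBlobStatus method name is hypothetical. It assumes only the s.blobs, s.blobMu and s.writePacket members shown above.

// handleBlobStatus is a hypothetical helper showing how a ClientCacheBlobStatus packet could be answered:
// hashes the client reports as missing are sent back with their payloads in a ClientCacheMissResponse,
// while acknowledged hits are evicted from the pending blob map.
func (s *Session) handleBlobStatus(pk *packet.ClientCacheBlobStatus) {
	s.blobMu.Lock()
	defer s.blobMu.Unlock()

	resp := &packet.ClientCacheMissResponse{Blobs: make([]protocol.CacheBlob, 0, len(pk.MissHashes))}
	for _, h := range pk.MissHashes {
		blob, ok := s.blobs[h]
		if !ok {
			// A hash this session never tracked: nothing can be sent for it.
			continue
		}
		resp.Blobs = append(resp.Blobs, protocol.CacheBlob{Hash: h, Payload: blob})
	}
	for _, h := range pk.HitHashes {
		// The client now holds this blob in its own cache, so it no longer needs to be kept here.
		delete(s.blobs, h)
	}
	if len(resp.Blobs) > 0 {
		s.writePacket(resp)
	}
}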