github.com/Cloud-Foundations/Dominator@v0.3.4/hypervisor/rpcd/watchDhcp.go

package rpcd

import (
	"time"

	"github.com/Cloud-Foundations/Dominator/lib/srpc"
	proto "github.com/Cloud-Foundations/Dominator/proto/hypervisor"
)

// WatchDhcp streams DHCP packets observed by the embedded DHCP server to the
// SRPC client. Packets may be filtered by interface name, and the stream ends
// after MaxPackets packets (zero means no limit), when a packet carries an
// error, or when the client disconnects.
func (t *srpcType) WatchDhcp(conn *srpc.Conn) error {
	var request proto.WatchDhcpRequest
	if err := conn.Decode(&request); err != nil {
		return err
	}
	closeChannel := conn.GetCloseNotifier()
	packetChannel := t.dhcpServer.MakePacketWatchChannel()
	defer t.dhcpServer.ClosePacketWatchChannel(packetChannel)
	flushTimer := time.NewTimer(flushDelay)
	var numToFlush uint
	maxPackets := request.MaxPackets
	for count := uint64(0); maxPackets < 1 || count < maxPackets; {
		select {
		case packet, ok := <-packetChannel:
			if !ok {
				// The watch channel was closed: the receiver is reading too
				// slowly, so report the overrun and end the stream.
				msg := proto.WatchDhcpResponse{
					Error: "receiver not keeping up with DHCP packets",
				}
				return conn.Encode(msg)
			}
			if request.Interface != "" &&
				packet.Interface != request.Interface {
				continue
			}
			if err := conn.Encode(packet); err != nil {
				t.logger.Printf("error sending packet: %s\n", err)
				return err
			}
			if packet.Error != "" {
				return nil
			}
			count++
			numToFlush++
			flushTimer.Reset(flushDelay)
		case <-flushTimer.C:
			// No packet for flushDelay: flush buffered responses to the client.
			if numToFlush > 1 {
				t.logger.Debugf(0, "flushing %d packets\n", numToFlush)
			}
			numToFlush = 0
			if err := conn.Flush(); err != nil {
				t.logger.Printf("error flushing packet(s): %s\n", err)
				return err
			}
		case err := <-closeChannel:
			if err == nil {
				t.logger.Debugf(0, "packet client disconnected: %s\n",
					conn.RemoteAddr())
				return nil
			}
			t.logger.Println(err)
			return err
		}
	}
	return nil
}
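
For context, here is a minimal client-side sketch of how this handler might be consumed over SRPC. It assumes the method is registered as "Hypervisor.WatchDhcp", that the address shown is a placeholder for a real hypervisor endpoint, and that each streamed message decodes into proto.WatchDhcpResponse (whose Interface and Error fields the handler above references); none of these details are confirmed by this file, so treat them as assumptions rather than the library's documented usage.

package main

import (
	"fmt"
	"time"

	"github.com/Cloud-Foundations/Dominator/lib/srpc"
	proto "github.com/Cloud-Foundations/Dominator/proto/hypervisor"
)

func main() {
	// Placeholder hypervisor SRPC endpoint; adjust for your deployment.
	client, err := srpc.DialHTTP("tcp", "hypervisor.example.com:6976",
		time.Second*15)
	if err != nil {
		panic(err)
	}
	defer client.Close()
	// Assumed service.method name for the handler shown above.
	conn, err := client.Call("Hypervisor.WatchDhcp")
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	// Request packets on one interface only; zero MaxPackets means no limit.
	request := proto.WatchDhcpRequest{Interface: "br0", MaxPackets: 10}
	if err := conn.Encode(request); err != nil {
		panic(err)
	}
	if err := conn.Flush(); err != nil {
		panic(err)
	}
	for {
		// Assumption: each streamed message decodes into a WatchDhcpResponse.
		var response proto.WatchDhcpResponse
		if err := conn.Decode(&response); err != nil {
			panic(err)
		}
		if response.Error != "" {
			fmt.Println("server error:", response.Error)
			return
		}
		fmt.Println("DHCP packet seen on interface:", response.Interface)
	}
}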