github.com/simpleiot/simpleiot@v0.18.3/client/db.go

package client

import (
	"context"
	"encoding/json"
	"fmt"
	"log"
	"strings"

	influxdb2 "github.com/influxdata/influxdb-client-go/v2"
	"github.com/influxdata/influxdb-client-go/v2/api"
	"github.com/nats-io/nats.go"
	"github.com/simpleiot/simpleiot/data"
)

// InfluxMeasurement is the Influx measurement to which all points are written
const InfluxMeasurement = "points"
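
// Every point is written as one row in this measurement: the point type and
// key become Influx tags (plus any node tags copied from the node cache),
// and the point value (and, for regular points, text) become fields.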

// Db represents the configuration for a SIOT DB client
type Db struct {
	ID            string   `node:"id"`
	Parent        string   `node:"parent"`
	Description   string   `point:"description"`
	URI           string   `point:"uri"`
	Org           string   `point:"org"`
	Bucket        string   `point:"bucket"`
	AuthToken     string   `point:"authToken"`
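
	// TagPointTypes lists the point types whose values the node cache
	// copies onto written points as extra Influx tags.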
	TagPointTypes []string `point:"tagPointType"`
}

// DbClient is a SIOT database client
type DbClient struct {
	nc            *nats.Conn
	config        Db
	stop          chan struct{}
	newPoints     chan NewPoints // points for this client's own config node
	newEdgePoints chan NewPoints // edge points for this client's own config node
	newDbPoints   chan NewPoints // points from other nodes, to be written to Influx
	upSub         *nats.Subscription
	upSubHr       *nats.Subscription
	historySub    *nats.Subscription
	nodeCache     nodeCache
	client        influxdb2.Client
	writeAPI      api.WriteAPI
}

// NewDbClient returns a new DB client with the given configuration
func NewDbClient(nc *nats.Conn, config Db) Client {
	return &DbClient{
		nc:            nc,
		config:        config,
		stop:          make(chan struct{}),
		newPoints:     make(chan NewPoints),
		newEdgePoints: make(chan NewPoints),
		newDbPoints:   make(chan NewPoints),
		nodeCache:     newNodeCache(config.TagPointTypes),
	}
}
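
// A minimal usage sketch (hypothetical wiring; in SIOT the client manager
// normally constructs and runs clients from the node tree):
//
//	dbClient := NewDbClient(nc, config)
//	go func() { _ = dbClient.Run() }()
//	// ... later, shut the client down
//	dbClient.Stop(nil)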

// Run runs the main logic for this client and blocks until stopped
func (dbc *DbClient) Run() error {
	log.Println("Starting db client:", dbc.config.Description)
	var err error

	// FIXME, we probably want to store edge points too ...

	// subscribe to all points flowing up through our parent node
	subject := fmt.Sprintf("up.%v.*", dbc.config.Parent)
	dbc.upSub, err = dbc.nc.Subscribe(subject, func(msg *nats.Msg) {
		points, err := data.PbDecodePoints(msg.Data)
		if err != nil {
			log.Println("Error decoding points in db upSub:", err)
			return
		}

		// find node ID for points; subject is up.<parent>.<nodeID>
		chunks := strings.Split(msg.Subject, ".")
		if len(chunks) != 3 {
			log.Println("db client up sub, malformed subject:", msg.Subject)
			return
		}

		dbc.newDbPoints <- NewPoints{chunks[2], "", points}
	})

	if err != nil {
		return fmt.Errorf("subscribing to %v: %w", subject, err)
	}

	// subscribe to high-rate (HR) points flowing up through our parent node
	subjectHR := fmt.Sprintf("phrup.%v.*", dbc.config.Parent)
	dbc.upSubHr, err = dbc.nc.Subscribe(subjectHR, func(msg *nats.Msg) {
		// find node ID for points; subject is phrup.<parent>.<nodeID>
		chunks := strings.Split(msg.Subject, ".")
		if len(chunks) != 3 {
			log.Println("db client up hr sub, malformed subject:", msg.Subject)
			return
		}

		nodeID := chunks[2]

		// Update nodeCache with no points
		err := dbc.nodeCache.Update(dbc.nc, NewPoints{
			ID: nodeID,
		})
		if err != nil {
			log.Printf("error updating cache: %v", err)
		}

		err = data.DecodeSerialHrPayload(msg.Data, func(pt data.Point) {
			tags := map[string]string{
				"type": pt.Type,
				"key":  pt.Key,
			}
			dbc.nodeCache.CopyTags(nodeID, tags)
			p := influxdb2.NewPoint(InfluxMeasurement,
				tags,
				map[string]interface{}{
					"value": pt.Value,
				},
				pt.Time)
			dbc.writeAPI.WritePoint(p)
		})

		if err != nil {
			log.Println("DB: error decoding HR data:", err)
		}
	})

	if err != nil {
		return fmt.Errorf("subscribing to %v: %w", subjectHR, err)
	}
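
	// Note: the HR path above writes only the numeric "value" field; the
	// regular points path in the main loop below also writes "text".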

	// subscribe to history queries for this client's node; requests and
	// responses are JSON encoded
	subjectHistory := fmt.Sprintf("history.%v", dbc.config.ID)
	dbc.historySub, err = dbc.nc.Subscribe(subjectHistory, func(msg *nats.Msg) {
		query := new(data.HistoryQuery)
		results := new(data.HistoryResults)
		ctx := context.Background()

		// Defer encoding and sending the response
		defer func() {
			res, err := json.Marshal(results)
			if err != nil {
				err = msg.Respond([]byte(`{"error":"error encoding response"}`))
				if err != nil {
					log.Printf("error responding to history query: %v", err)
				}
			} else {
				err = msg.Respond(res)
				if err != nil {
					// Try responding via NATS with the error
					results = &data.HistoryResults{
						ErrorMessage: err.Error(),
					}
					res, parseErr := json.Marshal(results)
					if parseErr == nil {
						retryError := msg.Respond(res)
						if retryError == nil {
							// clear original error
							err = nil
						}
					}
				}
				if err != nil {
					log.Printf("error responding to history query: %v", err)
				}
			}
		}()

		// Parse query; use a local err so this NATS callback does not race
		// on Run's err variable
		err := json.Unmarshal(msg.Data, query)
		if err != nil {
			results.ErrorMessage = "parsing query: " + err.Error()
			return
		}
		log.Printf("received history query: %+v", query)

		// Execute query
		query.Execute(
			ctx,
			dbc.client.QueryAPI(dbc.config.Org),
			dbc.config.Bucket,
			InfluxMeasurement,
			results,
		)
	})

	if err != nil {
		return fmt.Errorf("subscribing to %v: %w", subjectHistory, err)
	}
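
	// A history query is issued as a NATS request and answered with
	// JSON-encoded data.HistoryResults; a hypothetical caller sketch:
	//
	//	msg, err := nc.Request("history."+dbNodeID, queryJSON, 3*time.Second)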

	setupAPI := func() {
		log.Println("Setting up Influx API")
		// you can set things like retries, batching, precision, etc. in client options
		dbc.client = influxdb2.NewClientWithOptions(dbc.config.URI,
			dbc.config.AuthToken, influxdb2.DefaultOptions())
		dbc.writeAPI = dbc.client.WriteAPI(dbc.config.Org, dbc.config.Bucket)

		influxErrors := dbc.writeAPI.Errors()

		go func() {
			for err := range influxErrors {
				if err != nil {
					log.Println("Influx write error:", err)
				}
			}
			log.Println("Influxdb write api closed")
		}()
	}

	setupAPI()
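
	// WriteAPI is influxdb2's asynchronous API: WritePoint only queues the
	// point and a background goroutine batches writes to the server, so
	// failures surface on the Errors() channel drained above.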

done:
	for {
		select {
		case <-dbc.stop:
			log.Println("Stopping db client:", dbc.config.Description)
			break done
		case pts := <-dbc.newPoints:
			// points for our own config node; merge them into our config
			err := data.MergePoints(pts.ID, pts.Points, &dbc.config)
			if err != nil {
				log.Println("error merging new points:", err)
			}

			for _, p := range pts.Points {
				switch p.Type {
				case data.PointTypeURI,
					data.PointTypeOrg,
					data.PointTypeBucket,
					data.PointTypeAuthToken:
					// connection parameters changed, so restart the Influx write API
					dbc.client.Close()
					setupAPI()
				case data.PointTypeTagPointType:
					dbc.nodeCache = newNodeCache(dbc.config.TagPointTypes)
				}
			}

		case pts := <-dbc.newEdgePoints:
			err := data.MergeEdgePoints(pts.ID, pts.Parent, pts.Points, &dbc.config)
			if err != nil {
				log.Println("error merging new edge points:", err)
			}
		case pts := <-dbc.newDbPoints:
			// Update nodeCache if needed
			err := dbc.nodeCache.Update(dbc.nc, pts)
			if err != nil {
				log.Printf("error updating cache: %v", err)
			}
			// Add points to InfluxDB
			for _, point := range pts.Points {
				tags := map[string]string{
					"type": point.Type,
					"key":  point.Key,
				}
				dbc.nodeCache.CopyTags(pts.ID, tags)
				p := influxdb2.NewPoint(InfluxMeasurement,
					tags,
					map[string]interface{}{
						"value": point.Value,
						"text":  point.Text,
					},
					point.Time)
				dbc.writeAPI.WritePoint(p)
			}
		}
	}

	// clean up
	_ = dbc.upSub.Unsubscribe()
	_ = dbc.upSubHr.Unsubscribe()
	_ = dbc.historySub.Unsubscribe()
	dbc.client.Close()
	return nil
}

// Stop sends a signal to the Run function to exit
func (dbc *DbClient) Stop(_ error) {
	close(dbc.stop)
}

// Points is called by the Manager when new points for this
// node are received.
func (dbc *DbClient) Points(nodeID string, points []data.Point) {
	dbc.newPoints <- NewPoints{nodeID, "", points}
}

// EdgePoints is called by the Manager when new edge points for this
// node are received.
func (dbc *DbClient) EdgePoints(nodeID, parentID string, points []data.Point) {
	dbc.newEdgePoints <- NewPoints{nodeID, parentID, points}
}