github.com/angenalZZZ/gofunc@v0.0.0-20210507121333-48ff1be3917b/http/longpoll/longpoll.go

     1  package longpoll
     2  
     3  import (
     4  	"container/heap"
     5  	"container/list"
     6  	"encoding/json"
     7  	"errors"
     8  	"fmt"
     9  	"github.com/rs/xid"
    10  	"io"
    11  	"log"
    12  	"net/http"
    13  	"strconv"
    14  	"time"
    15  )
    16  
    17  const (
     18  	// FOREVER is a magic number representing 'forever' in
     19  	// Options.EventTimeToLiveSeconds.
    20  	FOREVER = -1001
    21  )
    22  
    23  // LongpollManager provides an interface to interact with the internal
     24  // longpolling pub-sub goroutine.
    25  //
    26  // This allows you to publish events via Publish()
    27  // If for some reason you want to stop the pub-sub goroutine at any time
    28  // you can call Shutdown() and all longpolling will be disabled.  Note that the
    29  // pub-sub goroutine will exit on program exit, so for most simple programs,
    30  // calling Shutdown() is not necessary.
    31  //
     32  // A LongpollManager, along with its internal subscriptionManager, is created
     33  // by calling manager := StartLongpoll(options).
    34  // This interface also exposes the HTTP handler that client code can attach to
    35  // a URL like so:
    36  //		mux := http.NewServeMux()
    37  //		mux.HandleFunc("/custom/path/to/events", manager.SubscriptionHandler)
    38  // Note, this http handler can be wrapped by another function (try capturing the
    39  // manager in a closure) to add additional validation, access control, or other
    40  // functionality on top of the subscription handler.
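//
// For example, one possible way to do that wrapping (requireAuth and
// isAuthorized are hypothetical helpers shown for illustration, not part of
// this package):
//		func requireAuth(next http.HandlerFunc) http.HandlerFunc {
//			return func(w http.ResponseWriter, r *http.Request) {
//				if !isAuthorized(r) {
//					http.Error(w, "forbidden", http.StatusForbidden)
//					return
//				}
//				next(w, r)
//			}
//		}
//		mux.HandleFunc("/custom/path/to/events", requireAuth(manager.SubscriptionHandler))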
    41  //
     42  // You can have another HTTP handler publish events by capturing the manager in
     43  // a closure and calling manager.Publish() from inside that handler.  See the
    44  // advanced example (examples/advanced/advanced.go)
    45  //
    46  // If for some reason you want multiple goroutines handling different pub-sub
    47  // channels, you can simply create multiple LongpollManagers.
    48  type LongpollManager struct {
    49  	subManager          *subscriptionManager
    50  	eventsIn            chan<- lpEvent
    51  	stopSignal          chan<- bool
    52  	SubscriptionHandler func(w http.ResponseWriter, r *http.Request)
    53  }
    54  
    55  // Publish an event for a given subscription category.  This event can have any
     56  // arbitrary data that is convertible to JSON via the standard library's json.Marshal().
     57  // The category param must be a non-empty string no longer than 1024 characters,
     58  // otherwise an error is returned.
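//
// A minimal usage sketch (the category name and payload are arbitrary examples):
//		if err := manager.Publish("notifications", map[string]string{"text": "hello"}); err != nil {
//			log.Printf("publish failed: %v", err)
//		}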
    59  func (m *LongpollManager) Publish(category string, data interface{}) error {
    60  	if len(category) == 0 {
    61  		return errors.New("empty category")
    62  	}
    63  	if len(category) > 1024 {
    64  		return errors.New("category cannot be longer than 1024")
    65  	}
    66  	m.eventsIn <- lpEvent{time.Now().UnixNano() / int64(time.Millisecond), category, data}
    67  	return nil
    68  }
    69  
     70  // Shutdown allows the internal goroutine that handles the longpoll pub-sub
    71  // to be stopped.  This may be useful if you want to turn off longpolling
    72  // without terminating your program.  After a shutdown, you can't call Publish()
    73  // or get any new results from the SubscriptionHandler.  Multiple calls to
    74  // this function on the same manager will result in a panic.
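//
// For example, a program that only wants longpolling for its own lifetime can
// simply defer the call (illustrative sketch):
//		manager, err := longpoll.StartLongpoll(longpoll.Options{})
//		if err != nil {
//			log.Fatal(err)
//		}
//		defer manager.Shutdown()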
    75  func (m *LongpollManager) Shutdown() {
    76  	close(m.stopSignal)
    77  }
    78  
     79  // Options configures a LongpollManager and is passed to StartLongpoll(options).
    80  type Options struct {
    81  	// Whether or not to print logs about longpolling
    82  	LoggingEnabled bool
    83  
     84  	// Max client timeout in seconds to be accepted by the SubscriptionHandler
    85  	// (The 'timeout' HTTP query param).  Defaults to 120.
    86  	MaxLongpollTimeoutSeconds int
    87  
     88  	// How many events to buffer per subscription category before discarding
    89  	// oldest events due to buffer being exhausted.  Larger buffer sizes are
    90  	// useful for high volumes of events in the same categories.  But for
    91  	// low-volumes, smaller buffer sizes are more efficient.  Defaults to 250.
    92  	MaxEventBufferSize int
    93  
    94  	// How long (seconds) events remain in their respective category's
    95  	// eventBuffer before being deleted. Deletes old events even if buffer has
    96  	// the room.  Useful to save space if you don't need old events.
    97  	// You can use a large MaxEventBufferSize to handle spikes in event volumes
    98  	// in a single category but have a relatively short EventTimeToLiveSeconds
    99  	// value to save space in the more common low-volume case.
   100  	// If you want events to remain in the buffer as long as there is room per
   101  	// MaxEventBufferSize, then use the magic value longpoll.FOREVER here.
   102  	// Defaults to FOREVER.
   103  	EventTimeToLiveSeconds int
   104  
   105  	// Whether or not to delete an event as soon as it is retrieved via an
    106  	// HTTP longpoll.  Saves space if clients are only interested in seeing an
    107  	// event once and never again.  Meant mostly for scenarios where events
    108  	// act as a sort of notification and each subscription category is assigned
    109  	// to a single client.  As soon as any client(s) pull down this event, it's
    110  	// gone forever.  Note that multiple clients can still receive the event if
    111  	// they are all actively in the middle of a longpoll when a new event
    112  	// occurs.  In that case the event is sent to every listening client, then
    113  	// skips being placed in a buffer and is gone forever.
   114  	DeleteEventAfterFirstRetrieval bool
   115  }
   116  
    117  // StartLongpoll creates a LongpollManager, starts the internal pub-sub goroutine,
    118  // and returns the manager reference, which you can use anywhere to Publish() events
    119  // or attach a URL to the manager's SubscriptionHandler member.  This function
   120  // takes an Options struct that configures the longpoll behavior.
   121  // If Options.EventTimeToLiveSeconds is omitted, the default is forever.
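//
// A typical setup might look like the following sketch (field values are
// illustrative only, not recommendations):
//		manager, err := longpoll.StartLongpoll(longpoll.Options{
//			LoggingEnabled:            true,
//			MaxLongpollTimeoutSeconds: 120,
//			MaxEventBufferSize:        250,
//			EventTimeToLiveSeconds:    60 * 10,
//		})
//		if err != nil {
//			log.Fatalf("failed to start longpoll manager: %v", err)
//		}
//		http.HandleFunc("/events", manager.SubscriptionHandler)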
   122  func StartLongpoll(opts Options) (*LongpollManager, error) {
    123  	// default if not specified (the field was likely omitted from the Options struct)
   124  	if opts.MaxLongpollTimeoutSeconds == 0 {
   125  		opts.MaxLongpollTimeoutSeconds = 120
   126  	}
    127  	// default if not specified (the field was likely omitted from the Options struct)
   128  	if opts.MaxEventBufferSize == 0 {
   129  		opts.MaxEventBufferSize = 250
   130  	}
   131  	// If TTL is zero, default to FOREVER
   132  	if opts.EventTimeToLiveSeconds == 0 {
   133  		opts.EventTimeToLiveSeconds = FOREVER
   134  	}
   135  	if opts.MaxEventBufferSize < 1 {
   136  		return nil, errors.New("Options.MaxEventBufferSize must be at least 1")
   137  	}
   138  	if opts.MaxLongpollTimeoutSeconds < 1 {
   139  		return nil, errors.New("Options.MaxLongpollTimeoutSeconds must be at least 1")
   140  	}
   141  	// TTL must be positive, non-zero, or the magic FOREVER value (a negative const)
   142  	if opts.EventTimeToLiveSeconds < 1 && opts.EventTimeToLiveSeconds != FOREVER {
    143  		return nil, errors.New("Options.EventTimeToLiveSeconds must be at least 1 or the constant longpoll.FOREVER")
   144  	}
   145  	channelSize := 100
   146  	clientRequestChan := make(chan clientSubscription, channelSize)
   147  	clientTimeoutChan := make(chan clientCategoryPair, channelSize)
   148  	events := make(chan lpEvent, channelSize)
   149  	// never has a send, only a close, so no larger capacity needed:
   150  	quit := make(chan bool, 1)
   151  	subManager := subscriptionManager{
   152  		clientSubscriptions:            clientRequestChan,
   153  		ClientTimeouts:                 clientTimeoutChan,
   154  		Events:                         events,
   155  		ClientSubChannels:              make(map[string]map[string]chan<- []lpEvent),
   156  		SubEventBuffer:                 make(map[string]*expiringBuffer),
   157  		Quit:                           quit,
   158  		LoggingEnabled:                 opts.LoggingEnabled,
   159  		MaxLongpollTimeoutSeconds:      opts.MaxLongpollTimeoutSeconds,
   160  		MaxEventBufferSize:             opts.MaxEventBufferSize,
   161  		EventTimeToLiveSeconds:         opts.EventTimeToLiveSeconds,
   162  		DeleteEventAfterFirstRetrieval: opts.DeleteEventAfterFirstRetrieval,
   163  		// check for stale categories every 3 minutes.
   164  		// remember we do expiration/cleanup on individual buffers whenever
   165  		// activity occurs on that buffer's category (client request, event published)
   166  		// so this periodic purge check is only needed to remove events on
   167  		// categories that have been inactive for a while.
   168  		staleCategoryPurgePeriodSeconds: 60 * 3,
    169  		// set last purge time to present so we wait a full period before purging
    170  		// if this defaulted to zero then we'd immediately do a purge, which is unnecessary
   171  		lastStaleCategoryPurgeTime: time.Now().UnixNano() / int64(time.Millisecond),
   172  		// A priority queue (min heap) that keeps track of event buffers by their
   173  		// last event time.  Used to know when to delete inactive categories/buffers.
   174  		bufferPriorityQueue: make(priorityQueue, 0),
   175  	}
   176  	heap.Init(&subManager.bufferPriorityQueue)
   177  	// Start subscription manager
   178  	go subManager.run()
    179  	manager := LongpollManager{
    180  		&subManager,
    181  		events,
    182  		quit,
    183  		getLongPollSubscriptionHandler(opts.MaxLongpollTimeoutSeconds, clientRequestChan, clientTimeoutChan, opts.LoggingEnabled),
    184  	}
    185  	return &manager, nil
   186  }
   187  
   188  type clientSubscription struct {
   189  	clientCategoryPair
   190  	// used to ensure no events skipped between long polls:
   191  	LastEventTime time.Time
   192  	// we channel arrays of events since we need to send everything a client
   193  	// cares about in a single channel send.  This makes channel receives a
   194  	// one shot deal.
   195  	Events chan []lpEvent
   196  }
   197  
   198  func newclientSubscription(subscriptionCategory string, lastEventTime time.Time) (*clientSubscription, error) {
   199  	subscription := clientSubscription{
   200  		clientCategoryPair{xid.New().String(), subscriptionCategory},
   201  		lastEventTime,
   202  		make(chan []lpEvent, 1),
   203  	}
   204  	return &subscription, nil
   205  }
   206  
    207  // get web handler that has a closure around the subscription channel and clientTimeout channel.
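// For illustration, a client polling a handler mounted at a hypothetical
// /events URL might issue a request like:
//		resp, err := http.Get("http://localhost:8080/events?timeout=30&category=notifications&since_time=1620000000000")
// where the host, path, and parameter values are examples only.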
   208  func getLongPollSubscriptionHandler(maxTimeoutSeconds int, subscriptionRequests chan clientSubscription,
   209  	clientTimeouts chan<- clientCategoryPair, loggingEnabled bool) func(w http.ResponseWriter, r *http.Request) {
   210  	return func(w http.ResponseWriter, r *http.Request) {
   211  		timeout, err := strconv.Atoi(r.URL.Query().Get("timeout"))
   212  		if loggingEnabled {
   213  			log.Println("Handling HTTP request at ", r.URL)
   214  		}
   215  		// We are going to return json no matter what:
   216  		w.Header().Set("Content-Type", "application/json")
   217  		// Don't cache response:
   218  		w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate") // HTTP 1.1.
   219  		w.Header().Set("Pragma", "no-cache")                                   // HTTP 1.0.
   220  		w.Header().Set("Expires", "0")                                         // Proxies.
   221  		if err != nil || timeout > maxTimeoutSeconds || timeout < 1 {
   222  			if loggingEnabled {
   223  				log.Printf("Error: Invalid timeout param.  Must be 1-%d. Got: %q.\n",
   224  					maxTimeoutSeconds, r.URL.Query().Get("timeout"))
   225  			}
   226  			io.WriteString(w, fmt.Sprintf("{\"error\": \"Invalid timeout arg.  Must be 1-%d.\"}", maxTimeoutSeconds))
   227  			return
   228  		}
   229  		category := r.URL.Query().Get("category")
   230  		if len(category) == 0 || len(category) > 1024 {
   231  			if loggingEnabled {
   232  				log.Printf("Error: Invalid subscription category, must be 1-1024 characters long.\n")
   233  			}
   234  			io.WriteString(w, "{\"error\": \"Invalid subscription category, must be 1-1024 characters long.\"}")
   235  			return
   236  		}
   237  		// Default to only looking for current events
   238  		lastEventTime := time.Now()
   239  		// since_time is string of milliseconds since epoch
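		// (for illustration: a client that wants only events newer than "now"
		// could pass time.Now().UnixNano()/int64(time.Millisecond) here)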
   240  		lastEventTimeParam := r.URL.Query().Get("since_time")
   241  		if len(lastEventTimeParam) > 0 {
    242  			// Client is requesting any events since the given timestamp; parse it
   243  			msInt, parseError := strconv.ParseInt(lastEventTimeParam, 10, 64)
   244  			if parseError != nil {
   245  				if loggingEnabled {
    246  				log.Printf("Error parsing since_time arg. Param value: %s, Error: %s.\n",
   247  						lastEventTimeParam, parseError)
   248  				}
    249  			io.WriteString(w, "{\"error\": \"Invalid since_time arg.\"}")
   250  				return
   251  			}
   252  			lastEventTime = time.Unix(0, msInt*int64(time.Millisecond)).In(time.UTC)
   253  		}
   254  		subscription, err := newclientSubscription(category, lastEventTime)
   255  		if err != nil {
   256  			if loggingEnabled {
   257  				log.Printf("Error creating new Subscription: %s.\n", err)
   258  			}
   259  			io.WriteString(w, "{\"error\": \"Error creating new Subscription.\"}")
   260  			return
   261  		}
   262  		subscriptionRequests <- *subscription
    263  		// Listen for connection close so we can un-register the subscription in
    264  		// the event that a client crashes or the connection goes down.  We don't
    265  		// need to wait around to fulfill a subscription if no one is going to
    266  		// receive it.
    267  		disconnectNotify := r.Context().Done() // canceled when the client disconnects
   268  		select {
   269  		case <-time.After(time.Duration(timeout) * time.Second):
   270  			// Lets the subscription manager know it can discard this request's
   271  			// channel.
   272  			clientTimeouts <- subscription.clientCategoryPair
   273  			timeout_resp := makeTimeoutResponse(time.Now())
   274  			if jsonData, err := json.Marshal(timeout_resp); err == nil {
   275  				io.WriteString(w, string(jsonData))
   276  			} else {
   277  				io.WriteString(w, "{\"error\": \"json marshaller failed\"}")
   278  			}
   279  		case events := <-subscription.Events:
   280  			// Consume event.  Subscription manager will automatically discard
   281  			// this client's channel upon sending event
    282  			// NOTE: events is actually []lpEvent
   283  			if jsonData, err := json.Marshal(eventResponse{&events}); err == nil {
   284  				io.WriteString(w, string(jsonData))
   285  			} else {
   286  				io.WriteString(w, "{\"error\": \"json marshaller failed\"}")
   287  			}
   288  		case <-disconnectNotify:
   289  			// Client connection closed before any events occurred and before
   290  			// the timeout was exceeded.  Tell manager to forget about this
   291  			// client.
   292  			clientTimeouts <- subscription.clientCategoryPair
   293  		}
   294  	}
   295  }
   296  
    297  // timeoutResponse is the json response sent when no events occur before the client's timeout.
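// For example, the marshaled form looks like (timestamp value illustrative):
//		{"timeout":"no events before timeout","timestamp":1620000000000}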
   298  type timeoutResponse struct {
   299  	TimeoutMessage string `json:"timeout"`
   300  	Timestamp      int64  `json:"timestamp"`
   301  }
   302  
   303  func makeTimeoutResponse(t time.Time) *timeoutResponse {
   304  	return &timeoutResponse{"no events before timeout",
   305  		t.UnixNano() / int64(time.Millisecond)}
   306  }
   307  
   308  type clientCategoryPair struct {
   309  	ClientUUID           string
   310  	SubscriptionCategory string
   311  }
   312  
   313  type subscriptionManager struct {
   314  	clientSubscriptions chan clientSubscription
   315  	ClientTimeouts      <-chan clientCategoryPair
   316  	Events              <-chan lpEvent
    317  	// Contains all client sub channels grouped first by subscription category,
    318  	// then by client UUID
   319  	ClientSubChannels map[string]map[string]chan<- []lpEvent
   320  	SubEventBuffer    map[string]*expiringBuffer
   321  	// channel to inform manager to stop running
   322  	Quit           <-chan bool
   323  	LoggingEnabled bool
   324  	// Max allowed timeout seconds when clients requesting a longpoll
   325  	// This is to validate the 'timeout' query param
   326  	MaxLongpollTimeoutSeconds int
    327  	// How big the buffers are (1-n) before events are discarded FIFO
   328  	MaxEventBufferSize int
   329  	// How long events can stay in their eventBuffer
   330  	EventTimeToLiveSeconds int
   331  	// Whether or not to delete an event after the first time it is served via
   332  	// HTTP
   333  	DeleteEventAfterFirstRetrieval bool
   334  	// How often we check for stale event buffers and delete them
   335  	staleCategoryPurgePeriodSeconds int
    336  	// Last time in milliseconds since epoch that we performed a stale category purge
   337  	lastStaleCategoryPurgeTime int64
   338  	// PriorityQueue/heap that keeps event buffers in oldest-first order
   339  	// so we can easily delete eventBuffer/categories that have expired data
   340  	bufferPriorityQueue priorityQueue
   341  }
   342  
   343  // This should be fired off in its own goroutine
   344  func (sm *subscriptionManager) run() error {
   345  	if sm.LoggingEnabled {
   346  		log.Println("SubscriptionManager: Starting run.")
   347  	}
   348  	for {
    349  		// NOTE: we check to see if it's time to purge old buffers whenever
    350  		// something happens or a period of inactivity has occurred.
    351  		// An alternative would be to have another goroutine with a
    352  		// select case time.After(), but then you'd have concurrency issues
    353  		// with access to the sm.SubEventBuffer and sm.bufferPriorityQueue objects.
    354  		// So instead of introducing mutexes we make these uglier manual time-check calls.
   355  		select {
   356  		case newClient := <-sm.clientSubscriptions:
   357  			sm.handleNewClient(&newClient)
   358  			sm.seeIfTimeToPurgeStaleCategories()
   359  		case disconnected := <-sm.ClientTimeouts:
   360  			sm.handleClientDisconnect(&disconnected)
   361  			sm.seeIfTimeToPurgeStaleCategories()
   362  		case event := <-sm.Events:
   363  			sm.handleNewEvent(&event)
   364  			sm.seeIfTimeToPurgeStaleCategories()
   365  		case <-time.After(time.Duration(5) * time.Second):
   366  			sm.seeIfTimeToPurgeStaleCategories()
    367  		case <-sm.Quit:
   368  			if sm.LoggingEnabled {
   369  				log.Println("SubscriptionManager: received quit signal, stopping.")
   370  			}
   371  			// break out of our infinite loop/select
   372  			return nil
   373  		}
   374  	}
   375  }
   376  
   377  func (sm *subscriptionManager) seeIfTimeToPurgeStaleCategories() error {
   378  	now_ms := time.Now().UnixNano() / int64(time.Millisecond)
   379  	if now_ms > (sm.lastStaleCategoryPurgeTime + int64(1000*sm.staleCategoryPurgePeriodSeconds)) {
   380  		sm.lastStaleCategoryPurgeTime = now_ms
   381  		return sm.purgeStaleCategories()
   382  	}
   383  	return nil
   384  }
   385  
   386  func (sm *subscriptionManager) handleNewClient(newClient *clientSubscription) error {
   387  	var funcErr error
   388  	// before storing client sub request, see if we already have data in
    389  	// the corresponding event buffer that we can use to fulfill the request
   390  	// without storing it
   391  	doQueueRequest := true
   392  	if expiringBuf, found := sm.SubEventBuffer[newClient.SubscriptionCategory]; found {
   393  		// First clean up anything that expired
   394  		sm.checkExpiredEvents(expiringBuf)
   395  		// We have a buffer for this sub category, check for buffered events
   396  		if events, err := expiringBuf.eventBuffer_ptr.GetEventsSince(newClient.LastEventTime,
   397  			sm.DeleteEventAfterFirstRetrieval); err == nil && len(events) > 0 {
   398  			doQueueRequest = false
   399  			if sm.LoggingEnabled {
   400  				log.Printf("SubscriptionManager: Skip adding client, sending %d events. (Category: %q Client: %s)\n",
   401  					len(events), newClient.SubscriptionCategory, newClient.ClientUUID)
   402  			}
   403  			// Send client buffered events.  Client will immediately consume
   404  			// and end long poll request, so no need to have manager store
   405  			newClient.Events <- events
   406  		} else if err != nil {
   407  			funcErr = fmt.Errorf("Error getting events from event buffer: %s.\n", err)
   408  			if sm.LoggingEnabled {
   409  				log.Printf("Error getting events from event buffer: %s.\n", err)
   410  			}
   411  		}
    412  		// Buffer could have been emptied due to the DeleteEventAfterFirstRetrieval
   413  		// or EventTimeToLiveSeconds options.
   414  		sm.deleteBufferIfEmpty(expiringBuf, newClient.SubscriptionCategory)
   415  		// NOTE: expiringBuf may now be invalidated (if it was empty/deleted),
   416  		// don't use ref anymore.
   417  	}
   418  	if doQueueRequest {
   419  		// Couldn't find any immediate events, store for future:
   420  		categoryClients, found := sm.ClientSubChannels[newClient.SubscriptionCategory]
   421  		if !found {
   422  			// first request for this sub category, add client chan map entry
   423  			categoryClients = make(map[string]chan<- []lpEvent)
   424  			sm.ClientSubChannels[newClient.SubscriptionCategory] = categoryClients
   425  		}
   426  		if sm.LoggingEnabled {
   427  			log.Printf("SubscriptionManager: Adding Client (Category: %q Client: %s)\n",
   428  				newClient.SubscriptionCategory, newClient.ClientUUID)
   429  		}
   430  		categoryClients[newClient.ClientUUID] = newClient.Events
   431  	}
   432  	return funcErr
   433  }
   434  
   435  func (sm *subscriptionManager) handleClientDisconnect(disconnected *clientCategoryPair) error {
   436  	var funcErr error
   437  	if subCategoryClients, found := sm.ClientSubChannels[disconnected.SubscriptionCategory]; found {
   438  		// NOTE:  The delete function doesn't return anything, and will do nothing if the
   439  		// specified key doesn't exist.
   440  		delete(subCategoryClients, disconnected.ClientUUID)
   441  		if sm.LoggingEnabled {
   442  			log.Printf("SubscriptionManager: Removing Client (Category: %q Client: %s)\n",
   443  				disconnected.SubscriptionCategory, disconnected.ClientUUID)
   444  		}
   445  		// Remove the client sub map entry for this category if there are
   446  		// zero clients.  This keeps the ClientSubChannels map lean in
   447  		// the event that there are many categories over time and we
   448  		// would otherwise keep a bunch of empty sub maps
   449  		if len(subCategoryClients) == 0 {
   450  			delete(sm.ClientSubChannels, disconnected.SubscriptionCategory)
   451  		}
   452  	} else {
   453  		// Sub category entry not found.  Weird.  Log this!
   454  		if sm.LoggingEnabled {
   455  			log.Printf("Warning: client disconnect for non-existing subscription category: %q\n",
   456  				disconnected.SubscriptionCategory)
   457  		}
   458  		funcErr = fmt.Errorf("Client disconnect for non-existing subscription category: %q\n",
   459  			disconnected.SubscriptionCategory)
   460  	}
   461  	return funcErr
   462  }
   463  
   464  func (sm *subscriptionManager) handleNewEvent(newEvent *lpEvent) error {
   465  	var funcErr error
   466  	doBufferEvents := true
   467  	// Send event to any listening client's channels
   468  	if clients, found := sm.ClientSubChannels[newEvent.Category]; found && len(clients) > 0 {
   469  		if sm.DeleteEventAfterFirstRetrieval {
   470  			// Configured to delete events from buffer after first retrieval by clients.
   471  			// Now that we already have clients receiving, don't bother
   472  			// buffering the event.
   473  			// NOTE: this is wrapped by condition that clients are found
   474  			// if no clients are found, we queue the event even when we have
    475  			// the delete-on-first option set because no one has received
   476  			// the event yet.
   477  			doBufferEvents = false
   478  		}
   479  		if sm.LoggingEnabled {
   480  			log.Printf("SubscriptionManager: forwarding event to %d clients. (event: %v)\n", len(clients), newEvent)
   481  		}
   482  		for clientUUID, clientChan := range clients {
   483  			if sm.LoggingEnabled {
   484  				log.Printf("SubscriptionManager: sending event to client: %s\n", clientUUID)
   485  			}
   486  			clientChan <- []lpEvent{*newEvent}
   487  		}
   488  		// Remove all client subscriptions since we just sent all the
   489  		// clients an event.  In longpolling, subscriptions only last
   490  		// until there is data (which just got sent) or a timeout
   491  		// (which is handled by the disconnect case).
   492  		// Doing this also keeps the subscription map lean in the event
   493  		// of many different subscription categories, we don't keep the
   494  		// trivial/empty map entries.
   495  		if sm.LoggingEnabled {
   496  			log.Printf("SubscriptionManager: Removing %d client subscriptions for: %s\n",
   497  				len(clients), newEvent.Category)
   498  		}
   499  		delete(sm.ClientSubChannels, newEvent.Category)
   500  	} // else no client subscriptions
   501  
   502  	expiringBuf, bufFound := sm.SubEventBuffer[newEvent.Category]
   503  	if doBufferEvents {
   504  		// Add event buffer for this event's subscription category if doesn't exist
   505  		if !bufFound {
   506  			now_ms := time.Now().UnixNano() / int64(time.Millisecond)
   507  			buf := &eventBuffer{
   508  				list.New(),
   509  				sm.MaxEventBufferSize,
   510  				now_ms,
   511  			}
   512  			expiringBuf = &expiringBuffer{
   513  				eventBuffer_ptr: buf,
   514  				category:        newEvent.Category,
   515  				priority:        now_ms,
   516  			}
   517  			if sm.LoggingEnabled {
   518  				log.Printf("Creating new eventBuffer for category: %v",
   519  					newEvent.Category)
   520  			}
   521  			sm.SubEventBuffer[newEvent.Category] = expiringBuf
   522  			sm.priorityQueueUpdateBufferCreated(expiringBuf)
   523  		}
   524  		// queue event in event buffer
   525  		if qErr := expiringBuf.eventBuffer_ptr.QueueEvent(newEvent); qErr != nil {
   526  			if sm.LoggingEnabled {
   527  				log.Printf("Error: failed to queue event.  err: %s\n", qErr)
   528  			}
   529  			funcErr = fmt.Errorf("Error: failed to queue event.  err: %s\n", qErr)
   530  		} else {
   531  			if sm.LoggingEnabled {
   532  				log.Printf("SubscriptionManager: queued event: %v.\n", newEvent)
   533  			}
   534  			// Queued event successfully
   535  			sm.priorityQueueUpdateNewEvent(expiringBuf, newEvent)
   536  		}
   537  	} else {
   538  		if sm.LoggingEnabled {
   539  			log.Printf("SubscriptionManager: DeleteEventAfterFirstRetrieval: SKIP queue event: %v.\n", newEvent)
   540  		}
   541  	}
   542  	// Perform Event TTL check and empty buffer cleanup:
   543  	if bufFound && expiringBuf != nil {
   544  		sm.checkExpiredEvents(expiringBuf)
   545  		sm.deleteBufferIfEmpty(expiringBuf, newEvent.Category)
   546  		// NOTE: expiringBuf may now be invalidated if it was deleted
   547  	}
   548  	return funcErr
   549  }
   550  
   551  func (sm *subscriptionManager) checkExpiredEvents(expiringBuf *expiringBuffer) error {
   552  	if sm.EventTimeToLiveSeconds == FOREVER {
   553  		// Events can never expire. bail out early instead of wasting time.
   554  		return nil
   555  	}
   556  	// determine what time is considered the threshold for expiration
   557  	now_ms := time.Now().UnixNano() / int64(time.Millisecond)
   558  	expiration_time := now_ms - int64(sm.EventTimeToLiveSeconds*1000)
   559  	return expiringBuf.eventBuffer_ptr.DeleteEventsOlderThan(expiration_time)
   560  }
   561  
   562  func (sm *subscriptionManager) deleteBufferIfEmpty(expiringBuf *expiringBuffer, category string) error {
   563  	if expiringBuf.eventBuffer_ptr.List.Len() == 0 { // TODO: nil check?  or is never possible
   564  		if sm.LoggingEnabled {
   565  			log.Printf("Deleting empty eventBuffer for category: %s\n", category)
   566  		}
   567  		delete(sm.SubEventBuffer, category)
   568  		sm.priorityQueueUpdateDeletedBuffer(expiringBuf)
   569  	}
   570  	return nil
   571  }
   572  
   573  func (sm *subscriptionManager) purgeStaleCategories() error {
   574  	if sm.EventTimeToLiveSeconds == FOREVER {
   575  		// Events never expire, don't bother checking here
   576  		return nil
   577  	}
   578  	if sm.LoggingEnabled {
   579  		log.Println("SubscriptionManager: performing stale category purge.")
   580  	}
   581  	now_ms := time.Now().UnixNano() / int64(time.Millisecond)
   582  	expiration_time := now_ms - int64(sm.EventTimeToLiveSeconds*1000)
   583  	for sm.bufferPriorityQueue.Len() > 0 {
   584  		topPriority, err := sm.bufferPriorityQueue.peakTopPriority()
   585  		if err != nil {
    586  			// queue is empty (returned an empty-buffer error), nothing to purge
   587  			break
   588  		}
   589  		if topPriority > expiration_time {
   590  			// The eventBuffer with the oldest most-recent-event-Timestamp is
   591  			// still too recent to be expired, nothing to purge.
   592  			break
   593  		} else { // topPriority <= expiration_time
   594  			// This buffer's most recent event is older than our TTL, so remove
   595  			// the entire buffer.
   596  			if item, ok := heap.Pop(&sm.bufferPriorityQueue).(*expiringBuffer); ok {
   597  				if sm.LoggingEnabled {
   598  					log.Printf("SubscriptionManager: purging expired eventBuffer for category: %v.\n",
   599  						item.category)
   600  				}
   601  				// remove from our category-to-buffer map:
   602  				delete(sm.SubEventBuffer, item.category)
   603  				// invalidate references
   604  				item.eventBuffer_ptr = nil
   605  			} else {
   606  				log.Printf("ERROR: found item in bufferPriorityQueue of unexpected type when attempting a TTL purge.\n")
   607  			}
   608  		}
   609  		// will continue until we either run out of heap/queue items or we found
   610  		// a buffer that has events more recent than our TTL window which
   611  		// means we will never find any older buffers.
   612  	}
   613  	return nil
   614  }
   615  
   616  // Wraps updates to SubscriptionManager.bufferPriorityQueue when a new
   617  // eventBuffer is created for a given category.  In the event that we don't
   618  // expire events (TTL == FOREVER), we don't bother paying the price of keeping
   619  // the priority queue.
   620  func (sm *subscriptionManager) priorityQueueUpdateBufferCreated(expiringBuf *expiringBuffer) error {
   621  	if sm.EventTimeToLiveSeconds == FOREVER {
   622  		// don't bother keeping track
   623  		return nil
   624  	}
   625  	// NOTE: this call has a complexity of O(log(n)) where n is len of heap
   626  	heap.Push(&sm.bufferPriorityQueue, expiringBuf)
   627  	return nil
   628  }
   629  
   630  // Wraps updates to SubscriptionManager.bufferPriorityQueue when a new lpEvent
   631  // is added to an eventBuffer.  In the event that we don't expire events
   632  // (TTL == FOREVER), we don't bother paying the price of keeping the priority
   633  // queue.
   634  func (sm *subscriptionManager) priorityQueueUpdateNewEvent(expiringBuf *expiringBuffer, newEvent *lpEvent) error {
   635  	if sm.EventTimeToLiveSeconds == FOREVER {
   636  		// don't bother keeping track
   637  		return nil
   638  	}
    639  	// Update the priority to be the new event's timestamp.
   640  	// we keep the buffers in order of oldest last-event-timestamp
   641  	// so we can fetch the most stale buffers first when we do
   642  	// purgeStaleCategories()
   643  	//
   644  	// NOTE: this call is O(log(n)) where n is len of heap/priority queue
   645  	sm.bufferPriorityQueue.updatePriority(expiringBuf, newEvent.Timestamp)
   646  	return nil
   647  }
   648  
   649  // Wraps updates to SubscriptionManager.bufferPriorityQueue when an eventBuffer
   650  // is deleted after becoming empty.  In the event that we don't
   651  // expire events (TTL == FOREVER), we don't bother paying the price of keeping
   652  // the priority queue.
   653  // NOTE: This is called after an eventBuffer is deleted from sm.SubEventBuffer
   654  // and we want to remove the corresponding buffer item from our priority queue
   655  func (sm *subscriptionManager) priorityQueueUpdateDeletedBuffer(expiringBuf *expiringBuffer) error {
   656  	if sm.EventTimeToLiveSeconds == FOREVER {
   657  		// don't bother keeping track
   658  		return nil
   659  	}
   660  	// NOTE: this call is O(log(n)) where n is len of heap (queue)
   661  	heap.Remove(&sm.bufferPriorityQueue, expiringBuf.index)
   662  	expiringBuf.eventBuffer_ptr = nil // remove reference to eventBuffer
   663  	return nil
   664  }