feta/manager/manager.go

package manager

import (
"sync"
"time"
2019-11-03 13:17:00 +00:00
2020-03-27 23:02:36 +00:00
"git.eeqj.de/sneak/feta/instance"
"git.eeqj.de/sneak/feta/seeds"
"git.eeqj.de/sneak/feta/toot"
"github.com/rs/zerolog/log"
"github.com/spf13/viper"
)
2019-11-03 13:17:00 +00:00
2020-03-27 23:46:47 +00:00
// conform for storing toots
type DatabaseStorage interface {
ListInstances() ([]*instance.Instance, error)
2020-03-28 01:17:52 +00:00
//StoreInstances([]*instance.Instance) error
SaveInstance(*instance.Instance) error
2020-03-27 23:46:47 +00:00
}
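
// The concrete DatabaseStorage implementation lives elsewhere in the
// repository; the following is a minimal, hypothetical in-memory sketch (not
// used by feta itself) showing what a conforming backend looks like, e.g.
// for tests.
type memoryStorage struct {
	mu        sync.Mutex
	instances map[string]*instance.Instance
}

func (m *memoryStorage) ListInstances() ([]*instance.Instance, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	out := make([]*instance.Instance, 0, len(m.instances))
	for _, v := range m.instances {
		out = append(out, v)
	}
	return out, nil
}

func (m *memoryStorage) SaveInstance(i *instance.Instance) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.instances == nil {
		m.instances = make(map[string]*instance.Instance)
	}
	m.instances[i.Hostname] = i
	return nil
}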

// InstanceManager is the main data structure for the goroutine that manages
// the list of all known instances, fed by the locator.
type InstanceManager struct {
	mu                       sync.Mutex
	db                       DatabaseStorage
	instances                map[string]*instance.Instance
	newInstanceNotifications chan string
	tootDestination          chan *toot.Toot
	startup                  time.Time
	hostAdderSemaphore       chan bool
	nextDBSave               time.Time
}

// New returns a new InstanceManager for use by the Process.
func New(db DatabaseStorage) *InstanceManager {
	im := new(InstanceManager)
	im.db = db
	im.hostAdderSemaphore = make(chan bool, viper.GetInt("HostDiscoveryParallelism"))
	im.instances = make(map[string]*instance.Instance)
	// NB: the toot destination channel has not been set yet at this point,
	// so instances restored here receive a nil channel (see
	// SetTootDestination below).
	im.RestoreFromDB()
	return im
}
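
// Illustrative wiring only (channel and variable names here are hypothetical,
// not part of feta): the owning Process is expected to construct the manager,
// hand it its channels, and then run Manage in its own goroutine, roughly:
//
//	im := manager.New(db)
//	im.SetTootDestination(tootChan)             // channel consumed by the ingester
//	im.SetInstanceNotificationChannel(hostChan) // channel fed by the locator
//	go im.Manage()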

// RestoreFromDB loads the persisted instance list from the database and
// adds the instances to the manager's in-memory map.
func (im *InstanceManager) RestoreFromDB() {
	newil, err := im.db.ListInstances()
	if err != nil {
		log.Panic().
			Err(err).
			Msg("cannot get instance list from db")
	}
	im.lock()
	defer im.unlock()

	count := 0
	for _, x := range newil {
		x.SetTootDestination(im.tootDestination)
		im.instances[x.Hostname] = x
		count++
	}
	log.Info().
		Int("count", count).
		Msg("restored instances from database")
}

// SaveToDB persists all known instances to the database storage.
func (im *InstanceManager) SaveToDB() {
	for _, x := range im.ListInstances() {
		err := im.db.SaveInstance(x)
		if err != nil {
			log.Panic().
				Err(err).
				Msg("cannot write to db")
		}
	}
}

// SetTootDestination provides the InstanceManager with a channel to the
// ingester that it can give to its instances.
func (im *InstanceManager) SetTootDestination(td chan *toot.Toot) {
	im.tootDestination = td
}

func (im *InstanceManager) lock() {
	im.mu.Lock()
}

func (im *InstanceManager) unlock() {
	im.mu.Unlock()
}

// SetInstanceNotificationChannel is how the Process tells the
// InstanceManager about the channel from the InstanceLocator so that the
// InstanceLocator can provide it/us (the InstanceManager) with new
// instance hostnames. We (the manager) deduplicate the list ourselves.
func (im *InstanceManager) SetInstanceNotificationChannel(via chan string) {
	im.lock()
	defer im.unlock()
	im.newInstanceNotifications = via
}

func (im *InstanceManager) receiveSeedInstanceHostnames() {
	for _, x := range seeds.SeedInstances {
		go func(tmp string) {
			im.addInstanceByHostname(tmp)
		}(x)
	}
}

// Manage is the main entrypoint of the InstanceManager, designed to be
// called once in its own goroutine.
func (im *InstanceManager) Manage() {
	log.Info().Msg("InstanceManager starting")

	go func() {
		im.receiveNewInstanceHostnames()
	}()

	im.startup = time.Now()
	lastReport := im.startup

	go func() {
		im.receiveSeedInstanceHostnames()
	}()

	for {
		log.Info().Msg("InstanceManager tick")
		im.managerLoop()
		time.Sleep(1 * time.Second)

		if time.Now().After(lastReport.Add(viper.GetDuration("LogReportInterval"))) {
			lastReport = time.Now()
			im.logInstanceReport()
		}

		if im.nextDBSave.Before(time.Now()) {
			im.nextDBSave = time.Now().Add(60 * time.Second)
			im.SaveToDB()
		}
	}
}
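
// New and Manage read their tunables from viper. An illustrative
// configuration sketch (the key names are the ones consumed above; the
// values shown are hypothetical defaults, not feta's):
//
//	viper.SetDefault("HostDiscoveryParallelism", 20)
//	viper.SetDefault("LogReportInterval", 60*time.Second)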

func (im *InstanceManager) managerLoop() {
	im.lock()
	il := make([]*instance.Instance, 0)
	for _, v := range im.instances {
		il = append(il, v)
	}
	im.unlock()

	// FIXME is this a bug outside of the mutex above?
	for _, v := range il {
		go func(i *instance.Instance) {
			i.Tick()
		}(v)
	}
}

func (im *InstanceManager) hostnameExists(newhn string) bool {
	im.lock()
	defer im.unlock()
	_, present := im.instances[newhn]
	return present
}

func (im *InstanceManager) addInstanceByHostname(newhn string) {
	if im.hostnameExists(newhn) {
		// ignore adding new if we already know about it
		return
	}

	// this blocks on the channel size, limiting concurrency
	im.hostAdderSemaphore <- true

	i := instance.New(func(x *instance.Instance) {
		x.Hostname = newhn                       // set hostname
		x.SetTootDestination(im.tootDestination) // copy ingester input channel from manager to instance
	})

	// we do node detection under the adderSemaphore to avoid thundering
	// on startup
	i.DetectNodeTypeIfNecessary()

	// pop an item from the buffered channel
	<-im.hostAdderSemaphore

	// lock the map to insert
	im.lock()
	im.instances[newhn] = i
	im.unlock()
}
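
// For reference, hostAdderSemaphore above is the standard buffered-channel
// semaphore pattern: a channel of capacity N admits at most N goroutines into
// the guarded section at once. In isolation (illustrative sketch only, with a
// hypothetical doExpensiveProbe):
//
//	sem := make(chan bool, 4) // capacity = max concurrent probes
//	sem <- true               // acquire; blocks while 4 probes are in flight
//	doExpensiveProbe(host)
//	<-sem                     // release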

func (im *InstanceManager) receiveNewInstanceHostnames() {
	var newhn string
	for {
		newhn = <-im.newInstanceNotifications
		// receive them fast out of the channel, let the adding function lock to add
		// them one at a time, using a bunch of blocked goroutines as our
		// modification queue
		go im.addInstanceByHostname(newhn)
	}
}

func (im *InstanceManager) logInstanceReport() {
	r := im.instanceSummaryReport()

	sublogger := log.With().Logger()

	for k, v := range r {
		sublogger = sublogger.With().Uint(k, v).Logger()
	}

	sublogger.Info().
		Msg("instance report")
}

// ListInstances dumps a slice of all Instances the InstanceManager knows
// about
func (im *InstanceManager) ListInstances() []*instance.Instance {
	var out []*instance.Instance
	im.lock()
	defer im.unlock()
	for _, v := range im.instances {
		out = append(out, v)
	}
	return out
}

func (im *InstanceManager) instanceSummaryReport() map[string]uint {
	r := make(map[string]uint)
	for _, v := range im.ListInstances() {
		v.Lock()
		r[v.Status()]++
		v.Unlock()
	}
	return r
}