|
|
|
@@ -14,7 +14,8 @@ import (

 // conform for storing toots
 type DatabaseStorage interface {
     ListInstances() ([]*instance.Instance, error)
-    StoreInstances([]*instance.Instance) error
+    //StoreInstances([]*instance.Instance) error
+    SaveInstance(*instance.Instance) error
 }
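For reference, a minimal in-memory implementation of the DatabaseStorage interface as it now stands might look like the sketch below. It assumes the same package and imports as this file, and that instance.Instance exposes its hostname as a plain string field (as the map usage later in this diff suggests); memoryStorage and newMemoryStorage are hypothetical names, not part of this change.

// memoryStorage is a hypothetical map-backed DatabaseStorage, useful only
// for local testing or illustration.
type memoryStorage struct {
    mu        sync.Mutex
    instances map[string]*instance.Instance
}

func newMemoryStorage() *memoryStorage {
    return &memoryStorage{instances: make(map[string]*instance.Instance)}
}

// ListInstances returns every stored instance.
func (m *memoryStorage) ListInstances() ([]*instance.Instance, error) {
    m.mu.Lock()
    defer m.mu.Unlock()
    out := make([]*instance.Instance, 0, len(m.instances))
    for _, i := range m.instances {
        out = append(out, i)
    }
    return out, nil
}

// SaveInstance stores or overwrites a single instance, keyed by hostname
// (assumes Hostname is a string field, per the map usage in this diff).
func (m *memoryStorage) SaveInstance(i *instance.Instance) error {
    m.mu.Lock()
    defer m.mu.Unlock()
    m.instances[i.Hostname] = i
    return nil
}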
|
|
|
|
|
|
|
|
|
 // InstanceManager is the main data structure for the goroutine that manages
@@ -22,21 +23,47 @@ type DatabaseStorage interface {
 type InstanceManager struct {
     mu sync.Mutex
+    db DatabaseStorage
-    instances map[instance.Hostname]*instance.Instance
-    newInstanceNotifications chan instance.Hostname
+    instances map[string]*instance.Instance
+    newInstanceNotifications chan string
     tootDestination chan *toot.Toot
     startup time.Time
     hostDiscoveryParallelism int
     hostAdderSemaphore chan bool
+    nextDBSave time.Time
 }

 // New returns a new InstanceManager for use by the Process
-func New() *InstanceManager {
-    i := new(InstanceManager)
-    i.hostDiscoveryParallelism = viper.GetInt("HostDiscoveryParallelism")
-    i.hostAdderSemaphore = make(chan bool, i.hostDiscoveryParallelism)
-    i.instances = make(map[instance.Hostname]*instance.Instance)
-    return i
+func New(db DatabaseStorage) *InstanceManager {
+    im := new(InstanceManager)
+    im.db = db
+    im.hostAdderSemaphore = make(chan bool, viper.GetInt("HostDiscoveryParallelism"))
+    im.instances = make(map[string]*instance.Instance)
+    im.RestoreFromDB()
+    return im
 }
+
+func (im *InstanceManager) RestoreFromDB() {
+    newil, err := im.db.ListInstances()
+    if err != nil {
+        log.Panic().
+            Err(err).
+            Msg("cannot get instance list from db")
+    }
+    im.lock()
+    defer im.unlock()
+    for _, x := range newil {
+        im.instances[x.Hostname] = x
+    }
+}
+
+func (im *InstanceManager) SaveToDB() {
+    for _, x := range im.ListInstances() {
+        err := im.db.SaveInstance(x)
+        if err != nil {
+            log.Panic().
+                Err(err).
+                Msg("cannot write to db")
+        }
+    }
+}
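Taken together, New, RestoreFromDB and SaveToDB give the manager durable state: construction loads whatever the backend already holds, and the Manage loop (further down in this diff) flushes the in-memory map back out periodically. A hypothetical wiring sketch, reusing the memoryStorage example above; the real process presumably does this elsewhere:

// exampleWiring is a sketch only, not part of this change.
func exampleWiring() *InstanceManager {
    db := newMemoryStorage() // or the real postgres-backed DatabaseStorage
    im := New(db)            // New calls RestoreFromDB, so saved instances load immediately

    hostnames := make(chan string, 100) // fed by the InstanceLocator
    im.SetInstanceNotificationChannel(hostnames)

    go im.Manage() // the Manage loop calls SaveToDB roughly once a minute
    return im
}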
|
|
|
|
|
|
|
|
|
 // SetTootDestination provides the instancemanager with a channel to the
@@ -57,7 +84,7 @@ func (im *InstanceManager) unlock() {
 // InstanceManager about the channel from the InstanceLocator so that the
 // InstanceLocator can provide it/us (the InstanceManager) with new
 // instance.Hostnames. We (the manager) deduplicate the list ourselves.
-func (im *InstanceManager) SetInstanceNotificationChannel(via chan instance.Hostname) {
+func (im *InstanceManager) SetInstanceNotificationChannel(via chan string) {
     im.lock()
     defer im.unlock()
     im.newInstanceNotifications = via
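Two small notes here. First, the doc comment above still talks about instance.Hostnames even though the channel now carries plain strings; it probably wants the same treatment as the signature. Second, the producer side of this channel is the InstanceLocator; the following is a hypothetical producer, shown only to illustrate the contract:

// announceDiscoveredHosts is a hypothetical illustration, not part of this
// change: discoverers send bare hostname strings and the manager deduplicates.
func announceDiscoveredHosts(found []string, notify chan string) {
    for _, hn := range found {
        notify <- hn // duplicates are fine; addInstanceByHostname ignores known hosts
    }
}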
|
|
|
@@ -65,9 +92,9 @@ func (im *InstanceManager) SetInstanceNotificationChannel(via chan instance.Hostname) {
 func (im *InstanceManager) receiveSeedInstanceHostnames() {
     for _, x := range seeds.SeedInstances {
-        go func(tmp instance.Hostname) {
+        go func(tmp string) {
             im.addInstanceByHostname(tmp)
-        }(instance.Hostname(x))
+        }(x)
     }
 }
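One thing worth calling out in this hunk: the hostname is handed to the goroutine as an argument rather than captured, so each goroutine gets its own copy of the loop value. Before Go 1.22 that distinction matters; a minimal illustration, unrelated to this repository (assumes fmt is imported):

// loopCapture only illustrates the capture-vs-argument difference.
func loopCapture() {
    hosts := []string{"a.example", "b.example", "c.example"}

    // Before Go 1.22 these goroutines all share the single loop variable h,
    // so they may all print the last element.
    for _, h := range hosts {
        go func() { fmt.Println(h) }()
    }

    // Passing h as an argument copies it per iteration, as the patch does with x.
    for _, h := range hosts {
        go func(hn string) { fmt.Println(hn) }(h)
    }
}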
|
|
|
|
|
|
|
|
@@ -94,6 +121,11 @@ func (im *InstanceManager) Manage() {
             x = time.Now()
             im.logInstanceReport()
         }
+
+        if im.nextDBSave.Before(time.Now()) {
+            im.nextDBSave = time.Now().Add(time.Second * 60)
+            im.SaveToDB()
+        }
     }
 }
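The added block throttles persistence to at most one SaveToDB call per minute, piggybacking on whatever cadence the Manage loop already runs at. If the interval is something that might be tuned, it could come from configuration the same way HostDiscoveryParallelism does; a sketch, where DBSaveIntervalSeconds is a hypothetical setting and not part of this patch:

if im.nextDBSave.Before(time.Now()) {
    // DBSaveIntervalSeconds is hypothetical; this patch hardcodes 60 seconds.
    interval := time.Duration(viper.GetInt("DBSaveIntervalSeconds")) * time.Second
    im.nextDBSave = time.Now().Add(interval)
    im.SaveToDB()
}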
|
|
|
|
|
|
|
|
@@ -113,7 +145,7 @@ func (im *InstanceManager) managerLoop() {
     }
 }

-func (im *InstanceManager) hostnameExists(newhn instance.Hostname) bool {
+func (im *InstanceManager) hostnameExists(newhn string) bool {
     im.lock()
     defer im.unlock()
     for k := range im.instances {
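Since instances is keyed by hostname, the scan this loop performs could be replaced by a single map lookup, assuming the loop body (not shown in this hunk) is a plain equality check against newhn; a sketch of the equivalent function:

// Equivalent direct lookup; only valid if the omitted loop body compares
// map keys for plain equality.
func (im *InstanceManager) hostnameExists(newhn string) bool {
    im.lock()
    defer im.unlock()
    _, ok := im.instances[newhn]
    return ok
}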
|
|
|
@@ -124,7 +156,7 @@ func (im *InstanceManager) hostnameExists(newhn instance.Hostname) bool {
     return false
 }

-func (im *InstanceManager) addInstanceByHostname(newhn instance.Hostname) {
+func (im *InstanceManager) addInstanceByHostname(newhn string) {
     if im.hostnameExists(newhn) {
         // ignore adding new if we already know about it
         return
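hostAdderSemaphore is a buffered channel sized by HostDiscoveryParallelism, which is the usual Go idiom for bounding how many of these per-host additions run at once. The hunks shown here cut off before the semaphore is used, so the following is just the generic pattern, not a claim about what the rest of this function does:

// Generic buffered-channel semaphore pattern:
im.hostAdderSemaphore <- true // acquire: blocks once all HostDiscoveryParallelism slots are taken
// ... expensive per-host discovery / fetching work ...
<-im.hostAdderSemaphore // release the slot for the next host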
|
|
|
@@ -152,7 +184,7 @@ func (im *InstanceManager) addInstanceByHostname(newhn instance.Hostname) {
 }

 func (im *InstanceManager) receiveNewInstanceHostnames() {
-    var newhn instance.Hostname
+    var newhn string
     for {
         newhn = <-im.newInstanceNotifications
         // receive them fast out of the channel, let the adding function lock to add
|
|
|
|
@@ -178,7 +210,6 @@ func (im *InstanceManager) logInstanceReport() {
 // ListInstances dumps a slice of all Instances the InstanceManager knows
 // about
 func (im *InstanceManager) ListInstances() []*instance.Instance {
-    // FIXME make this pull from db
     var out []*instance.Instance
     im.lock()
     defer im.unlock()
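ListInstances still serves results from the in-memory map, which New now seeds from the database at startup and Manage periodically writes back. If the eventual intent is to read straight from the backend instead, the change is small; a hypothetical variant, not what this patch does:

// listInstancesFromDB is a hypothetical db-backed variant; in this patch the
// in-memory map remains the source of truth and is only synced to the db.
func (im *InstanceManager) listInstancesFromDB() []*instance.Instance {
    list, err := im.db.ListInstances()
    if err != nil {
        log.Panic().
            Err(err).
            Msg("cannot get instance list from db")
    }
    return list
}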
|
|
|
@@ -189,7 +220,6 @@ func (im *InstanceManager) ListInstances() []*instance.Instance {
 }

 func (im *InstanceManager) instanceSummaryReport() map[string]uint {
-    // FIXME make this pull from db
     r := make(map[string]uint)
     for _, v := range im.ListInstances() {
         v.Lock()
|
|
|
|