package gun

import (
	"context"
	"fmt"
	"sync"
	"time"
)
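
// Gun is a single node in a GUN-style graph network. It holds the set of
// connected peers, the pluggable storage, and the routing state used to
// deliver and rebroadcast messages. Create one with New and release its
// resources with Close.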
type Gun struct {
	// Never mutated, always overwritten
	currentPeers     []*Peer
	currentPeersLock sync.RWMutex

	storage          Storage
	soulGen          func() string
	peerErrorHandler func(*ErrPeer)
	peerSleepOnError time.Duration
	myPeerID         string
	tracking         Tracking

	serversCancelFn context.CancelFunc

	messageIDListeners     map[string]chan<- *messageReceived
	messageIDListenersLock sync.RWMutex
}
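
// Config holds the options New uses to build a Gun instance. Zero values are
// replaced with defaults: an in-memory Storage, DefaultSoulGen, a random
// MyPeerID, and DefaultPeerSleepOnError.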
type Config struct {
	PeerURLs         []string
	Servers          []Server
	Storage          Storage
	SoulGen          func() string
	PeerErrorHandler func(*ErrPeer)
	PeerSleepOnError time.Duration
	MyPeerID         string
	Tracking         Tracking
}
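
// Tracking controls which received data this node persists to its Storage.
// TrackingEverything persists every put it sees (see onPeerMessage),
// TrackingNothing persists none of it, and TrackingRequested (the zero value)
// is intended to persist only data this node has asked for.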
type Tracking int

const (
	TrackingRequested Tracking = iota
	TrackingNothing
	TrackingEverything
)
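
// DefaultPeerSleepOnError is how long a peer is left alone after an error
// when Config.PeerSleepOnError is not set.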
const DefaultPeerSleepOnError = 30 * time.Second
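
// DefaultOldestAllowedStorageValue is the value-age limit passed to the
// in-memory storage created when Config.Storage is nil.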
const DefaultOldestAllowedStorageValue = 7 * (60 * time.Minute)
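
// New connects to every URL in config.PeerURLs, applies defaults for any
// unset options, and starts the receive loops and servers. If any peer
// connection fails, the peers already created are closed and the error is
// returned. A minimal usage sketch (the peer URL is only a placeholder):
//
//	g, err := gun.New(ctx, gun.Config{
//		PeerURLs: []string{"ws://localhost:8080/gun"},
//	})
//	if err != nil {
//		// handle error
//	}
//	defer g.Close()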
func New(ctx context.Context, config Config) (*Gun, error) {
	g := &Gun{
		currentPeers:       make([]*Peer, len(config.PeerURLs)),
		storage:            config.Storage,
		soulGen:            config.SoulGen,
		peerErrorHandler:   config.PeerErrorHandler,
		peerSleepOnError:   config.PeerSleepOnError,
		myPeerID:           config.MyPeerID,
		tracking:           config.Tracking,
		messageIDListeners: map[string]chan<- *messageReceived{},
	}
	// Create all the peers
	sleepOnError := config.PeerSleepOnError
	if sleepOnError == 0 {
		sleepOnError = DefaultPeerSleepOnError
	}
	var err error
	for i := 0; i < len(config.PeerURLs) && err == nil; i++ {
		peerURL := config.PeerURLs[i]
		newConn := func() (PeerConn, error) { return NewPeerConn(ctx, peerURL) }
		if g.currentPeers[i], err = newPeer(peerURL, newConn, sleepOnError); err != nil {
			err = fmt.Errorf("Failed connecting to peer %v: %v", peerURL, err)
		}
	}
	// If there was an error, we need to close what we did create
	if err != nil {
		for _, peer := range g.currentPeers {
			if peer != nil {
				peer.Close()
			}
		}
		return nil, err
	}
	// Set defaults
	if g.storage == nil {
		g.storage = NewStorageInMem(DefaultOldestAllowedStorageValue)
	}
	if g.soulGen == nil {
		g.soulGen = DefaultSoulGen
	}
	if g.myPeerID == "" {
		g.myPeerID = randString(9)
	}
	// Start receiving from peers
	for _, peer := range g.currentPeers {
		go g.startReceiving(peer)
	}
	// Start all the servers
	go g.startServers(config.Servers)
	return g, nil
}
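
// Scoped returns a Scoped value rooted at key, descending into any children
// given, e.g. g.Scoped(ctx, "users", "alice", "name").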
func (g *Gun) Scoped(ctx context.Context, key string, children ...string) *Scoped {
	s := newScoped(g, nil, key)
	if len(children) > 0 {
		s = s.Scoped(ctx, children[0], children[1:]...)
	}
	return s
}
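
// Close closes every peer, stops the servers, and closes the storage,
// returning a combined error if more than one step fails.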
func (g *Gun) Close() error {
	var errs []error
	for _, p := range g.peers() {
		if err := p.Close(); err != nil {
			errs = append(errs, err)
		}
	}
	g.serversCancelFn()
	if err := g.storage.Close(); err != nil {
		errs = append(errs, err)
	}
	if len(errs) == 0 {
		return nil
	} else if len(errs) == 1 {
		return errs[0]
	} else {
		return fmt.Errorf("Multiple errors: %v", errs)
	}
}
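
// peers returns the current peer slice under a read lock. Callers must treat
// it as read-only; the slice is replaced wholesale on add/remove.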
func (g *Gun) peers() []*Peer {
	g.currentPeersLock.RLock()
	defer g.currentPeersLock.RUnlock()
	return g.currentPeers
}
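
// addPeer copies the peer slice and appends p, so readers holding the old
// slice are unaffected.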
func (g *Gun) addPeer(p *Peer) {
	g.currentPeersLock.Lock()
	defer g.currentPeersLock.Unlock()
	prev := g.currentPeers
	g.currentPeers = make([]*Peer, len(prev)+1)
	copy(g.currentPeers, prev)
	g.currentPeers[len(prev)] = p
}
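
// removePeer builds a new peer slice without p.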
func (g *Gun) removePeer(p *Peer) {
	g.currentPeersLock.Lock()
	defer g.currentPeersLock.Unlock()
	prev := g.currentPeers
	g.currentPeers = make([]*Peer, 0, len(prev))
	for _, peer := range prev {
		if peer != p {
			g.currentPeers = append(g.currentPeers, peer)
		}
	}
}
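
// send fans msg out to every peer except ignorePeer. It returns immediately;
// per-peer failures are reported on the returned channel, which is closed
// once all sends have finished.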
func (g *Gun) send(ctx context.Context, msg *Message, ignorePeer *Peer) <-chan *ErrPeer {
	peers := g.peers()
	ch := make(chan *ErrPeer, len(peers))
	// Everything async
	go func() {
		defer close(ch)
		var wg sync.WaitGroup
		for _, peer := range peers {
			if peer == ignorePeer {
				continue
			}
			wg.Add(1)
			go func(peer *Peer) {
				defer wg.Done()
				// If the send fails, report the error and drop the peer when it
				// can't reconnect
				if _, err := peer.send(ctx, msg); err != nil {
					if !peer.reconnectSupported() {
						g.removePeer(peer)
					}
					peerErr := &ErrPeer{err, peer}
					go g.onPeerError(peerErr)
					ch <- peerErr
				}
			}(peer)
		}
		wg.Wait()
	}()
	return ch
}
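
// startReceiving loops on peer.receive until the peer is closed, handing each
// received message to onPeerMessage. On receive failure it reports the error,
// then either sleeps (if the peer can reconnect) or removes the peer.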
func (g *Gun) startReceiving(peer *Peer) {
	// TODO: some kind of overall context is probably needed
	ctx, cancelFn := context.WithCancel(context.TODO())
	defer cancelFn()
	for !peer.Closed() {
		// We might not be able to receive because the peer is sleeping off an
		// error that happened here or on a just-before send.
		if ok, msgs, err := peer.receive(ctx); !ok {
			if err != nil {
				go g.onPeerError(&ErrPeer{err, peer})
			}
			// If the peer can reconnect, sleep at least the error duration; otherwise remove it
			if peer.reconnectSupported() {
				time.Sleep(g.peerSleepOnError)
			} else {
				g.removePeer(peer)
			}
		} else {
			// Go over each message and see if it needs delivering or rebroadcasting
			for _, msg := range msgs {
				g.onPeerMessage(ctx, &messageReceived{Message: msg, peer: peer})
			}
		}
	}
}
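
// onPeerMessage routes a single received message: it persists puts when
// tracking everything, hands acks to any registered message-ID listener,
// answers DAM peer-ID exchanges, and rebroadcasts anything else to the other
// peers.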
func (g *Gun) onPeerMessage(ctx context.Context, msg *messageReceived) {
	// If we're tracking everything, persist all puts here.
	if g.tracking == TrackingEverything {
		for parentSoul, node := range msg.Put {
			for field, value := range node.Values {
				if state, ok := node.Metadata.State[field]; ok {
					// TODO: warn on error or something
					g.storage.Put(ctx, parentSoul, field, value, state, false)
				}
			}
		}
	}
	// If there is a listener for this message, use it
	if msg.Ack != "" {
		g.messageIDListenersLock.RLock()
		l := g.messageIDListeners[msg.Ack]
		g.messageIDListenersLock.RUnlock()
		if l != nil {
			go safeReceivedMessageSend(l, msg)
			return
		}
	}
	// DAM messages are either requests for our ID or the peer setting theirs
	if msg.DAM != "" {
		if msg.PID == "" {
			// This is a request, so set the PID and send it back
			msg.PID = g.myPeerID
			if _, err := msg.peer.send(ctx, msg.Message); err != nil {
				go g.onPeerError(&ErrPeer{err, msg.peer})
				if !msg.peer.reconnectSupported() {
					g.removePeer(msg.peer)
				}
			}
		} else {
			// This is the peer telling us its ID
			msg.peer.id = msg.PID
		}
		return
	}
	// An unhandled message means rebroadcast
	g.send(ctx, msg.Message, msg.peer)
}
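
// onPeerError forwards a peer error to the configured handler, if any.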
func (g *Gun) onPeerError(err *ErrPeer) {
	if g.peerErrorHandler != nil {
		g.peerErrorHandler(err)
	}
}
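
// registerMessageIDListener routes acks for the given message ID to ch until
// unregisterMessageIDListener is called.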
func (g *Gun) registerMessageIDListener(id string, ch chan<- *messageReceived) {
	g.messageIDListenersLock.Lock()
	defer g.messageIDListenersLock.Unlock()
	g.messageIDListeners[id] = ch
}
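
// unregisterMessageIDListener removes the listener for the given message ID.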
func (g *Gun) unregisterMessageIDListener(id string) {
	g.messageIDListenersLock.Lock()
	defer g.messageIDListenersLock.Unlock()
	delete(g.messageIDListeners, id)
}
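
// safeReceivedMessageSend delivers msg to ch, swallowing the panic that
// occurs if ch has already been closed.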
func safeReceivedMessageSend(ch chan<- *messageReceived, msg *messageReceived) {
	// We may be sending on a closed channel here, so ignore the panic
	defer func() { recover() }()
	ch <- msg
}