Fixed: functional with concurrent maps

This commit is contained in:
kayos@tcp.direct 2022-03-16 08:11:05 -07:00
parent 23cea998f1
commit b8186e2144
Signed by: kayos
GPG Key ID: 4B841471B4BEE979
12 changed files with 171 additions and 385 deletions

View File

@ -12,17 +12,10 @@ import (
"github.com/araddon/dateparse"
)
const (
stateUnlocked uint32 = iota
stateLocked
)
// registerBuiltin sets up built-in handlers, based on client
// configuration.
func (c *Client) registerBuiltins() {
c.debug.Print("registering built-in handlers")
c.Handlers.mu.Lock()
defer c.Handlers.mu.Unlock()
// Built-in things that should always be supported.
c.Handlers.register(true, true, RPL_WELCOME, HandlerFunc(handleConnect))
@ -107,10 +100,19 @@ func handleConnect(c *Client, e Event) {
c.state.nick.Store(e.Params[0])
c.state.notify(c, UPDATE_GENERAL)
split := strings.Split(e.Params[1], " ")
if strings.HasPrefix(e.Params[1], "Welcome to the") && len(split) > 3 {
if len(split[3]) > 0 {
c.state.network = split[3]
c.IRCd.Network = split[3]
search:
for i, artifact := range split {
switch strings.ToLower(artifact) {
case "welcome", "to":
continue
case "the":
if len(split) < i {
break search
}
c.IRCd.Network = split[i+1]
break search
default:
break search
}
}
}
@ -221,9 +223,7 @@ func handlePART(c *Client, e Event) {
defer c.state.notify(c, UPDATE_STATE)
if e.Source.ID() == c.GetID() {
c.state.deleteChannel(channel)
return
}
@ -376,7 +376,6 @@ func handleNICK(c *Client, e Event) {
if len(e.Params) >= 1 {
c.state.renameUser(e.Source.ID(), e.Last())
}
c.state.notify(c, UPDATE_STATE)
}
@ -404,7 +403,6 @@ func handleGLOBALUSERS(c *Client, e Event) {
if err != nil {
return
}
c.IRCd.UserCount = cusers
c.IRCd.MaxUserCount = musers
}
@ -418,7 +416,6 @@ func handleLOCALUSERS(c *Client, e Event) {
if err != nil {
return
}
c.IRCd.LocalUserCount = cusers
c.IRCd.LocalMaxUserCount = musers
}
@ -428,7 +425,6 @@ func handleLUSERCHANNELS(c *Client, e Event) {
if err != nil {
return
}
c.IRCd.ChannelCount = ccount
}
@ -437,7 +433,6 @@ func handleLUSEROP(c *Client, e Event) {
if err != nil {
return
}
c.IRCd.OperCount = ocount
}
@ -462,9 +457,7 @@ func handleCREATED(c *Client, e Event) {
if err != nil {
return
}
c.IRCd.Compiled = compiled
c.state.notify(c, UPDATE_GENERAL)
}
@ -484,10 +477,8 @@ func handleYOURHOST(c *Client, e Event) {
if len(host)+len(ver) == 0 {
return
}
c.IRCd.Host = host
c.IRCd.Version = ver
c.state.notify(c, UPDATE_GENERAL)
}
@ -533,13 +524,11 @@ func handleISUPPORT(c *Client, e Event) {
// handleMOTD handles incoming MOTD messages and buffers them up for use with
// Client.ServerMOTD().
func handleMOTD(c *Client, e Event) {
defer c.state.notify(c, UPDATE_GENERAL)
// Beginning of the MOTD.
if e.Command == RPL_MOTDSTART {
c.state.motd = ""
return
}
@ -548,7 +537,6 @@ func handleMOTD(c *Client, e Event) {
c.state.motd += "\n"
}
c.state.motd += e.Last()
}
// handleNAMES handles incoming NAMES queries, of which lists all users in
@ -608,7 +596,6 @@ func handleNAMES(c *Client, e Event) {
perms.set(modes, false)
user.Perms.set(channel.Name, perms)
}
c.state.notify(c, UPDATE_STATE)
}

36
cap.go
View File

@ -9,6 +9,8 @@ import (
"strconv"
"strings"
"time"
cmap "github.com/orcaman/concurrent-map"
)
// Something not in the list? Depending on the type of capability, you can
@ -118,12 +120,11 @@ func parseCap(raw string) map[string]map[string]string {
// This will lock further registration until we have acknowledged (or denied)
// the capabilities.
func handleCAP(c *Client, e Event) {
if len(e.Params) >= 2 && e.Params[1] == CAP_DEL {
caps := parseCap(e.Last())
for capab := range caps {
// TODO: test the deletion.
delete(c.state.enabledCap, capab)
c.state.enabledCap.Remove(capab)
}
return
}
@ -146,7 +147,7 @@ func handleCAP(c *Client, e Event) {
}
if len(possible[capName]) == 0 || len(caps[capName]) == 0 {
c.state.tmpCap[capName] = caps[capName]
c.state.tmpCap.Set(capName, caps[capName])
continue
}
@ -167,7 +168,7 @@ func handleCAP(c *Client, e Event) {
continue
}
c.state.tmpCap[capName] = caps[capName]
c.state.tmpCap.Set(capName, caps[capName])
}
// Indicates if this is a multi-line LS. (3 args means it's the
@ -180,10 +181,11 @@ func handleCAP(c *Client, e Event) {
}
// Let them know which ones we'd like to enable.
reqKeys := make([]string, len(c.state.tmpCap))
reqKeys := make([]string, len(c.state.tmpCap.Keys()))
i := 0
for k := range c.state.tmpCap {
reqKeys[i] = k
for k := range c.state.tmpCap.IterBuffered() {
kv := k.Val.(string)
reqKeys[i] = kv
i++
}
c.write(&Event{Command: CAP, Params: []string{CAP_REQ, strings.Join(reqKeys, " ")}})
@ -193,10 +195,12 @@ func handleCAP(c *Client, e Event) {
if len(e.Params) == 3 && e.Params[1] == CAP_ACK {
enabled := strings.Split(e.Last(), " ")
for _, capab := range enabled {
if val, ok := c.state.tmpCap[capab]; ok {
c.state.enabledCap[capab] = val
val, ok := c.state.tmpCap.Get(capab)
if ok {
val = val.(map[string]string)
c.state.enabledCap.Set(capab, val)
} else {
c.state.enabledCap[capab] = nil
c.state.enabledCap.Set(capab, nil)
}
}
@ -205,9 +209,10 @@ func handleCAP(c *Client, e Event) {
// Handle STS, and only if it's something specifically we enabled (client
// may choose to disable girc automatic STS, and do it themselves).
if sts, sok := c.state.enabledCap["sts"]; sok && !c.Config.DisableSTS {
stsi, sok := c.state.enabledCap.Get("sts")
if sok && !c.Config.DisableSTS {
var isError bool
sts := stsi.(map[string]string)
// Some things are updated in the policy depending on if the current
// connection is over tls or not.
var hasTLSConnection bool
@ -284,9 +289,10 @@ func handleCAP(c *Client, e Event) {
// Re-initialize the tmpCap, so if we get multiple 'CAP LS' requests
// due to cap-notify, we can re-evaluate what we can support.
c.state.tmpCap = make(map[string]map[string]string)
c.state.tmpCap = cmap.New()
if _, ok := c.state.enabledCap["sasl"]; ok && c.Config.SASL != nil {
_, ok := c.state.enabledCap.Get("sasl")
if ok && c.Config.SASL != nil {
c.write(&Event{Command: AUTHENTICATE, Params: []string{c.Config.SASL.Method()}})
// Don't "CAP END", since we want to authenticate.
return
@ -342,11 +348,9 @@ func handleACCOUNT(c *Client, e Event) {
account = ""
}
c.state.Lock()
user := c.state.lookupUser(e.Source.Name)
if user != nil {
user.Extras.Account = account
}
c.state.Unlock()
c.state.notify(c, UPDATE_STATE)
}

View File

@ -5,6 +5,7 @@
package girc
import (
"os"
"reflect"
"testing"
)
@ -16,7 +17,7 @@ func TestCapSupported(t *testing.T) {
User: "user",
SASL: &SASLPlain{User: "test", Pass: "example"},
SupportedCaps: map[string][]string{"example": nil},
Debug: newDebugWriter(t),
Debug: os.Stdout,
})
var ok bool

View File

@ -19,7 +19,6 @@ import (
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
cmap "github.com/orcaman/concurrent-map"
@ -297,7 +296,6 @@ func New(config Config) *Client {
tx: make(chan *Event, 25),
CTCP: newCTCP(),
initTime: time.Now(),
atom: stateUnlocked,
}
c.IRCd = Server{
@ -341,6 +339,7 @@ func New(config Config) *Client {
// Give ourselves a new state.
c.state = &state{}
c.state.RWMutex = &sync.RWMutex{}
c.state.reset(true)
c.state.client = c
@ -596,9 +595,12 @@ func (c *Client) GetHost() (host string) {
func (c *Client) ChannelList() []string {
c.panicIfNotTracking()
channels := make([]string, 0, len(c.state.channels))
channels := make([]string, 0, len(c.state.channels.Keys()))
for channel := range c.state.channels.IterBuffered() {
chn := channel.Val.(*Channel)
if !chn.UserIn(c.GetNick()) {
continue
}
channels = append(channels, chn.Name)
}
@ -729,7 +731,7 @@ func (c *Client) NetworkName() (name string) {
var ok bool
if len(c.state.network) > 0 {
return
return c.state.network
}
name, ok = c.GetServerOption("NETWORK")
@ -788,12 +790,7 @@ func (c *Client) HasCapability(name string) (has bool) {
name = strings.ToLower(name)
for atomic.CompareAndSwapUint32(&c.atom, stateUnlocked, stateLocked) {
randSleep()
}
defer atomic.StoreUint32(&c.atom, stateUnlocked)
for key := range c.state.enabledCap {
for _, key := range c.state.enabledCap.Keys() {
key = strings.ToLower(key)
if key == name {
has = true

View File

@ -5,24 +5,12 @@
package girc
import (
"os"
"strings"
"testing"
"time"
)
// debugWriter adapts a *testing.T to io.Writer so client debug output
// lands in the test log.
type debugWriter struct {
	t *testing.T
}

// newDebugWriter returns a debugWriter bound to t.
func newDebugWriter(t *testing.T) debugWriter {
	return debugWriter{t: t}
}

// Write logs p via t.Logf and reports the full length as written.
// The log call is made synchronously: the original dispatched it in a
// goroutine, which can run after the test has completed and trigger a
// "Log in goroutine after test has completed" panic.
func (d debugWriter) Write(p []byte) (n int, err error) {
	d.t.Logf("%v", string(p))
	return len(p), nil
}
func TestDisableTracking(t *testing.T) {
client := New(Config{
Server: "dummy.int",
@ -30,7 +18,7 @@ func TestDisableTracking(t *testing.T) {
Nick: "test",
User: "test",
Name: "Testing123",
Debug: newDebugWriter(t),
Debug: os.Stdout,
})
if client.Handlers.internal.len() < 1 {
@ -96,7 +84,7 @@ func TestClientLifetime(t *testing.T) {
Nick: "test",
User: "test",
Name: "Testing123",
Debug: newDebugWriter(t),
Debug: os.Stdout,
})
tm := client.Lifetime()
@ -107,7 +95,7 @@ func TestClientLifetime(t *testing.T) {
}
func TestClientUptime(t *testing.T) {
c, conn, server := genMockConn(t)
c, conn, server := genMockConn()
defer conn.Close()
defer server.Close()
go mockReadBuffer(conn)
@ -152,7 +140,7 @@ func TestClientUptime(t *testing.T) {
}
func TestClientGet(t *testing.T) {
c, conn, server := genMockConn(t)
c, conn, server := genMockConn()
defer conn.Close()
defer server.Close()
go mockReadBuffer(conn)
@ -183,7 +171,7 @@ func TestClientGet(t *testing.T) {
}
func TestClientClose(t *testing.T) {
c, conn, server := genMockConn(t)
c, conn, server := genMockConn()
defer server.Close()
defer conn.Close()
go mockReadBuffer(conn)

View File

@ -1,191 +0,0 @@
package cmdhandler
import (
"errors"
"fmt"
"regexp"
"strings"
"github.com/yunginnanet/girc-atomic"
)
// Input is a wrapper for events, based around private messages.
type Input struct {
	// Origin is the event which triggered the command.
	Origin *girc.Event
	// Args are the whitespace-split arguments following the command name.
	Args []string
	// RawArgs is the unsplit argument string as originally typed.
	RawArgs string
}
// Command is an IRC command, supporting aliases, help documentation and easy
// wrapping for message inputs.
type Command struct {
	// Name of command, e.g. "search" or "ping".
	Name string
	// Aliases for the above command, e.g. "s" for search, or "p" for "ping".
	Aliases []string
	// Help documentation. Should be in the format "<arg> <arg> [arg] --
	// something useful here"
	Help string
	// MinArgs is the minimum required arguments for the command. Defaults to
	// 0, which means multiple, or no arguments can be supplied. If set
	// above 0, this means that the command handler will throw an error asking
	// the person to check "<prefix>help <command>" for more info.
	MinArgs int
	// Fn is the function which is executed when the command is ran from a
	// private message, or channel. Note that Execute invokes Fn in its own
	// goroutine, so Fn must be safe to run concurrently with the client.
	Fn func(*girc.Client, *Input)
}
// genHelp renders the formatted help line for the command, including the
// invocation prefix and any aliases, e.g.:
//
//	{b}!search{b} ({b}!s{b}) :: search for a thing
func (c *Command) genHelp(prefix string) string {
	out := "{b}" + prefix + c.Name + "{b}"
	// len() of a nil slice is 0, so no separate nil check is needed
	// (staticcheck S1009).
	if len(c.Aliases) > 0 {
		out += " ({b}" + prefix + strings.Join(c.Aliases, "{b}, {b}"+prefix) + "{b})"
	}
	out += " :: " + c.Help
	return out
}
// CmdHandler is an irc command parser and execution format which you could
// use as an example for building your own version/bot.
//
// An example of how you would register this with girc:
//
// ch, err := cmdhandler.New("!")
// if err != nil {
// panic(err)
// }
//
// ch.Add(&cmdhandler.Command{
// Name: "ping",
// Help: "Sends a pong reply back to the original user.",
// Fn: func(c *girc.Client, input *cmdhandler.Input) {
// c.Commands.ReplyTo(*input.Origin, "pong!")
// },
// })
//
// client.Handlers.AddHandler(girc.PRIVMSG, ch)
type CmdHandler struct {
	prefix string         // Invocation prefix, e.g. "!" or ".".
	re     *regexp.Regexp // Compiled from cmdMatch with the quoted prefix.
	cmds   map[string]*Command // Registered commands, keyed by name AND each alias.
}
// cmdMatch is the printf template for the invocation regexp; %s is filled
// with the (quoted) prefix. Group 1 captures the command name, group 2 the
// optional argument string following a single space.
var cmdMatch = `^%s([a-z0-9-_]{1,20})(?: (.*))?$`
// New returns a new CmdHandler based on the specified command prefix. A good
// prefix is a single character, and easy to remember/use. E.g. "!", or ".".
func New(prefix string) (*CmdHandler, error) {
	pattern := fmt.Sprintf(cmdMatch, regexp.QuoteMeta(prefix))
	matcher, err := regexp.Compile(pattern)
	if err != nil {
		return nil, err
	}

	handler := &CmdHandler{
		prefix: prefix,
		re:     matcher,
		cmds:   make(map[string]*Command),
	}
	return handler, nil
}
// validName constrains command names and aliases: 1-20 characters of
// lowercase alphanumerics, "-" or "_".
var validName = regexp.MustCompile(`^[a-z0-9-_]{1,20}$`)
// Add registers a new command to the handler. Note that you cannot remove
// commands once added, unless you add another CmdHandler to the client.
//
// Registration is all-or-nothing: if the command name or any alias is
// invalid or already taken, the handler's command table is left unchanged.
func (ch *CmdHandler) Add(cmd *Command) error {
	if cmd == nil {
		return errors.New("nil command provided to CmdHandler")
	}

	cmd.Name = strings.ToLower(cmd.Name)
	if !validName.MatchString(cmd.Name) {
		return fmt.Errorf("invalid command name: %q (req: %q)", cmd.Name, validName.String())
	}

	// Normalize and validate aliases up front. Ranging over a nil slice
	// is a no-op, so no explicit nil check is needed.
	for i := range cmd.Aliases {
		cmd.Aliases[i] = strings.ToLower(cmd.Aliases[i])
		if !validName.MatchString(cmd.Aliases[i]) {
			return fmt.Errorf("invalid command name: %q (req: %q)", cmd.Aliases[i], validName.String())
		}
	}

	if cmd.MinArgs < 0 {
		cmd.MinArgs = 0
	}

	if _, ok := ch.cmds[cmd.Name]; ok {
		return fmt.Errorf("command already registered: %s", cmd.Name)
	}

	// Check every alias for collisions BEFORE inserting anything, so a
	// late conflict cannot leave the command half-registered (the original
	// inserted the name first and could bail out mid-way). "seen" also
	// rejects an alias duplicating the name or another alias, matching the
	// original's error behavior.
	seen := map[string]struct{}{cmd.Name: {}}
	for _, alias := range cmd.Aliases {
		_, taken := ch.cmds[alias]
		if _, dup := seen[alias]; taken || dup {
			return fmt.Errorf("alias already registered: %s", alias)
		}
		seen[alias] = struct{}{}
	}

	ch.cmds[cmd.Name] = cmd
	// Since we'd be storing pointers, duplicates do not matter.
	for _, alias := range cmd.Aliases {
		ch.cmds[alias] = cmd
	}

	return nil
}
// Execute satisfies the girc.Handler interface. It parses PRIVMSG events
// against the handler's prefix regexp, serves "help" requests itself, and
// dispatches any matching registered command in a new goroutine.
func (ch *CmdHandler) Execute(client *girc.Client, event girc.Event) {
	if event.Source == nil || event.Command != girc.PRIVMSG {
		return
	}
	// parsed: [full match, command name, raw argument string].
	parsed := ch.re.FindStringSubmatch(event.Last())
	if len(parsed) != 3 {
		return
	}
	invCmd := strings.ToLower(parsed[1])
	args := strings.Split(parsed[2], " ")
	if len(args) == 1 && args[0] == "" {
		// No arguments were supplied at all.
		args = []string{}
	}
	if invCmd == "help" {
		if len(args) == 0 {
			client.Cmd.ReplyTo(event, girc.Fmt("type '{b}!help {blue}<command>{c}{b}' to optionally get more info about a specific command."))
			return
		}
		args[0] = strings.ToLower(args[0])
		if _, ok := ch.cmds[args[0]]; !ok {
			client.Cmd.ReplyTof(event, girc.Fmt("unknown command {b}%q{b}."), args[0])
			return
		}
		if ch.cmds[args[0]].Help == "" {
			client.Cmd.ReplyTof(event, girc.Fmt("there is no help documentation for {b}%q{b}"), args[0])
			return
		}
		client.Cmd.ReplyTo(event, girc.Fmt(ch.cmds[args[0]].genHelp(ch.prefix)))
		return
	}
	cmd, ok := ch.cmds[invCmd]
	if !ok {
		return
	}
	if len(args) < cmd.MinArgs {
		// Not enough arguments; point the user at the help entry.
		client.Cmd.ReplyTof(event, girc.Fmt("not enough arguments supplied for {b}%q{b}. try '{b}%shelp %s{b}'?"), invCmd, ch.prefix, invCmd)
		return
	}
	in := &Input{
		Origin: &event,
		Args: args,
		RawArgs: parsed[2],
	}
	// Run the command asynchronously so a slow handler does not block the
	// client's event processing.
	go cmd.Fn(client, in)
}

11
conn.go
View File

@ -441,11 +441,6 @@ func (c *Client) Send(event *Event) {
event.Network = c.NetworkName()
for atomic.CompareAndSwapUint32(&c.atom, stateUnlocked, stateLocked) {
randSleep()
}
defer atomic.StoreUint32(&c.atom, stateUnlocked)
if !c.Config.AllowFlood {
// Drop the event early as we're disconnected, this way we don't have to wait
// the (potentially long) rate limit delay before dropping.
@ -515,7 +510,7 @@ func (c *Client) sendLoop(ctx context.Context, errs chan error, wg *sync.WaitGro
//
var in bool
for i := 0; i < len(c.state.enabledCap); i++ {
if _, ok := c.state.enabledCap["message-tags"]; ok {
if _, ok := c.state.enabledCap.Get("message-tags"); ok {
in = true
break
}
@ -580,9 +575,9 @@ type ErrTimedOut struct {
func (ErrTimedOut) Error() string { return "timed out waiting for a requested PING response" }
func (c *Client) pingLoop(ctx context.Context, errs chan error, wg *sync.WaitGroup) {
defer wg.Done()
// Don't run the pingLoop if they want to disable it.
if c.Config.PingDelay <= 0 {
wg.Done()
return
}
@ -621,6 +616,7 @@ func (c *Client) pingLoop(ctx context.Context, errs chan error, wg *sync.WaitGro
Delay: c.Config.PingDelay,
}
wg.Done()
return
}
@ -628,6 +624,7 @@ func (c *Client) pingLoop(ctx context.Context, errs chan error, wg *sync.WaitGro
c.Cmd.Ping(fmt.Sprintf("%d", time.Now().UnixNano()))
case <-ctx.Done():
wg.Done()
return
}
}

View File

@ -8,6 +8,7 @@ import (
"bufio"
"bytes"
"net"
"os"
"sync/atomic"
"testing"
"time"
@ -93,14 +94,14 @@ func TestRate(t *testing.T) {
return
}
func genMockConn(t *testing.T) (client *Client, clientConn net.Conn, serverConn net.Conn) {
func genMockConn() (client *Client, clientConn net.Conn, serverConn net.Conn) {
client = New(Config{
Server: "dummy.int",
Port: 6667,
Nick: "test",
User: "test",
Name: "Testing123",
Debug: newDebugWriter(t),
Debug: os.Stdout,
})
conn1, conn2 := net.Pipe()
@ -108,19 +109,14 @@ func genMockConn(t *testing.T) (client *Client, clientConn net.Conn, serverConn
return client, conn1, conn2
}
func mockReadBuffer(conn net.Conn) error {
func mockReadBuffer(conn net.Conn) {
// Accept all outgoing writes from the client.
b := bufio.NewReader(conn)
for {
err := conn.SetReadDeadline(time.Now().Add(10 * time.Second))
conn.SetReadDeadline(time.Now().Add(10 * time.Second))
_, err := b.ReadString(byte('\n'))
if err != nil {
return err
}
var str string
str, err = b.ReadString(byte('\n'))
println(str)
if err != nil {
return err
return
}
}
}

View File

@ -167,6 +167,7 @@ func (c *Caller) Len() int {
// Count is much like Caller.Len(), however it counts the number of
// registered handlers for a given command.
func (c *Caller) Count(cmd string) int {
cmd = strings.ToUpper(cmd)
return c.external.lenFor(cmd)
}
@ -198,63 +199,91 @@ func (c *Caller) cuidToID(input string) (cmd, uid string) {
return input[:i], input[i+1:]
}
type execStack struct {
Handler
cuid string
}
// exec executes all handlers pertaining to specified event. Internal first,
// then external.
//
// Please note that there is no specific order/priority for which the handlers
// are executed.
func (c *Caller) exec(command string, bg bool, client *Client, event *Event) {
handle := func(wgr *sync.WaitGroup, h handlerTuple) {
c.debug.Printf("(%s) exec %s => %s", c.parent.Config.Nick, command, h.cuid)
start := time.Now()
// Build a stack of handlers which can be executed concurrently.
var stack []execStack
if bg {
go func() {
defer wgr.Done()
if client.Config.RecoverFunc != nil {
defer recoverHandlerPanic(client, event, h.cuid, 3)
}
h.handler.Execute(client, *event)
c.debug.Printf("(%s) done %s == %s", c.parent.Config.Nick,
h.cuid, time.Since(start))
}()
return
// Get internal handlers first.
ihm, iok := c.internal.cm.Get(command)
if iok {
hmap := ihm.(cmap.ConcurrentMap)
for _, cuid := range hmap.Keys() {
if (strings.HasSuffix(cuid, ":bg") && !bg) || (!strings.HasSuffix(cuid, ":bg") && bg) {
continue
}
hi, _ := hmap.Get(cuid)
hndlr, ok := hi.(Handler)
if !ok {
continue
}
stack = append(stack, execStack{hndlr, cuid})
}
if client.Config.RecoverFunc != nil {
defer recoverHandlerPanic(client, event, h.cuid, 3)
}
// Then external handlers.
ehm, eok := c.external.cm.Get(command)
if eok {
hmap := ehm.(cmap.ConcurrentMap)
for _, cuid := range hmap.Keys() {
if (strings.HasSuffix(cuid, ":bg") && !bg) || (!strings.HasSuffix(cuid, ":bg") && bg) {
continue
}
hi, _ := hmap.Get(cuid)
hndlr, ok := hi.(Handler)
if !ok {
continue
}
stack = append(stack, execStack{hndlr, cuid})
}
h.handler.Execute(client, *event)
c.debug.Printf("(%s) done %s == %s", c.parent.Config.Nick, h.cuid, time.Since(start))
wgr.Done()
}
// Run all handlers concurrently across the same event. This should
// still help prevent mis-ordered events, while improving execution
// speed.
var wg sync.WaitGroup
wg.Add(len(stack))
for i := 0; i < len(stack); i++ {
go func(index int) {
c.debug.Printf("(%s) [%d/%d] exec %s => %s", c.parent.Config.Nick,
index+1, len(stack), stack[index].cuid, command)
start := time.Now()
internals, iok := c.internal.getAllHandlersFor(command)
if iok {
for h := range internals {
wg.Add(1)
go handle(&wg, h)
}
}
externals, eok := c.external.getAllHandlersFor(command)
if eok {
for h := range externals {
wg.Add(1)
go handle(&wg, h)
}
}
if bg {
go func() {
defer wg.Done()
if client.Config.RecoverFunc != nil {
defer recoverHandlerPanic(client, event, stack[index].cuid, 3)
}
stack[index].Handler.Execute(client, *event)
c.debug.Printf("(%s) done %s == %s", c.parent.Config.Nick,
stack[index].cuid, time.Since(start))
}()
return
}
defer wg.Done()
// Wait for all of the handlers to complete. Not doing this may cause
// new events from becoming ahead of older handlers.
c.debug.Printf("(%s) wg.Wait()", c.parent.Config.Nick)
wg.Wait()
if client.Config.RecoverFunc != nil {
defer recoverHandlerPanic(client, event, stack[index].cuid, 3)
}
stack[index].Handler.Execute(client, *event)
c.debug.Printf("(%s) done %s == %s", c.parent.Config.Nick, stack[index].cuid, time.Since(start))
}(i)
// new events from becoming ahead of older handlers.
c.debug.Printf("(%s) wg.Wait()", c.parent.Config.Nick)
wg.Wait()
}
}
// ClearAll clears all external handlers currently setup within the client.
@ -283,11 +312,8 @@ func (c *Caller) Clear(cmd string) {
// indicates that it existed, and has been removed. If not success, it
// wasn't a registered handler.
func (c *Caller) Remove(cuid string) (success bool) {
c.mu.Lock()
success = c.remove(cuid)
c.mu.Unlock()
return success
c.remove(cuid)
return true
}
// remove is much like Remove, however is NOT concurrency safe. Lock Caller.mu
@ -359,12 +385,12 @@ func (c *Caller) register(internal, bg bool, cmd string, handler Handler) (cuid
} else {
chandlers = cmap.New()
}
parent.cm.SetIfAbsent(cmd, chandlers)
chandlers.Set(uid, handler)
_, file, line, _ := runtime.Caller(2)
parent.cm.Set(cmd, chandlers)
_, file, line, _ := runtime.Caller(2)
c.debug.Printf("reg %q => %s [int:%t bg:%t] %s:%d", uid, cmd, internal, bg, file, line)
return cuid

View File

@ -404,13 +404,9 @@ func (p *UserPerms) Copy() (perms *UserPerms) {
np := &UserPerms{
channels: make(map[string]Perms),
}
p.mu.RLock()
for key := range p.channels {
np.channels[key] = p.channels[key]
}
p.mu.RUnlock()
return np
}
@ -426,9 +422,7 @@ func (p *UserPerms) MarshalJSON() ([]byte, error) {
// Lookup looks up the users permissions for a given channel. ok is false
// if the user is not in the given channel.
func (p *UserPerms) Lookup(channel string) (perms Perms, ok bool) {
p.mu.RLock()
perms, ok = p.channels[ToRFC1459(channel)]
p.mu.RUnlock()
return perms, ok
}

View File

@ -28,11 +28,11 @@ type state struct {
// users map[string]*User
users cmap.ConcurrentMap
// enabledCap are the capabilities which are enabled for this connection.
enabledCap map[string]map[string]string
enabledCap cmap.ConcurrentMap
// tmpCap are the capabilties which we share with the server during the
// last capability check. These will get sent once we have received the
// last capability list command from the server.
tmpCap map[string]map[string]string
tmpCap cmap.ConcurrentMap
// serverOptions are the standard capabilities and configurations
// supported by the server at connection time. This also includes
// RPL_ISUPPORT entries.
@ -69,8 +69,8 @@ func (s *state) reset(initial bool) {
}
}
s.enabledCap = make(map[string]map[string]string)
s.tmpCap = make(map[string]map[string]string)
s.enabledCap = cmap.New()
s.tmpCap = cmap.New()
s.motd = ""
if initial {
@ -482,6 +482,7 @@ func (s *state) createUser(src *Source) (u *User, ok bool) {
func (s *state) deleteUser(channelName, nick string) {
user := s.lookupUser(nick)
if user == nil {
s.client.debug.Printf(nick + ": was not found when trying to deleteUser from " + channelName)
return
}

View File

@ -5,7 +5,6 @@
package girc
import (
"log"
"reflect"
"testing"
"time"
@ -22,6 +21,7 @@ func debounce(delay time.Duration, done chan bool, f func()) {
f()
return
}
default:
}
}
}
@ -57,8 +57,7 @@ const mockConnEndState = `:nick2!nick2@other.int QUIT :example reason
`
func TestState(t *testing.T) {
c, conn, server := genMockConn(t)
c, conn, server := genMockConn()
defer c.Close()
go mockReadBuffer(conn)
@ -73,57 +72,50 @@ func TestState(t *testing.T) {
finishStart := make(chan bool, 1)
go debounce(250*time.Millisecond, bounceStart, func() {
if motd := c.ServerMOTD(); motd != "example motd" {
t.Errorf("Client.ServerMOTD() returned invalid MOTD: %q", motd)
t.Fatalf("Client.ServerMOTD() returned invalid MOTD: %q", motd)
}
if network := c.NetworkName(); network != "DummyIRC" && network != "DUMMY" {
t.Errorf("User.Network == %q, want \"DummyIRC\" or \"DUMMY\"", network)
t.Fatalf("User.Network == %q, want \"DummyIRC\" or \"DUMMY\"", network)
}
if caseExample, ok := c.GetServerOption("NICKLEN"); !ok || caseExample != "20" {
t.Errorf("Client.GetServerOptions returned invalid ISUPPORT variable: %q", caseExample)
t.Fatalf("Client.GetServerOptions returned invalid ISUPPORT variable: %q", caseExample)
}
t.Logf("getting user list")
users := c.UserList()
t.Logf("getting channel list")
channels := c.ChannelList()
if !reflect.DeepEqual(users, []string{"fhjones", "nick2"}) {
// This could fail too, if sorting isn't occurring.
t.Errorf("got state users %#v, wanted: %#v", users, []string{"fhjones", "nick2"})
t.Fatalf("got state users %#v, wanted: %#v", users, []string{"fhjones", "nick2"})
}
if !reflect.DeepEqual(channels, []string{"#channel", "#channel2"}) {
// This could fail too, if sorting isn't occurring.
t.Errorf("got state channels %#v, wanted: %#v", channels, []string{"#channel", "#channel2"})
t.Fatalf("got state channels %#v, wanted: %#v", channels, []string{"#channel", "#channel2"})
}
fullChannels := c.Channels()
for i := 0; i < len(fullChannels); i++ {
if fullChannels[i].Name != channels[i] {
t.Errorf("fullChannels name doesn't map to same name in ChannelsList: %q :: %#v", fullChannels[i].Name, channels)
t.Fatalf("fullChannels name doesn't map to same name in ChannelsList: %q :: %#v", fullChannels[i].Name, channels)
}
}
fullUsers := c.Users()
for i := 0; i < len(fullUsers); i++ {
if fullUsers[i].Nick != users[i] {
t.Errorf("fullUsers nick doesn't map to same nick in UsersList: %q :: %#v", fullUsers[i].Nick, users)
t.Fatalf("fullUsers nick doesn't map to same nick in UsersList: %q :: %#v", fullUsers[i].Nick, users)
}
}
ch := c.LookupChannel("#channel")
if ch == nil {
t.Error("Client.LookupChannel returned nil on existing channel")
return
t.Fatal("Client.LookupChannel returned nil on existing channel")
}
adm := ch.Admins(c)
if adm == nil {
t.Errorf("admin list is nil")
t.Fail()
}
admList := []string{}
for i := 0; i < len(adm); i++ {
admList = append(admList, adm[i].Nick)
@ -135,78 +127,76 @@ func TestState(t *testing.T) {
}
if !reflect.DeepEqual(admList, []string{"nick2"}) {
t.Errorf("got Channel.Admins() == %#v, wanted %#v", admList, []string{"nick2"})
t.Fatalf("got Channel.Admins() == %#v, wanted %#v", admList, []string{"nick2"})
}
if !reflect.DeepEqual(trustedList, []string{"nick2"}) {
t.Errorf("got Channel.Trusted() == %#v, wanted %#v", trustedList, []string{"nick2"})
t.Fatalf("got Channel.Trusted() == %#v, wanted %#v", trustedList, []string{"nick2"})
}
if topic := ch.Topic; topic != "example topic" {
t.Errorf("Channel.Topic == %q, want \"example topic\"", topic)
t.Fatalf("Channel.Topic == %q, want \"example topic\"", topic)
}
if ch.Network != "DummyIRC" && ch.Network != "DUMMY" {
t.Errorf("Channel.Network == %q, want \"DummyIRC\" or \"DUMMY\"", ch.Network)
t.Fatalf("Channel.Network == %q, want \"DummyIRC\" or \"DUMMY\"", ch.Network)
}
if in := ch.UserIn("fhjones"); !in {
t.Errorf("Channel.UserIn == %t, want %t", in, true)
t.Fatalf("Channel.UserIn == %t, want %t", in, true)
}
if users := ch.Users(c); len(users) != 2 {
t.Errorf("Channel.Users == %#v, wanted length of 2", users)
t.Fatalf("Channel.Users == %#v, wanted length of 2", users)
}
if h := c.GetHost(); h != "local.int" {
t.Errorf("Client.GetHost() == %q, want local.int", h)
t.Fatalf("Client.GetHost() == %q, want local.int", h)
}
if nick := c.GetNick(); nick != "fhjones" {
t.Errorf("Client.GetNick() == %q, want nick", nick)
t.Fatalf("Client.GetNick() == %q, want nick", nick)
}
if ident := c.GetIdent(); ident != "~user" {
t.Errorf("Client.GetIdent() == %q, want ~user", ident)
t.Fatalf("Client.GetIdent() == %q, want ~user", ident)
}
user := c.LookupUser("fhjones")
if user == nil {
t.Errorf("Client.LookupUser() returned nil on existing user")
return
t.Fatal("Client.LookupUser() returned nil on existing user")
}
if !reflect.DeepEqual(user.ChannelList, []string{"#channel", "#channel2"}) {
t.Errorf("User.ChannelList == %#v, wanted %#v", user.ChannelList, []string{"#channel", "#channel2"})
t.Fatalf("User.ChannelList == %#v, wanted %#v", user.ChannelList, []string{"#channel", "#channel2"})
}
if count := len(user.Channels(c)); count != 2 {
t.Errorf("len(User.Channels) == %d, want 2", count)
t.Fatalf("len(User.Channels) == %d, want 2", count)
}
if user.Nick != "fhjones" {
t.Errorf("User.Nick == %q, wanted \"nick\"", user.Nick)
t.Fatalf("User.Nick == %q, wanted \"nick\"", user.Nick)
}
if user.Extras.Name != "realname" {
t.Errorf("User.Extras.Name == %q, wanted \"realname\"", user.Extras.Name)
t.Fatalf("User.Extras.Name == %q, wanted \"realname\"", user.Extras.Name)
}
if user.Host != "local.int" {
t.Errorf("User.Host == %q, wanted \"local.int\"", user.Host)
t.Fatalf("User.Host == %q, wanted \"local.int\"", user.Host)
}
if user.Ident != "~user" {
t.Errorf("User.Ident == %q, wanted \"~user\"", user.Ident)
t.Fatalf("User.Ident == %q, wanted \"~user\"", user.Ident)
}
if user.Network != "DummyIRC" && user.Network != "DUMMY" {
t.Errorf("User.Network == %q, want \"DummyIRC\" or \"DUMMY\"", user.Network)
t.Fatalf("User.Network == %q, want \"DummyIRC\" or \"DUMMY\"", user.Network)
}
if !user.InChannel("#channel2") {
t.Error("User.InChannel() returned false for existing channel")
return
t.Fatal("User.InChannel() returned false for existing channel")
}
finishStart <- true
@ -217,11 +207,8 @@ func TestState(t *testing.T) {
bounceStart <- true
})
err := conn.SetDeadline(time.Now().Add(5 * time.Second))
if err != nil {
log.Fatalf(err.Error())
}
_, err = conn.Write([]byte(mockConnStartState))
conn.SetDeadline(time.Now().Add(5 * time.Second))
_, err := conn.Write([]byte(mockConnStartState))
if err != nil {
panic(err)
}
@ -237,11 +224,11 @@ func TestState(t *testing.T) {
finishEnd := make(chan bool, 1)
go debounce(250*time.Millisecond, bounceEnd, func() {
if !reflect.DeepEqual(c.ChannelList(), []string{"#channel"}) {
t.Errorf("Client.ChannelList() == %#v, wanted %#v", c.ChannelList(), []string{"#channel"})
t.Fatalf("Client.ChannelList() == %#v, wanted %#v", c.ChannelList(), []string{"#channel"})
}
if !reflect.DeepEqual(c.UserList(), []string{"notjones"}) {
t.Errorf("Client.UserList() == %#v, wanted %#v", c.UserList(), []string{"notjones"})
t.Fatalf("Client.UserList() == %#v, wanted %#v", c.UserList(), []string{"notjones"})
}
user := c.LookupUser("notjones")
@ -250,19 +237,18 @@ func TestState(t *testing.T) {
}
if !reflect.DeepEqual(user.ChannelList, []string{"#channel"}) {
t.Errorf("user.ChannelList == %q, wanted %q", user.ChannelList, []string{"#channel"})
t.Fatalf("user.ChannelList == %q, wanted %q", user.ChannelList, []string{"#channel"})
}
channel := c.LookupChannel("#channel")
if channel == nil {
t.Error("Client.LookupChannel() returned nil for existing channel")
t.Fatal("Client.LookupChannel() returned nil for existing channel")
}
if !reflect.DeepEqual(channel.UserList, []string{"notjones"}) {
t.Errorf("channel.UserList == %q, wanted %q", channel.UserList, []string{"notjones"})
t.Fatalf("channel.UserList == %q, wanted %q", channel.UserList, []string{"notjones"})
}
t.Logf(c.String())
finishEnd <- true
})