refactor: multi-module workspace + dataset owns Syncer interface

- Each package gets its own go.mod: net/{dataset,httpcache,gitshallow,ipcohort,geoip,formmailer}
- go.work with replace directives for cross-module workspace resolution
- dataset.Syncer/NopSyncer moved here from httpcache; callers duck-type it
- dataset.View[T] returned by Add to prevent Init/Sync/Run misuse on group members
- cmd/check-ip moved from net/ipcohort/cmd/check-ip to top-level cmd/check-ip
- Add net/ipcohort/cmd/ipcohort-contains for standalone cohort membership testing
This commit is contained in:
AJ ONeal 2026-04-20 11:22:01 -06:00
parent 225faec549
commit 34a54c2d66
No known key found for this signature in database
15 changed files with 200 additions and 27 deletions

3
.gitignore vendored
View File

@ -1,6 +1,7 @@
credentials.tsv credentials.tsv
GeoIP.conf GeoIP.conf
*.mmdb *.mmdb
testdata
.env .env
*.env *.env
@ -9,6 +10,8 @@ env.*
# Project binaries # Project binaries
dist/ dist/
check-ip
cmd/check-ip/check-ip
auth/csvauth/cmd/csvauth/csvauth auth/csvauth/cmd/csvauth/csvauth
cmd/auth-proxy/auth-proxy cmd/auth-proxy/auth-proxy
cmd/httplog/httplog cmd/httplog/httplog

View File

@ -22,7 +22,7 @@ type Sources struct {
inboundPaths []string inboundPaths []string
outboundPaths []string outboundPaths []string
syncs []httpcache.Syncer // all syncable sources syncs []dataset.Syncer // all syncable sources
} }
func newFileSources(whitelist, inbound, outbound []string) *Sources { func newFileSources(whitelist, inbound, outbound []string) *Sources {
@ -46,7 +46,7 @@ func newGitSources(gitURL, repoDir string, whitelist, inboundRel, outboundRel []
whitelistPaths: whitelist, whitelistPaths: whitelist,
inboundPaths: abs(inboundRel), inboundPaths: abs(inboundRel),
outboundPaths: abs(outboundRel), outboundPaths: abs(outboundRel),
syncs: []httpcache.Syncer{repo}, syncs: []dataset.Syncer{repo},
} }
} }
@ -64,7 +64,7 @@ func newHTTPSources(whitelist []string, inbound, outbound []HTTPSource) *Sources
} }
// Fetch pulls updates from all sources. Returns whether any new data arrived. // Fetch pulls updates from all sources. Returns whether any new data arrived.
// Satisfies httpcache.Syncer. // Satisfies dataset.Syncer.
func (s *Sources) Fetch() (bool, error) { func (s *Sources) Fetch() (bool, error) {
var anyUpdated bool var anyUpdated bool
for _, syn := range s.syncs { for _, syn := range s.syncs {

12
cmd/check-ip/go.mod Normal file
View File

@ -0,0 +1,12 @@
module github.com/therootcompany/golib/cmd/check-ip
go 1.26.0
require (
github.com/oschwald/geoip2-golang v1.13.0
github.com/therootcompany/golib/net/dataset v0.0.0
github.com/therootcompany/golib/net/geoip v0.0.0
github.com/therootcompany/golib/net/gitshallow v0.0.0
github.com/therootcompany/golib/net/httpcache v0.0.0
github.com/therootcompany/golib/net/ipcohort v0.0.0
)

View File

@ -13,7 +13,6 @@ import (
"github.com/oschwald/geoip2-golang" "github.com/oschwald/geoip2-golang"
"github.com/therootcompany/golib/net/dataset" "github.com/therootcompany/golib/net/dataset"
"github.com/therootcompany/golib/net/geoip" "github.com/therootcompany/golib/net/geoip"
"github.com/therootcompany/golib/net/httpcache"
"github.com/therootcompany/golib/net/ipcohort" "github.com/therootcompany/golib/net/ipcohort"
) )
@ -194,11 +193,11 @@ func main() {
// newGeoIPDataset creates a Dataset[geoip2.Reader]. If d is nil, only // newGeoIPDataset creates a Dataset[geoip2.Reader]. If d is nil, only
// opens the existing file (no download). Close is wired to Reader.Close. // opens the existing file (no download). Close is wired to Reader.Close.
func newGeoIPDataset(d *geoip.Downloader, edition, path string) *dataset.Dataset[geoip2.Reader] { func newGeoIPDataset(d *geoip.Downloader, edition, path string) *dataset.Dataset[geoip2.Reader] {
var syncer httpcache.Syncer var syncer dataset.Syncer
if d != nil { if d != nil {
syncer = d.NewCacher(edition, path) syncer = d.NewCacher(edition, path)
} else { } else {
syncer = httpcache.NopSyncer{} syncer = dataset.NopSyncer{}
} }
ds := dataset.New(syncer, func() (*geoip2.Reader, error) { ds := dataset.New(syncer, func() (*geoip2.Reader, error) {
return geoip2.Open(path) return geoip2.Open(path)

21
go.work Normal file
View File

@ -0,0 +1,21 @@
go 1.26.1
use (
.
./cmd/check-ip
./net/dataset
./net/formmailer
./net/geoip
./net/gitshallow
./net/httpcache
./net/ipcohort
)
replace (
github.com/therootcompany/golib/net/dataset v0.0.0 => ./net/dataset
github.com/therootcompany/golib/net/formmailer v0.0.0 => ./net/formmailer
github.com/therootcompany/golib/net/geoip v0.0.0 => ./net/geoip
github.com/therootcompany/golib/net/gitshallow v0.0.0 => ./net/gitshallow
github.com/therootcompany/golib/net/httpcache v0.0.0 => ./net/httpcache
github.com/therootcompany/golib/net/ipcohort v0.0.0 => ./net/ipcohort
)

View File

@ -26,10 +26,20 @@ import (
"os" "os"
"sync/atomic" "sync/atomic"
"time" "time"
"github.com/therootcompany/golib/net/httpcache"
) )
// Syncer is implemented by any value that can fetch a remote resource and
// report whether it changed.
type Syncer interface {
Fetch() (updated bool, err error)
}
// NopSyncer is a Syncer that always reports no update and no error.
// Use for datasets backed by local files with no remote source.
type NopSyncer struct{}
func (NopSyncer) Fetch() (bool, error) { return false, nil }
// Dataset couples a Syncer, a load function, and an atomic.Pointer[T]. // Dataset couples a Syncer, a load function, and an atomic.Pointer[T].
// Load is safe for concurrent use without locks. // Load is safe for concurrent use without locks.
type Dataset[T any] struct { type Dataset[T any] struct {
@ -39,14 +49,14 @@ type Dataset[T any] struct {
// Use this for values that hold resources, e.g. func(r *geoip2.Reader) { r.Close() }. // Use this for values that hold resources, e.g. func(r *geoip2.Reader) { r.Close() }.
Close func(*T) Close func(*T)
syncer httpcache.Syncer syncer Syncer
load func() (*T, error) load func() (*T, error)
ptr atomic.Pointer[T] ptr atomic.Pointer[T]
} }
// New creates a Dataset. The syncer fetches updates; load produces the value. // New creates a Dataset. The syncer fetches updates; load produces the value.
// load is a closure — it captures whatever paths or config it needs. // load is a closure — it captures whatever paths or config it needs.
func New[T any](syncer httpcache.Syncer, load func() (*T, error)) *Dataset[T] { func New[T any](syncer Syncer, load func() (*T, error)) *Dataset[T] {
return &Dataset[T]{syncer: syncer, load: load} return &Dataset[T]{syncer: syncer, load: load}
} }
@ -115,12 +125,12 @@ type member interface {
// Group ties one Syncer to multiple datasets so a single Fetch drives all // Group ties one Syncer to multiple datasets so a single Fetch drives all
// reloads — no redundant network calls when datasets share a source. // reloads — no redundant network calls when datasets share a source.
type Group struct { type Group struct {
syncer httpcache.Syncer syncer Syncer
members []member members []member
} }
// NewGroup creates a Group backed by syncer. // NewGroup creates a Group backed by syncer.
func NewGroup(syncer httpcache.Syncer) *Group { func NewGroup(syncer Syncer) *Group {
return &Group{syncer: syncer} return &Group{syncer: syncer}
} }

View File

@ -8,7 +8,6 @@ import (
"time" "time"
"github.com/therootcompany/golib/net/dataset" "github.com/therootcompany/golib/net/dataset"
"github.com/therootcompany/golib/net/httpcache"
) )
// countSyncer counts Fetch calls and optionally reports updated. // countSyncer counts Fetch calls and optionally reports updated.
@ -47,7 +46,7 @@ func TestDataset_Init(t *testing.T) {
} }
func TestDataset_LoadBeforeInit(t *testing.T) { func TestDataset_LoadBeforeInit(t *testing.T) {
syn := httpcache.NopSyncer{} syn := dataset.NopSyncer{}
ds := dataset.New(syn, func() (*string, error) { ds := dataset.New(syn, func() (*string, error) {
v := "x" v := "x"
return &v, nil return &v, nil
@ -120,7 +119,7 @@ func TestDataset_InitError(t *testing.T) {
} }
func TestDataset_LoaderError(t *testing.T) { func TestDataset_LoaderError(t *testing.T) {
syn := httpcache.NopSyncer{} syn := dataset.NopSyncer{}
ds := dataset.New(syn, func() (*string, error) { ds := dataset.New(syn, func() (*string, error) {
return nil, errors.New("load failed") return nil, errors.New("load failed")
}) })
@ -273,7 +272,7 @@ func TestGroup_FetchError(t *testing.T) {
} }
func TestGroup_LoaderError(t *testing.T) { func TestGroup_LoaderError(t *testing.T) {
syn := httpcache.NopSyncer{} syn := dataset.NopSyncer{}
g := dataset.NewGroup(syn) g := dataset.NewGroup(syn)
dataset.Add(g, func() (*string, error) { dataset.Add(g, func() (*string, error) {
return nil, errors.New("parse error") return nil, errors.New("parse error")

3
net/dataset/go.mod Normal file
View File

@ -0,0 +1,3 @@
module github.com/therootcompany/golib/net/dataset
go 1.26.0

10
net/formmailer/go.mod Normal file
View File

@ -0,0 +1,10 @@
module github.com/therootcompany/golib/net/formmailer
go 1.26.0
require (
github.com/phuslu/iploc v1.0.20260415
github.com/therootcompany/golib/net/dataset v0.0.0
github.com/therootcompany/golib/net/ipcohort v0.0.0
golang.org/x/time v0.15.0
)

8
net/geoip/go.mod Normal file
View File

@ -0,0 +1,8 @@
module github.com/therootcompany/golib/net/geoip
go 1.26.0
require (
github.com/oschwald/geoip2-golang v1.13.0
github.com/therootcompany/golib/net/httpcache v0.0.0
)

3
net/gitshallow/go.mod Normal file
View File

@ -0,0 +1,3 @@
module github.com/therootcompany/golib/net/gitshallow
go 1.26.0

3
net/httpcache/go.mod Normal file
View File

@ -0,0 +1,3 @@
module github.com/therootcompany/golib/net/httpcache
go 1.26.0

View File

@ -16,17 +16,6 @@ const (
defaultTimeout = 5 * time.Minute // overall including body read defaultTimeout = 5 * time.Minute // overall including body read
) )
// Syncer is implemented by any value that can fetch a remote resource and
// report whether it changed. Both *Cacher and *gitshallow.Repo satisfy this.
type Syncer interface {
Fetch() (updated bool, err error)
}
// NopSyncer is a Syncer that always reports no update and no error.
// Use for datasets backed by local files managed externally (no download).
type NopSyncer struct{}
func (NopSyncer) Fetch() (bool, error) { return false, nil }
// Cacher fetches a URL to a local file, using ETag/Last-Modified to skip // Cacher fetches a URL to a local file, using ETag/Last-Modified to skip
// unchanged responses. // unchanged responses.

View File

@ -0,0 +1,110 @@
// ipcohort-contains checks whether one or more IP addresses appear in a set
// of cohort files (plain text, one IP/CIDR per line).
//
// Usage:
//
// ipcohort-contains [flags] <file>... -- <ip>...
// ipcohort-contains [flags] -ip <ip> <file>...
//
// Examples:
//
// ipcohort-contains networks.txt single_ips.txt -- 1.2.3.4 5.6.7.8
// ipcohort-contains -ip 1.2.3.4 single_ips.txt
// echo "1.2.3.4" | ipcohort-contains networks.txt
//
// Exit code: 0 if all queried IPs are found, 1 if any are not found, 2 on error.
package main
import (
"bufio"
"flag"
"fmt"
"os"
"strings"
"github.com/therootcompany/golib/net/ipcohort"
)
// main parses flags and arguments, loads the cohort files, and reports for
// each queried IP whether it appears in the cohort.
//
// IPs may come from the -ip flag, from positional args after "--", or from
// stdin (one per line; blank lines and "#" comments are skipped) when
// neither is given. Exits 0 if every IP is found, 1 if any is missing,
// and 2 on a usage or load error.
func main() {
	ipFlag := flag.String("ip", "", "IP address to check (alternative to -- separator)")
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "Usage: %s [flags] <file>... -- <ip>...\n", os.Args[0])
		fmt.Fprintf(os.Stderr, "  %s -ip <ip> <file>...\n", os.Args[0])
		fmt.Fprintf(os.Stderr, "  echo <ip> | %s <file>...\n", os.Args[0])
		fmt.Fprintln(os.Stderr, "Flags:")
		flag.PrintDefaults()
		fmt.Fprintln(os.Stderr, "Exit: 0=all found, 1=not found, 2=error")
	}
	flag.Parse()
	args := flag.Args()

	// Always split positional args at "--" into file paths and IPs.
	// Previously "--" was ignored when -ip was set, so it was passed to
	// LoadFiles as a file path (spurious load error) and any IPs after it
	// were silently dropped.
	sep := -1
	for i, a := range args {
		if a == "--" {
			sep = i
			break
		}
	}
	filePaths := args
	var ips []string
	if sep >= 0 {
		filePaths = args[:sep]
		ips = args[sep+1:]
	}
	if *ipFlag != "" {
		// -ip combines with (rather than overrides) IPs listed after "--".
		ips = append([]string{*ipFlag}, ips...)
	}

	if len(filePaths) == 0 {
		fmt.Fprintln(os.Stderr, "error: at least one file path required")
		flag.Usage()
		os.Exit(2)
	}

	cohort, err := ipcohort.LoadFiles(filePaths...)
	if err != nil {
		fmt.Fprintf(os.Stderr, "error: %v\n", err)
		os.Exit(2)
	}

	// No IPs via flag or args: read them from stdin, one per line.
	if len(ips) == 0 {
		sc := bufio.NewScanner(os.Stdin)
		for sc.Scan() {
			line := strings.TrimSpace(sc.Text())
			if line != "" && !strings.HasPrefix(line, "#") {
				ips = append(ips, line)
			}
		}
		if err := sc.Err(); err != nil {
			fmt.Fprintf(os.Stderr, "error reading stdin: %v\n", err)
			os.Exit(2)
		}
	}
	if len(ips) == 0 {
		fmt.Fprintln(os.Stderr, "error: no IP addresses to check")
		flag.Usage()
		os.Exit(2)
	}

	allFound := true
	for _, ip := range ips {
		if cohort.Contains(ip) {
			fmt.Printf("%s\tFOUND\n", ip)
		} else {
			fmt.Printf("%s\tNOT FOUND\n", ip)
			allFound = false
		}
	}
	if !allFound {
		os.Exit(1)
	}
}

3
net/ipcohort/go.mod Normal file
View File

@ -0,0 +1,3 @@
module github.com/therootcompany/golib/net/ipcohort
go 1.26.0