refactor: dataset uses closure Loader + Close callback; check-ip uses Dataset/Group

The named dataset.Loader[T] type is removed; load functions are now plain
func() (*T, error) closures capturing their own paths/config, so multi-file
cases (LoadFiles(paths...)) work naturally.

Dataset.Close func(*T) is called with the old value after each swap, enabling
resource cleanup (e.g. geoip2.Reader.Close).

Sources.Datasets() builds a dataset.Group + three typed *Dataset[ipcohort.Cohort].
main.go now uses blGroup.Run / cityDS.Run / asnDS.Run instead of hand-rolled
atomic.Pointer + polling loops. containsInbound/containsOutbound accept *Dataset[Cohort].
nopSyncer handles file-only GeoIP paths (no download, just open).
This commit is contained in:
AJ ONeal 2026-04-20 09:28:20 -06:00
parent 7c0cd26da1
commit 673d084bd2
No known key found for this signature in database
3 changed files with 192 additions and 203 deletions

View File

@ -2,22 +2,22 @@
// atomic.Pointer (hot-swap), providing a generic periodically-updated // atomic.Pointer (hot-swap), providing a generic periodically-updated
// in-memory dataset with lock-free reads. // in-memory dataset with lock-free reads.
// //
// Single dataset: // Standalone dataset (one syncer, one value):
// //
// ds := dataset.New(cacher, ipcohort.LoadFile, path) // ds := dataset.New(cacher, func() (*MyType, error) {
// return mytype.LoadFile(path)
// })
// if err := ds.Init(); err != nil { ... } // if err := ds.Init(); err != nil { ... }
// go ds.Run(ctx, 47*time.Minute) // go ds.Run(ctx, 47*time.Minute)
// cohort := ds.Load() // val := ds.Load() // *MyType, lock-free
// //
// Multiple datasets sharing one syncer (e.g. inbound + outbound from one git repo): // Group (one syncer, multiple values — e.g. inbound+outbound from one git repo):
// //
// g := dataset.NewGroup(repo) // g := dataset.NewGroup(repo)
// inbound := dataset.Add(g, ipcohort.LoadFile, inboundPath) // inbound := dataset.Add(g, func() (*ipcohort.Cohort, error) { return ipcohort.LoadFiles(inboundPaths...) })
// outbound := dataset.Add(g, ipcohort.LoadFile, outboundPath) // outbound := dataset.Add(g, func() (*ipcohort.Cohort, error) { return ipcohort.LoadFiles(outboundPaths...) })
// if err := g.Init(); err != nil { ... } // if err := g.Init(); err != nil { ... }
// go g.Run(ctx, 47*time.Minute) // go g.Run(ctx, 47*time.Minute)
// in := inbound.Load()
// out := outbound.Load()
package dataset package dataset
import ( import (
@ -30,21 +30,24 @@ import (
"github.com/therootcompany/golib/net/httpcache" "github.com/therootcompany/golib/net/httpcache"
) )
// Loader reads path and returns the parsed value, or an error. // Dataset couples a Syncer, a load function, and an atomic.Pointer[T].
type Loader[T any] func(path string) (*T, error)
// Dataset couples a Syncer, a Loader, and an atomic.Pointer.
// Load is safe for concurrent use without locks. // Load is safe for concurrent use without locks.
type Dataset[T any] struct { type Dataset[T any] struct {
// Name is used in error messages. Optional.
Name string
// Close is called with the previous value after each successful swap.
// Use this for values that hold resources, e.g. func(r *geoip2.Reader) { r.Close() }.
Close func(*T)
syncer httpcache.Syncer syncer httpcache.Syncer
load Loader[T] load func() (*T, error)
path string
ptr atomic.Pointer[T] ptr atomic.Pointer[T]
} }
// New creates a Dataset. The syncer fetches updates to path; load parses it. // New creates a Dataset. The syncer fetches updates; load produces the value.
func New[T any](syncer httpcache.Syncer, load Loader[T], path string) *Dataset[T] { // load is a closure — it captures whatever paths or config it needs.
return &Dataset[T]{syncer: syncer, load: load, path: path} func New[T any](syncer httpcache.Syncer, load func() (*T, error)) *Dataset[T] {
return &Dataset[T]{syncer: syncer, load: load}
} }
// Load returns the current value. Returns nil before Init is called. // Load returns the current value. Returns nil before Init is called.
@ -52,7 +55,7 @@ func (d *Dataset[T]) Load() *T {
return d.ptr.Load() return d.ptr.Load()
} }
// Init fetches (if the syncer needs it) then loads, ensuring the dataset is // Init fetches (if needed) then always loads, ensuring the dataset is
// populated on startup from an existing local file even if nothing changed. // populated on startup from an existing local file even if nothing changed.
func (d *Dataset[T]) Init() error { func (d *Dataset[T]) Init() error {
if _, err := d.syncer.Fetch(); err != nil { if _, err := d.syncer.Fetch(); err != nil {
@ -61,8 +64,7 @@ func (d *Dataset[T]) Init() error {
return d.reload() return d.reload()
} }
// Sync fetches from the remote and reloads if the content changed. // Sync fetches and reloads if the content changed. Returns whether updated.
// Returns whether the value was updated.
func (d *Dataset[T]) Sync() (bool, error) { func (d *Dataset[T]) Sync() (bool, error) {
updated, err := d.syncer.Fetch() updated, err := d.syncer.Fetch()
if err != nil || !updated { if err != nil || !updated {
@ -80,7 +82,11 @@ func (d *Dataset[T]) Run(ctx context.Context, interval time.Duration) {
select { select {
case <-ticker.C: case <-ticker.C:
if _, err := d.Sync(); err != nil { if _, err := d.Sync(); err != nil {
fmt.Fprintf(os.Stderr, "dataset %s: sync error: %v\n", d.path, err) name := d.Name
if name == "" {
name = "dataset"
}
fmt.Fprintf(os.Stderr, "%s: sync error: %v\n", name, err)
} }
case <-ctx.Done(): case <-ctx.Done():
return return
@ -89,27 +95,28 @@ func (d *Dataset[T]) Run(ctx context.Context, interval time.Duration) {
} }
func (d *Dataset[T]) reload() error { func (d *Dataset[T]) reload() error {
val, err := d.load(d.path) val, err := d.load()
if err != nil { if err != nil {
return err return err
} }
d.ptr.Store(val) if old := d.ptr.Swap(val); old != nil && d.Close != nil {
d.Close(old)
}
return nil return nil
} }
// -- Group: one Syncer driving multiple datasets --------------------------- // -- Group: one Syncer driving multiple datasets ---------------------------
// entry is the type-erased reload handle stored in a Group. // member is the type-erased reload handle stored in a Group.
type entry interface { type member interface {
reload() error reload() error
} }
// Group ties one Syncer to multiple datasets so a single Fetch drives all // Group ties one Syncer to multiple datasets so a single Fetch drives all
// reloads — avoiding redundant network calls when datasets share a source // reloads — no redundant network calls when datasets share a source.
// (e.g. multiple files from the same git repo or HTTP directory).
type Group struct { type Group struct {
syncer httpcache.Syncer syncer httpcache.Syncer
entries []entry members []member
} }
// NewGroup creates a Group backed by syncer. // NewGroup creates a Group backed by syncer.
@ -117,11 +124,12 @@ func NewGroup(syncer httpcache.Syncer) *Group {
return &Group{syncer: syncer} return &Group{syncer: syncer}
} }
// Add registers a new dataset in g and returns it. Subsequent Init/Sync/Run // Add registers a new dataset in g and returns it. Call Init or Run on g —
// calls on g will reload this dataset whenever the syncer reports an update. // not on the returned dataset — to drive updates.
func Add[T any](g *Group, load Loader[T], path string) *Dataset[T] { // load is a closure capturing whatever paths or config it needs.
d := &Dataset[T]{load: load, path: path} func Add[T any](g *Group, load func() (*T, error)) *Dataset[T] {
g.entries = append(g.entries, d) d := &Dataset[T]{load: load}
g.members = append(g.members, d)
return d return d
} }
@ -159,8 +167,8 @@ func (g *Group) Run(ctx context.Context, interval time.Duration) {
} }
func (g *Group) reloadAll() error { func (g *Group) reloadAll() error {
for _, e := range g.entries { for _, m := range g.members {
if err := e.reload(); err != nil { if err := m.reload(); err != nil {
return err return err
} }
} }

View File

@ -3,6 +3,7 @@ package main
import ( import (
"path/filepath" "path/filepath"
"github.com/therootcompany/golib/net/dataset"
"github.com/therootcompany/golib/net/gitshallow" "github.com/therootcompany/golib/net/gitshallow"
"github.com/therootcompany/golib/net/httpcache" "github.com/therootcompany/golib/net/httpcache"
"github.com/therootcompany/golib/net/ipcohort" "github.com/therootcompany/golib/net/ipcohort"
@ -14,15 +15,15 @@ type HTTPSource struct {
Path string Path string
} }
// Sources holds the configuration for fetching and loading the three cohorts. // Sources holds fetch configuration for the three blocklist cohorts.
// It knows how to pull data from git or HTTP, but owns no atomic state. // It knows how to pull data from git or HTTP, but owns no atomic state.
type Sources struct { type Sources struct {
whitelistPaths []string whitelistPaths []string
inboundPaths []string inboundPaths []string
outboundPaths []string outboundPaths []string
gitRepo *gitshallow.Repo // non-nil for git source; used by Init for clone-if-missing gitRepo *gitshallow.Repo // non-nil for git source; used by Init for clone-if-missing
syncs []httpcache.Syncer // all syncable sources (git repo or HTTP cachers) syncs []httpcache.Syncer // all syncable sources
} }
func newFileSources(whitelist, inbound, outbound []string) *Sources { func newFileSources(whitelist, inbound, outbound []string) *Sources {
@ -78,8 +79,7 @@ func (s *Sources) Fetch() (bool, error) {
return anyUpdated, nil return anyUpdated, nil
} }
// Init ensures remotes are ready. For git: clones if missing then syncs. // Init ensures remotes are ready: clones git if missing, or fetches HTTP files.
// For HTTP: fetches each cacher unconditionally on first run.
func (s *Sources) Init() error { func (s *Sources) Init() error {
if s.gitRepo != nil { if s.gitRepo != nil {
_, err := s.gitRepo.Init() _, err := s.gitRepo.Init()
@ -93,23 +93,33 @@ func (s *Sources) Init() error {
return nil return nil
} }
func (s *Sources) LoadWhitelist() (*ipcohort.Cohort, error) { // Datasets builds a dataset.Group backed by this Sources and returns typed
if len(s.whitelistPaths) == 0 { // datasets for whitelist, inbound, and outbound cohorts. Either whitelist or
return nil, nil // outbound may be nil if no paths were configured.
func (s *Sources) Datasets() (
g *dataset.Group,
whitelist *dataset.Dataset[ipcohort.Cohort],
inbound *dataset.Dataset[ipcohort.Cohort],
outbound *dataset.Dataset[ipcohort.Cohort],
) {
g = dataset.NewGroup(s)
if len(s.whitelistPaths) > 0 {
paths := s.whitelistPaths
whitelist = dataset.Add(g, func() (*ipcohort.Cohort, error) {
return ipcohort.LoadFiles(paths...)
})
} }
return ipcohort.LoadFiles(s.whitelistPaths...) if len(s.inboundPaths) > 0 {
} paths := s.inboundPaths
inbound = dataset.Add(g, func() (*ipcohort.Cohort, error) {
func (s *Sources) LoadInbound() (*ipcohort.Cohort, error) { return ipcohort.LoadFiles(paths...)
if len(s.inboundPaths) == 0 { })
return nil, nil
} }
return ipcohort.LoadFiles(s.inboundPaths...) if len(s.outboundPaths) > 0 {
} paths := s.outboundPaths
outbound = dataset.Add(g, func() (*ipcohort.Cohort, error) {
func (s *Sources) LoadOutbound() (*ipcohort.Cohort, error) { return ipcohort.LoadFiles(paths...)
if len(s.outboundPaths) == 0 { })
return nil, nil
} }
return ipcohort.LoadFiles(s.outboundPaths...) return g, whitelist, inbound, outbound
} }

View File

@ -8,12 +8,11 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"strings" "strings"
"sync/atomic"
"time" "time"
"github.com/oschwald/geoip2-golang" "github.com/oschwald/geoip2-golang"
"github.com/therootcompany/golib/net/dataset"
"github.com/therootcompany/golib/net/geoip" "github.com/therootcompany/golib/net/geoip"
"github.com/therootcompany/golib/net/httpcache"
"github.com/therootcompany/golib/net/ipcohort" "github.com/therootcompany/golib/net/ipcohort"
) )
@ -75,26 +74,25 @@ func main() {
) )
} }
var whitelist, inbound, outbound atomic.Pointer[ipcohort.Cohort] // Build typed datasets from the source.
if err := src.Init(); err != nil { if err := src.Init(); err != nil {
fmt.Fprintf(os.Stderr, "error: %v\n", err) fmt.Fprintf(os.Stderr, "error: %v\n", err)
os.Exit(1) os.Exit(1)
} }
if err := reloadBlocklists(src, &whitelist, &inbound, &outbound); err != nil { blGroup, whitelistDS, inboundDS, outboundDS := src.Datasets()
if err := blGroup.Init(); err != nil {
fmt.Fprintf(os.Stderr, "error: %v\n", err) fmt.Fprintf(os.Stderr, "error: %v\n", err)
os.Exit(1) os.Exit(1)
} }
fmt.Fprintf(os.Stderr, "Loaded inbound=%d outbound=%d\n", fmt.Fprintf(os.Stderr, "Loaded inbound=%d outbound=%d\n",
cohortSize(&inbound), cohortSize(&outbound)) cohortSize(inboundDS), cohortSize(outboundDS))
// GeoIP: resolve paths and build cachers if we have credentials.
var cityDB, asnDB atomic.Pointer[geoip2.Reader]
var cityCacher, asnCacher *httpcache.Cacher
// GeoIP datasets.
resolvedCityPath := *cityDBPath resolvedCityPath := *cityDBPath
resolvedASNPath := *asnDBPath resolvedASNPath := *asnDBPath
var cityDS, asnDS *dataset.Dataset[geoip2.Reader]
if *geoipConf != "" { if *geoipConf != "" {
cfg, err := geoip.ParseConf(*geoipConf) cfg, err := geoip.ParseConf(*geoipConf)
if err != nil { if err != nil {
@ -104,6 +102,9 @@ func main() {
if dbDir == "" { if dbDir == "" {
dbDir = dataPath dbDir = dataPath
} }
if err := os.MkdirAll(dbDir, 0o755); err != nil {
fmt.Fprintf(os.Stderr, "warn: mkdir %s: %v\n", dbDir, err)
}
d := geoip.New(cfg.AccountID, cfg.LicenseKey) d := geoip.New(cfg.AccountID, cfg.LicenseKey)
if resolvedCityPath == "" { if resolvedCityPath == "" {
resolvedCityPath = filepath.Join(dbDir, geoip.CityEdition+".mmdb") resolvedCityPath = filepath.Join(dbDir, geoip.CityEdition+".mmdb")
@ -111,37 +112,44 @@ func main() {
if resolvedASNPath == "" { if resolvedASNPath == "" {
resolvedASNPath = filepath.Join(dbDir, geoip.ASNEdition+".mmdb") resolvedASNPath = filepath.Join(dbDir, geoip.ASNEdition+".mmdb")
} }
cityCacher = d.NewCacher(geoip.CityEdition, resolvedCityPath) cityDS = newGeoIPDataset(d, geoip.CityEdition, resolvedCityPath)
asnCacher = d.NewCacher(geoip.ASNEdition, resolvedASNPath) asnDS = newGeoIPDataset(d, geoip.ASNEdition, resolvedASNPath)
if err := os.MkdirAll(dbDir, 0o755); err != nil { }
fmt.Fprintf(os.Stderr, "warn: mkdir %s: %v\n", dbDir, err) } else {
} // Manual paths: no auto-download, just open existing files.
if resolvedCityPath != "" {
cityDS = newGeoIPDataset(nil, "", resolvedCityPath)
}
if resolvedASNPath != "" {
asnDS = newGeoIPDataset(nil, "", resolvedASNPath)
} }
} }
// Fetch GeoIP DBs if we have cachers; otherwise just open existing files. if cityDS != nil {
if cityCacher != nil { if err := cityDS.Init(); err != nil {
if _, err := cityCacher.Fetch(); err != nil { fmt.Fprintf(os.Stderr, "warn: city DB: %v\n", err)
fmt.Fprintf(os.Stderr, "warn: city DB fetch: %v\n", err)
} }
} }
if asnCacher != nil { if asnDS != nil {
if _, err := asnCacher.Fetch(); err != nil { if err := asnDS.Init(); err != nil {
fmt.Fprintf(os.Stderr, "warn: ASN DB fetch: %v\n", err) fmt.Fprintf(os.Stderr, "warn: ASN DB: %v\n", err)
} }
} }
openGeoIPReader(resolvedCityPath, &cityDB)
openGeoIPReader(resolvedASNPath, &asnDB)
// Keep everything fresh in the background if running as a daemon. // Keep everything fresh in the background.
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
go runLoop(ctx, src, &whitelist, &inbound, &outbound, go blGroup.Run(ctx, 47*time.Minute)
cityCacher, asnCacher, &cityDB, &asnDB) if cityDS != nil {
go cityDS.Run(ctx, 47*time.Minute)
}
if asnDS != nil {
go asnDS.Run(ctx, 47*time.Minute)
}
// Check and report. // Check and report.
blockedInbound := containsInbound(ipStr, &whitelist, &inbound) blockedInbound := containsInbound(ipStr, whitelistDS, inboundDS)
blockedOutbound := containsOutbound(ipStr, &whitelist, &outbound) blockedOutbound := containsOutbound(ipStr, whitelistDS, outboundDS)
switch { switch {
case blockedInbound && blockedOutbound: case blockedInbound && blockedOutbound:
@ -154,149 +162,112 @@ func main() {
fmt.Printf("%s is allowed\n", ipStr) fmt.Printf("%s is allowed\n", ipStr)
} }
printGeoInfo(ipStr, &cityDB, &asnDB) printGeoInfo(ipStr, cityDS, asnDS)
if blockedInbound || blockedOutbound { if blockedInbound || blockedOutbound {
os.Exit(1) os.Exit(1)
} }
} }
func openGeoIPReader(path string, ptr *atomic.Pointer[geoip2.Reader]) { // newGeoIPDataset creates a Dataset[geoip2.Reader]. If d is nil, only
if path == "" { // opens the existing file (no download). Close is wired to Reader.Close.
return func newGeoIPDataset(d *geoip.Downloader, edition, path string) *dataset.Dataset[geoip2.Reader] {
} var syncer interface{ Fetch() (bool, error) }
r, err := geoip2.Open(path) if d != nil {
if err != nil { syncer = d.NewCacher(edition, path)
return } else {
} syncer = &nopSyncer{}
if old := ptr.Swap(r); old != nil {
old.Close()
} }
ds := dataset.New(syncer, func() (*geoip2.Reader, error) {
return geoip2.Open(path)
})
ds.Name = edition
ds.Close = func(r *geoip2.Reader) { r.Close() }
return ds
} }
func runLoop(ctx context.Context, src *Sources, // nopSyncer satisfies httpcache.Syncer for file-only datasets (no download).
whitelist, inbound, outbound *atomic.Pointer[ipcohort.Cohort], type nopSyncer struct{}
cityCacher, asnCacher *httpcache.Cacher,
cityDB, asnDB *atomic.Pointer[geoip2.Reader],
) {
ticker := time.NewTicker(47 * time.Minute)
defer ticker.Stop()
for { func (n *nopSyncer) Fetch() (bool, error) { return false, nil }
select {
case <-ticker.C:
// Blocklists.
if updated, err := src.Fetch(); err != nil {
fmt.Fprintf(os.Stderr, "error: blocklist sync: %v\n", err)
} else if updated {
if err := reloadBlocklists(src, whitelist, inbound, outbound); err != nil {
fmt.Fprintf(os.Stderr, "error: blocklist reload: %v\n", err)
} else {
fmt.Fprintf(os.Stderr, "reloaded: inbound=%d outbound=%d\n",
cohortSize(inbound), cohortSize(outbound))
}
}
// GeoIP DBs. func containsInbound(ip string,
if cityCacher != nil { whitelist, inbound *dataset.Dataset[ipcohort.Cohort],
if updated, err := cityCacher.Fetch(); err != nil { ) bool {
fmt.Fprintf(os.Stderr, "error: city DB sync: %v\n", err) if whitelist != nil {
} else if updated { if wl := whitelist.Load(); wl != nil && wl.Contains(ip) {
openGeoIPReader(cityCacher.Path, cityDB) return false
fmt.Fprintf(os.Stderr, "reloaded: %s\n", cityCacher.Path)
}
}
if asnCacher != nil {
if updated, err := asnCacher.Fetch(); err != nil {
fmt.Fprintf(os.Stderr, "error: ASN DB sync: %v\n", err)
} else if updated {
openGeoIPReader(asnCacher.Path, asnDB)
fmt.Fprintf(os.Stderr, "reloaded: %s\n", asnCacher.Path)
}
}
case <-ctx.Done():
return
} }
} }
} if inbound == nil {
func printGeoInfo(ipStr string, cityDB, asnDB *atomic.Pointer[geoip2.Reader]) {
ip, err := netip.ParseAddr(ipStr)
if err != nil {
return
}
stdIP := ip.AsSlice()
if r := cityDB.Load(); r != nil {
if rec, err := r.City(stdIP); err == nil {
city := rec.City.Names["en"]
country := rec.Country.Names["en"]
iso := rec.Country.IsoCode
var parts []string
if city != "" {
parts = append(parts, city)
}
if len(rec.Subdivisions) > 0 {
if sub := rec.Subdivisions[0].Names["en"]; sub != "" && sub != city {
parts = append(parts, sub)
}
}
if country != "" {
parts = append(parts, fmt.Sprintf("%s (%s)", country, iso))
}
if len(parts) > 0 {
fmt.Printf(" Location: %s\n", strings.Join(parts, ", "))
}
}
}
if r := asnDB.Load(); r != nil {
if rec, err := r.ASN(stdIP); err == nil && rec.AutonomousSystemNumber != 0 {
fmt.Printf(" ASN: AS%d %s\n",
rec.AutonomousSystemNumber, rec.AutonomousSystemOrganization)
}
}
}
func reloadBlocklists(src *Sources,
whitelist, inbound, outbound *atomic.Pointer[ipcohort.Cohort],
) error {
if wl, err := src.LoadWhitelist(); err != nil {
return err
} else if wl != nil {
whitelist.Store(wl)
}
if in, err := src.LoadInbound(); err != nil {
return err
} else if in != nil {
inbound.Store(in)
}
if out, err := src.LoadOutbound(); err != nil {
return err
} else if out != nil {
outbound.Store(out)
}
return nil
}
func containsInbound(ip string, whitelist, inbound *atomic.Pointer[ipcohort.Cohort]) bool {
if wl := whitelist.Load(); wl != nil && wl.Contains(ip) {
return false return false
} }
c := inbound.Load() c := inbound.Load()
return c != nil && c.Contains(ip) return c != nil && c.Contains(ip)
} }
func containsOutbound(ip string, whitelist, outbound *atomic.Pointer[ipcohort.Cohort]) bool { func containsOutbound(ip string,
if wl := whitelist.Load(); wl != nil && wl.Contains(ip) { whitelist, outbound *dataset.Dataset[ipcohort.Cohort],
) bool {
if whitelist != nil {
if wl := whitelist.Load(); wl != nil && wl.Contains(ip) {
return false
}
}
if outbound == nil {
return false return false
} }
c := outbound.Load() c := outbound.Load()
return c != nil && c.Contains(ip) return c != nil && c.Contains(ip)
} }
func cohortSize(ptr *atomic.Pointer[ipcohort.Cohort]) int { func printGeoInfo(ipStr string, cityDS, asnDS *dataset.Dataset[geoip2.Reader]) {
if c := ptr.Load(); c != nil { ip, err := netip.ParseAddr(ipStr)
if err != nil {
return
}
stdIP := ip.AsSlice()
if cityDS != nil {
if r := cityDS.Load(); r != nil {
if rec, err := r.City(stdIP); err == nil {
city := rec.City.Names["en"]
country := rec.Country.Names["en"]
iso := rec.Country.IsoCode
var parts []string
if city != "" {
parts = append(parts, city)
}
if len(rec.Subdivisions) > 0 {
if sub := rec.Subdivisions[0].Names["en"]; sub != "" && sub != city {
parts = append(parts, sub)
}
}
if country != "" {
parts = append(parts, fmt.Sprintf("%s (%s)", country, iso))
}
if len(parts) > 0 {
fmt.Printf(" Location: %s\n", strings.Join(parts, ", "))
}
}
}
}
if asnDS != nil {
if r := asnDS.Load(); r != nil {
if rec, err := r.ASN(stdIP); err == nil && rec.AutonomousSystemNumber != 0 {
fmt.Printf(" ASN: AS%d %s\n",
rec.AutonomousSystemNumber, rec.AutonomousSystemOrganization)
}
}
}
}
func cohortSize(ds *dataset.Dataset[ipcohort.Cohort]) int {
if ds == nil {
return 0
}
if c := ds.Load(); c != nil {
return c.Size() return c.Size()
} }
return 0 return 0