refactor: dataset.Add returns View[T] instead of Dataset[T]

Group-managed datasets must never have Init/Sync/Run called on them.
Rather than patching with NopSyncer, introduce View[T] — a thin wrapper
that exposes only Load(). The compiler now prevents misuse: callers can
read values but cannot drive fetch/reload cycles directly.

Dataset[T] no longer needs a syncer when owned by a Group; View.reload()
delegates to the inner Dataset.reload() for Group.reloadAll().
This commit is contained in:
AJ ONeal 2026-04-20 09:50:48 -06:00
parent 896031b6a8
commit ad5d696ce6
No known key found for this signature in database
3 changed files with 23 additions and 12 deletions

View File

@ -124,13 +124,24 @@ func NewGroup(syncer httpcache.Syncer) *Group {
return &Group{syncer: syncer} return &Group{syncer: syncer}
} }
// View is the read-only handle returned by Add. It exposes only Load —
// fetch and reload are driven by the owning Group.
type View[T any] struct {
	// d is the Group-owned dataset; View deliberately does not expose
	// its Init/Sync/Run surface so callers cannot drive fetch cycles.
	d *Dataset[T]
}

// Load returns the current value. Returns nil before the Group is initialised.
func (v *View[T]) Load() *T { return v.d.ptr.Load() }

// reload delegates to the inner Dataset so Group.reloadAll can refresh the
// value; it is unexported so only the Group can trigger it.
func (v *View[T]) reload() error { return v.d.reload() }
// Add registers a new dataset in g and returns a View. Call Load to read the
// current value. Drive updates by calling Init/Sync/Run on the Group.
// load is a closure capturing whatever paths or config it needs. // load is a closure capturing whatever paths or config it needs.
func Add[T any](g *Group, load func() (*T, error)) *Dataset[T] { func Add[T any](g *Group, load func() (*T, error)) *View[T] {
d := &Dataset[T]{syncer: httpcache.NopSyncer{}, load: load} v := &View[T]{d: &Dataset[T]{load: load}}
g.members = append(g.members, d) g.members = append(g.members, v)
return d return v
} }
// Init fetches once then reloads all registered datasets.

View File

@ -82,9 +82,9 @@ func (s *Sources) Fetch() (bool, error) {
// outbound may be nil if no paths were configured. // outbound may be nil if no paths were configured.
func (s *Sources) Datasets() ( func (s *Sources) Datasets() (
g *dataset.Group, g *dataset.Group,
whitelist *dataset.Dataset[ipcohort.Cohort], whitelist *dataset.View[ipcohort.Cohort],
inbound *dataset.Dataset[ipcohort.Cohort], inbound *dataset.View[ipcohort.Cohort],
outbound *dataset.Dataset[ipcohort.Cohort], outbound *dataset.View[ipcohort.Cohort],
) { ) {
g = dataset.NewGroup(s) g = dataset.NewGroup(s)
if len(s.whitelistPaths) > 0 { if len(s.whitelistPaths) > 0 {

View File

@ -185,7 +185,7 @@ func newGeoIPDataset(d *geoip.Downloader, edition, path string) *dataset.Dataset
} }
func containsInbound(ip string, func containsInbound(ip string,
whitelist, inbound *dataset.Dataset[ipcohort.Cohort], whitelist, inbound *dataset.View[ipcohort.Cohort],
) bool { ) bool {
if whitelist != nil { if whitelist != nil {
if wl := whitelist.Load(); wl != nil && wl.Contains(ip) { if wl := whitelist.Load(); wl != nil && wl.Contains(ip) {
@ -200,7 +200,7 @@ func containsInbound(ip string,
} }
func containsOutbound(ip string, func containsOutbound(ip string,
whitelist, outbound *dataset.Dataset[ipcohort.Cohort], whitelist, outbound *dataset.View[ipcohort.Cohort],
) bool { ) bool {
if whitelist != nil { if whitelist != nil {
if wl := whitelist.Load(); wl != nil && wl.Contains(ip) { if wl := whitelist.Load(); wl != nil && wl.Contains(ip) {
@ -256,7 +256,7 @@ func printGeoInfo(ipStr string, cityDS, asnDS *dataset.Dataset[geoip2.Reader]) {
} }
} }
func cohortSize(ds *dataset.Dataset[ipcohort.Cohort]) int { func cohortSize(ds *dataset.View[ipcohort.Cohort]) int {
if ds == nil { if ds == nil {
return 0 return 0
} }