2 changes: 1 addition & 1 deletion .github/workflows/go.yml
@@ -25,4 +25,4 @@ jobs:
run: go build -v ./...

- name: Test
run: go test -v ./...
run: go test ./...
17 changes: 8 additions & 9 deletions graph/algorithm/betweenness_centrality.go
@@ -5,16 +5,15 @@ import (
"sync"

"github.com/elecbug/netkit/graph"
"github.com/elecbug/netkit/graph/node"
)

// BetweennessCentrality computes betweenness centrality using cached all shortest paths.
// - Uses AllShortestPaths(g, cfg) (assumed cached/fast) to enumerate all shortest paths.
// - For each pair (s,t), each interior node on a shortest path gets 1/|SP(s,t)| credit.
// - Undirected graphs enqueue only i<j pairs (no double counting).
// - Normalization matches NetworkX: undirected => 2/((n-1)(n-2)), directed => 1/((n-1)(n-2)).
func BetweennessCentrality(g *graph.Graph, cfg *Config) map[node.ID]float64 {
res := make(map[node.ID]float64)
func BetweennessCentrality(g *graph.Graph, cfg *Config) map[graph.NodeID]float64 {
res := make(map[graph.NodeID]float64)
if g == nil {
return res
}
@@ -46,30 +45,30 @@ func BetweennessCentrality(g *graph.Graph, cfg *Config) map[node.ID]float64 {
}

// Use cached all-pairs shortest paths.
// Type: map[node.ID]map[node.ID][]path.Path
// Type: map[graph.NodeID]map[graph.NodeID][]path.Path
all := AllShortestPaths(g, cfg)

// Build an index for stable iteration and pair generation.
idxOf := make(map[node.ID]int, n)
idxOf := make(map[graph.NodeID]int, n)
for i, u := range ids {
idxOf[u] = i
}

type pair struct{ s, t node.ID }
type pair struct{ s, t graph.NodeID }
isUndirected := g.IsBidirectional()

// Generate all (s,t) jobs.
jobs := make(chan pair, n)
var wg sync.WaitGroup

// Global accumulator with lock; each worker keeps a local map to minimize contention.
global := make(map[node.ID]float64, n)
global := make(map[graph.NodeID]float64, n)
var mu sync.Mutex

// Worker: consume pairs and accumulate contributions into a local map, then merge.
workerFn := func() {
defer wg.Done()
local := make(map[node.ID]float64, n)
local := make(map[graph.NodeID]float64, n)

for job := range jobs {
row, ok := all[job.s]
@@ -84,7 +83,7 @@ func BetweennessCentrality(g *graph.Graph, cfg *Config) map[node.ID]float64 {

// For each shortest path s->...->t, every interior node gets 1/den.
for _, pth := range pathsST {
seq := pth.Nodes() // []node.ID; interior nodes are [1 : len-1)
seq := pth.Nodes() // []graph.NodeID; interior nodes are [1 : len-1)
if len(seq) <= 2 {
continue // no interior node
}
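For orientation, the credit rule in the doc comment above reduces to a small accumulation step per (s, t) pair: every interior node on every shortest path receives 1/|SP(s,t)|. A minimal generic sketch of just that step, with ID standing in for graph.NodeID and the paths assumed to be pre-computed node sequences (not the package's actual API):

func accumulatePairCredit[ID comparable](paths [][]ID, acc map[ID]float64) {
	if len(paths) == 0 {
		return // s and t are disconnected; nothing to credit
	}
	den := float64(len(paths)) // |SP(s,t)|
	for _, seq := range paths {
		if len(seq) <= 2 {
			continue // direct edge or trivial path: no interior node
		}
		for _, v := range seq[1 : len(seq)-1] { // interior nodes only
			acc[v] += 1.0 / den
		}
	}
}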
22 changes: 17 additions & 5 deletions graph/algorithm/cache.go
@@ -2,18 +2,30 @@ package algorithm

import (
"sync"
"time"

"github.com/elecbug/netkit/graph/path"
"github.com/elecbug/netkit/graph"
)

var cachedAllShortestPaths = make(map[string]path.GraphPaths)
var cachedAllShortestPathLengths = make(map[string]path.PathLength)
var cachedAllShortestPaths = make(map[string]graph.Paths)
var cachedAllShortestPathLengths = make(map[string]graph.PathLength)
var cacheMu sync.RWMutex

// CacheClear clears the cached shortest paths and their lengths.
func CacheClear() {
cacheMu.Lock()
defer cacheMu.Unlock()
cachedAllShortestPaths = make(map[string]path.GraphPaths)
cachedAllShortestPathLengths = make(map[string]path.PathLength)
cachedAllShortestPaths = make(map[string]graph.Paths)
cachedAllShortestPathLengths = make(map[string]graph.PathLength)
}

// AutoCacheClear starts a goroutine that clears the cache at regular intervals defined by tick.
func AutoCacheClear(tick time.Duration) {
go func() {
for {
time.Sleep(tick)

CacheClear()
}
}()
}
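A hypothetical call site for the new helper, assuming the package is imported as algorithm; the goroutine runs for the lifetime of the process, so a time.Ticker plus a done channel would be needed if cancellation ever matters:

// Periodically drop the cached all-pairs results so a long-lived service
// does not keep serving paths computed against an outdated graph.
algorithm.AutoCacheClear(10 * time.Minute)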
5 changes: 2 additions & 3 deletions graph/algorithm/closeness_centrality.go
@@ -2,7 +2,6 @@ package algorithm

import (
"github.com/elecbug/netkit/graph"
"github.com/elecbug/netkit/graph/node"
)

// ClosenessCentrality computes NetworkX-compatible closeness centrality.
@@ -17,8 +16,8 @@ import (
// Requirements:
// - AllShortestPaths(g, cfg) must respect directedness of g.
// - cfg.Closeness.WfImproved follows NetworkX default (true) unless overridden.
func ClosenessCentrality(g *graph.Graph, cfg *Config) map[node.ID]float64 {
out := make(map[node.ID]float64)
func ClosenessCentrality(g *graph.Graph, cfg *Config) map[graph.NodeID]float64 {
out := make(map[graph.NodeID]float64)
if g == nil {
return out
}
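The Wasserman-Faust option mentioned in the doc comment scales the classic value r/total by r/(n-1), where r is the number of other nodes with a finite shortest-path distance and total is the sum of those distances. A standalone sketch under those assumptions (this mirrors the NetworkX definition, not necessarily the package's internal layout):

func closenessOf(dists []float64, n int, wfImproved bool) float64 {
	r := len(dists) // reachable nodes, excluding the node itself
	if r == 0 || n <= 1 {
		return 0
	}
	var total float64
	for _, d := range dists {
		total += d
	}
	if total == 0 {
		return 0
	}
	c := float64(r) / total
	if wfImproved {
		c *= float64(r) / float64(n-1) // Wasserman-Faust correction for disconnected graphs
	}
	return c
}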
25 changes: 12 additions & 13 deletions graph/algorithm/clustering_coefficient.go
@@ -5,15 +5,14 @@ import (
"sync"

"github.com/elecbug/netkit/graph"
"github.com/elecbug/netkit/graph/node"
)

// ClusteringCoefficientAll computes local clustering coefficients for all nodes.
// - If g.IsBidirectional()==false (directed): Fagiolo (2007) directed clustering (matches NetworkX).
// - If g.IsBidirectional()==true (undirected): standard undirected clustering.
// Returns map[node.ID]float64 with a value for every node in g.
func ClusteringCoefficient(g *graph.Graph, cfg *Config) map[node.ID]float64 {
res := make(map[node.ID]float64)
// Returns map[graph.NodeID]float64 with a value for every node in g.
func ClusteringCoefficient(g *graph.Graph, cfg *Config) map[graph.NodeID]float64 {
res := make(map[graph.NodeID]float64)
if g == nil {
return res
}
@@ -35,10 +34,10 @@ func ClusteringCoefficient(g *graph.Graph, cfg *Config) map[node.ID]float64 {
// Build helper structures
// outNeighbors[v] = slice of out-neighbors of v (exclude self)
// inNeighbors[v] = slice of in-neighbors of v (exclude self) - only needed for directed
outNeighbors := make(map[node.ID][]node.ID, n)
outNeighbors := make(map[graph.NodeID][]graph.NodeID, n)
for _, v := range nodes {
ns := g.Neighbors(v)
buf := make([]node.ID, 0, len(ns))
buf := make([]graph.NodeID, 0, len(ns))
for _, w := range ns {
if w != v {
buf = append(buf, w)
@@ -48,9 +47,9 @@ func ClusteringCoefficient(g *graph.Graph, cfg *Config) map[node.ID]float64 {
}

isDirected := !g.IsBidirectional()
var inNeighbors map[node.ID][]node.ID
var inNeighbors map[graph.NodeID][]graph.NodeID
if isDirected {
inNeighbors = make(map[node.ID][]node.ID, n)
inNeighbors = make(map[graph.NodeID][]graph.NodeID, n)
for _, u := range nodes {
for _, w := range outNeighbors[u] {
// u -> w, so u is in-neighbor of w
Expand All @@ -59,13 +58,13 @@ func ClusteringCoefficient(g *graph.Graph, cfg *Config) map[node.ID]float64 {
}
}

type job struct{ v node.ID }
type job struct{ v graph.NodeID }
jobs := make(chan job, workers*2)
var wg sync.WaitGroup
var mu sync.Mutex // protects res map

// Edge multiplicity for Fagiolo: b(u,v) = a_uv + a_vu ∈ {0,1,2}
b := func(u, v node.ID) int {
b := func(u, v graph.NodeID) int {
sum := 0

if g.HasEdge(u, v) {
@@ -98,7 +97,7 @@ func ClusteringCoefficient(g *graph.Graph, cfg *Config) map[node.ID]float64 {
mu.Unlock()
continue
}
outSet := make(map[node.ID]struct{}, kOut)
outSet := make(map[graph.NodeID]struct{}, kOut)
for _, w := range outNeighbors[v] {
outSet[w] = struct{}{}
}
@@ -119,15 +118,15 @@ func ClusteringCoefficient(g *graph.Graph, cfg *Config) map[node.ID]float64 {

// T_v = sum_{j != k} b(v,j) * b(j,k) * b(k,v)
// with j,k in tot = in(v) ∪ out(v)
totSet := make(map[node.ID]struct{}, kTot) // upper bound
totSet := make(map[graph.NodeID]struct{}, kTot) // upper bound
for _, u := range outNeighbors[v] {
totSet[u] = struct{}{}
}
for _, u := range inNeighbors[v] {
totSet[u] = struct{}{}
}
// Make a slice to iterate
tot := make([]node.ID, 0, len(totSet))
tot := make([]graph.NodeID, 0, len(totSet))
for u := range totSet {
if u != v { // guard (shouldn't be in set anyway)
tot = append(tot, u)
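For the undirected branch the per-node value is the familiar 2*triangles/(k*(k-1)); the directed Fagiolo case in the diff is the harder one. A self-contained sketch of the undirected computation over a hypothetical adjacency-set view:

func undirectedClustering[ID comparable](adj map[ID]map[ID]struct{}, v ID) float64 {
	neigh := make([]ID, 0, len(adj[v]))
	for u := range adj[v] {
		if u != v { // ignore self-loops
			neigh = append(neigh, u)
		}
	}
	k := len(neigh)
	if k < 2 {
		return 0 // fewer than two neighbors: no possible triangle
	}
	links := 0
	for i := 0; i < k; i++ {
		for j := i + 1; j < k; j++ {
			if _, ok := adj[neigh[i]][neigh[j]]; ok {
				links++ // connected neighbor pair: one triangle through v
			}
		}
	}
	return 2.0 * float64(links) / float64(k*(k-1))
}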
9 changes: 4 additions & 5 deletions graph/algorithm/degree_assortativity_coefficient.go
@@ -6,7 +6,6 @@ import (
"math"

"github.com/elecbug/netkit/graph"
"github.com/elecbug/netkit/graph/node"
)

// DegreeAssortativityCoefficient computes Newman's degree assortativity coefficient (Pearson correlation)
@@ -54,16 +53,16 @@ func DegreeAssortativityCoefficient(g *graph.Graph, cfg *Config) float64 {
}

// Build an index for upper-triangle filtering on undirected graphs.
idxOf := make(map[node.ID]int, n)
idxOf := make(map[graph.NodeID]int, n)
for i, u := range ids {
idxOf[u] = i
}

// Degree caches
// NOTE: Replace the neighbor getters with your graph API if needed.
outDeg := make(map[node.ID]int, n)
inDeg := make(map[node.ID]int, n)
undeg := make(map[node.ID]int, n)
outDeg := make(map[graph.NodeID]int, n)
inDeg := make(map[graph.NodeID]int, n)
undeg := make(map[graph.NodeID]int, n)

if isUndirected {
for _, u := range ids {
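Most of the computation is collapsed in this diff, but the coefficient itself is a Pearson correlation over per-edge degree pairs; for an undirected graph each edge {u, v} contributes both (deg u, deg v) and (deg v, deg u). A sketch of only the correlation step, assuming the two series are already collected and math is imported:

// pearson returns the sample Pearson correlation of two equal-length series.
func pearson(xs, ys []float64) float64 {
	n := float64(len(xs))
	if n == 0 || len(xs) != len(ys) {
		return math.NaN()
	}
	var sx, sy float64
	for i := range xs {
		sx += xs[i]
		sy += ys[i]
	}
	mx, my := sx/n, sy/n
	var cov, vx, vy float64
	for i := range xs {
		dx, dy := xs[i]-mx, ys[i]-my
		cov += dx * dy
		vx += dx * dx
		vy += dy * dy
	}
	if vx == 0 || vy == 0 {
		return math.NaN() // degenerate: all endpoint degrees equal on one side
	}
	return cov / math.Sqrt(vx*vy)
}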
7 changes: 3 additions & 4 deletions graph/algorithm/degree_centrality.go
@@ -5,7 +5,6 @@ import (
"sync"

"github.com/elecbug/netkit/graph"
"github.com/elecbug/netkit/graph/node"
)

// DegreeCentralityConfig (suggested to be added inside your config package)
@@ -32,8 +31,8 @@ import (
// - Undirected: deg(u)/(n-1).
// - Directed (default "total"): (in(u)+out(u))/(n-1). Use "in"/"out" for the specific variants.
// Self-loops are ignored for centrality.
func DegreeCentrality(g *graph.Graph, cfg *Config) map[node.ID]float64 {
res := make(map[node.ID]float64)
func DegreeCentrality(g *graph.Graph, cfg *Config) map[graph.NodeID]float64 {
res := make(map[graph.NodeID]float64)
if g == nil {
return res
}
@@ -55,7 +54,7 @@ func DegreeCentrality(g *graph.Graph, cfg *Config) map[node.ID]float64 {
}

// --- indexing ---
idxOf := make(map[node.ID]int, n)
idxOf := make(map[graph.NodeID]int, n)
for i, u := range ids {
idxOf[u] = i
}
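The normalization described in the doc comment is simply degree/(n-1). A minimal generic sketch for the undirected case, assuming the degree map already excludes self-loops:

func degreeCentrality[ID comparable](degree map[ID]int, n int) map[ID]float64 {
	out := make(map[ID]float64, len(degree))
	if n <= 1 {
		return out // single node or empty graph: nothing to normalize
	}
	for v, d := range degree {
		out[v] = float64(d) / float64(n-1)
	}
	return out
}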
29 changes: 14 additions & 15 deletions graph/algorithm/edge_betweenness_centrality.go
@@ -5,10 +5,9 @@ import (
"sync"

"github.com/elecbug/netkit/graph"
"github.com/elecbug/netkit/graph/node"
)

func makeEdgeKey(u, v node.ID, undirected bool) (node.ID, node.ID) {
func makeEdgeKey(u, v graph.NodeID, undirected bool) (graph.NodeID, graph.NodeID) {
if undirected && v < u {
u, v = v, u
}
@@ -34,11 +33,11 @@ func makeEdgeKey(u, v node.ID, undirected bool) (node.ID, node.ID) {
// in Brandes accumulation (same practice as NetworkX).
//
// Returns:
// - map[node.ID]map[node.ID]float64 where:
// - map[graph.NodeID]map[graph.NodeID]float64 where:
// - Undirected: key is canonical [min(u,v), max(u,v)]
// - Directed: key is (u,v) ordered
func EdgeBetweennessCentrality(g *graph.Graph, cfg *Config) map[node.ID]map[node.ID]float64 {
out := make(map[node.ID]map[node.ID]float64)
func EdgeBetweennessCentrality(g *graph.Graph, cfg *Config) map[graph.NodeID]map[graph.NodeID]float64 {
out := make(map[graph.NodeID]map[graph.NodeID]float64)
if g == nil {
return out
}
@@ -79,15 +78,15 @@ func EdgeBetweennessCentrality(g *graph.Graph, cfg *Config) map[node.ID]map[node
u, v := makeEdgeKey(u, v, isUndirected)

if out[u] == nil {
out[u] = make(map[node.ID]float64)
out[u] = make(map[graph.NodeID]float64)
}

out[u][v] = 0.0
}
}

// ----- worker pool over source nodes -----
type job struct{ s node.ID }
type job struct{ s graph.NodeID }
jobs := make(chan job, n)

var mu sync.Mutex
@@ -97,16 +96,16 @@ func EdgeBetweennessCentrality(g *graph.Graph, cfg *Config) map[node.ID]map[node
defer wg.Done()

// Local accumulator to reduce lock contention
local := make(map[node.ID]map[node.ID]float64, 64)
local := make(map[graph.NodeID]map[graph.NodeID]float64, 64)

for jb := range jobs {
s := jb.s

// Brandes data structures
stack := make([]node.ID, 0, n)
preds := make(map[node.ID][]node.ID, n)
sigma := make(map[node.ID]float64, n)
dist := make(map[node.ID]int, n)
stack := make([]graph.NodeID, 0, n)
preds := make(map[graph.NodeID][]graph.NodeID, n)
sigma := make(map[graph.NodeID]float64, n)
dist := make(map[graph.NodeID]int, n)

for _, v := range ids {
dist[v] = -1
@@ -115,7 +114,7 @@ func EdgeBetweennessCentrality(g *graph.Graph, cfg *Config) map[node.ID]map[node
dist[s] = 0

// BFS (unweighted)
q := []node.ID{s}
q := []graph.NodeID{s}
for len(q) > 0 {
v := q[0]
q = q[1:]
@@ -136,7 +135,7 @@ func EdgeBetweennessCentrality(g *graph.Graph, cfg *Config) map[node.ID]map[node
}

// Dependency accumulation
delta := make(map[node.ID]float64, n)
delta := make(map[graph.NodeID]float64, n)
for len(stack) > 0 {
w := stack[len(stack)-1]
stack = stack[:len(stack)-1]
@@ -150,7 +149,7 @@ func EdgeBetweennessCentrality(g *graph.Graph, cfg *Config) map[node.ID]map[node
eu, ev := makeEdgeKey(v, w, isUndirected)

if local[eu] == nil {
local[eu] = make(map[node.ID]float64)
local[eu] = make(map[graph.NodeID]float64)
}

local[eu][ev] += c
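Since undirected scores are stored under a canonical key (smaller ID first, exactly what makeEdgeKey produces), callers have to order the endpoints the same way when reading the result. A hypothetical lookup helper, assuming graph.NodeID is an ordered type (cmp.Ordered, Go 1.21+):

func edgeScore[ID cmp.Ordered](ebc map[ID]map[ID]float64, u, v ID) float64 {
	if v < u {
		u, v = v, u // match the canonical [min(u,v), max(u,v)] key for undirected graphs
	}
	return ebc[u][v]
}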
9 changes: 4 additions & 5 deletions graph/algorithm/eigenvector_centrality.go
@@ -6,7 +6,6 @@ import (
"sync"

"github.com/elecbug/netkit/graph"
"github.com/elecbug/netkit/graph/node"
)

// EigenvectorCentrality computes eigenvector centrality using parallel power iteration.
@@ -16,8 +15,8 @@ import (
// Set Reverse=true to use successors/out-edges (right eigenvector).
//
// Unweighted edges are assumed. The result vector is L2-normalized (sum of squares == 1).
func EigenvectorCentrality(g *graph.Graph, cfg *Config) map[node.ID]float64 {
out := make(map[node.ID]float64)
func EigenvectorCentrality(g *graph.Graph, cfg *Config) map[graph.NodeID]float64 {
out := make(map[graph.NodeID]float64)
if g == nil {
return out
}
@@ -26,7 +25,7 @@ func EigenvectorCentrality(g *graph.Graph, cfg *Config) map[node.ID]float64 {
maxIter := 100
tol := 1e-6
reverse := false
var nstart *map[node.ID]float64
var nstart *map[graph.NodeID]float64
workers := runtime.NumCPU()

if cfg != nil {
@@ -54,7 +53,7 @@ func EigenvectorCentrality(g *graph.Graph, cfg *Config) map[node.ID]float64 {
if n == 0 {
return out
}
idxOf := make(map[node.ID]int, n)
idxOf := make(map[graph.NodeID]int, n)
for i, u := range ids {
idxOf[u] = i
}
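The power iteration described in the doc comment repeatedly applies the adjacency operator, L2-normalizes, and stops once the total change falls below n*tol. A dense, single-threaded sketch under those assumptions (math must be imported; a[i][j] is the weight of the edge feeding node i, and the uniform start vector stands in for a nil nstart):

func powerIteration(a [][]float64, maxIter int, tol float64) []float64 {
	n := len(a)
	if n == 0 {
		return nil
	}
	x := make([]float64, n)
	for i := range x {
		x[i] = 1.0 / float64(n) // uniform start vector
	}
	for iter := 0; iter < maxIter; iter++ {
		next := make([]float64, n)
		for i := 0; i < n; i++ {
			for j := 0; j < n; j++ {
				next[i] += a[i][j] * x[j]
			}
		}
		var norm float64
		for _, v := range next {
			norm += v * v
		}
		norm = math.Sqrt(norm)
		if norm == 0 {
			return x // no edges: keep the start vector
		}
		var diff float64
		for i := range next {
			next[i] /= norm // L2-normalize every iteration
			diff += math.Abs(next[i] - x[i])
		}
		x = next
		if diff < tol*float64(n) {
			break // converged
		}
	}
	return x
}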