diff --git a/R/ernm-fit.R b/R/ernm-fit.R
index c9af743..73163ac 100644
--- a/R/ernm-fit.R
+++ b/R/ernm-fit.R
@@ -80,7 +80,8 @@ ernmFit <- function(sampler,
                 parinit=theta0,
                 rinit=1,
                 rmax=100,
-                parscale=rep(1,length(theta0)), minimize=FALSE,
+                parscale=rep(1,length(theta0)),
+                minimize=FALSE,
                 sample=sample,
                 theta0=theta0,
                 stats=stats))
diff --git a/inst/include/Model.h b/inst/include/Model.h
index f2a4f24..2dd9601 100644
--- a/inst/include/Model.h
+++ b/inst/include/Model.h
@@ -524,6 +524,8 @@ class Model : public ShallowCopyable{
    void discreteVertexUpdateR(int vertex, std::string varName, int newValue){
        if(vertex > net->size())
            ::Rf_error("vertex index is bigger than the size of the network");
+       if(vertex < 0)
+           ::Rf_error("vertex index is negative in C++ end - did you enter 0 in R end?");
        std::vector<std::string> vars = net->discreteVarNames();
        int variable = -1;
        for(int i=0;i<vars.size();i++){
@@ ... @@ class Model : public ShallowCopyable{
    void continVertexUpdateR(int vertex, std::string varName, double newValue){
        if(vertex > net->size())
            ::Rf_error("vertex index is bigger than the size of the network");
+       if(vertex < 0)
+           ::Rf_error("vertex index is negative in C++ end - did you enter 0 in R end?");
        std::vector<std::string> vars = net->continVarNames();
        int variable = -1;
        for(int i=0;i<vars.size();i++){
diff --git a/inst/include/Stats.h b/inst/include/Stats.h
--- a/inst/include/Stats.h
+++ b/inst/include/Stats.h
@@ ... @@ class NodeMatch : public BaseStat< Engine > {
 typedef Stat<Directed, NodeMatch<Directed> > DirectedNodeMatch;
 typedef Stat<Undirected, NodeMatch<Undirected> > UndirectedNodeMatch;
 
+/*!
+ * Adds a statistic for the sum over the edges of the absolute value of the
+ * difference between the values of a continuous nodal covariate.
+ */
+template<class Engine>
+class AbsDiff : public BaseStat< Engine > {
+protected:
+    typedef typename BinaryNet<Engine>::NeighborIterator NeighborIterator;
+    std::string variableName; /*!< the name of the continuous variable */
+    int varIndex; /*!< the index of the variable in the network */
+public:
+    AbsDiff(){
+        variableName="";
+        varIndex = -1;
+    }
+
+    AbsDiff(std::string name){
+        variableName=name;
+        varIndex = -1;
+    }
+
+    AbsDiff(List params){
+        varIndex = -1;
+        try{
+            variableName = as< std::string >(params[0]);
+        }catch(...){
+            ::Rf_error("AbsDiff requires a nodal variable name");
+        }
+    }
+
+    std::string name(){
+        return "absDiff";
+    }
+
+    std::vector<std::string> statNames(){
+        std::vector<std::string> statnames(1,"absDiff."+variableName);
+        return statnames;
+    }
+
+    void calculate(const BinaryNet<Engine>& net){
+        int from,to;
+        double value1, value2;
+        std::vector<std::string> vars = net.continVarNames();
+        int variableIndex = -1;
+        for(int i=0;i<vars.size();i++){
+            if(vars[i] == variableName)
+                variableIndex = i;
+        }
+        if(variableIndex < 0)
+            ::Rf_error("AbsDiff: nodal variable not found in network");
+        varIndex = variableIndex;
+        int nstats = 1;
+        this->stats = std::vector<double>(nstats,0.0);
+        if(this->thetas.size() != nstats)
+            this->thetas = std::vector<double>(nstats,0.0);
+        boost::shared_ptr< std::vector< std::pair<int,int> > > edges = net.edgelist();
+        for(int i=0;i<edges->size();i++){
+            from = (*edges)[i].first;
+            to = (*edges)[i].second;
+            value1 = net.continVariableValue(varIndex,from) - 1;
+            value2 = net.continVariableValue(varIndex,to) - 1;
+            // absolute value of the difference:
+            this->stats[0] += std::fabs(value1 - value2);
+        }
+    }
+
+    void dyadUpdate(const BinaryNet<Engine>& net, int from, int to){
+        bool addingEdge = !net.hasEdge(from,to);
+        double value1 = net.continVariableValue(varIndex,from) - 1;
+        double value2 = net.continVariableValue(varIndex,to) - 1;
+        if(value1!=value2){
+            if(addingEdge)
+                this->stats[0] += std::fabs(value1 - value2);
+            else
+                this->stats[0] -= std::fabs(value1 - value2);
+        }
+    }
+
+    void discreteVertexUpdate(const BinaryNet<Engine>& net, int vert,
+            int variable, int newValue){}
+
+    void continVertexUpdate(const BinaryNet<Engine>& net, int vert,
+            int variable, double newValue){
+        if(variable != varIndex)
+            return;
+        double val = net.continVariableValue(varIndex,vert);
+        if(net.isDirected()){
+            NeighborIterator it = net.outBegin(vert);
+            NeighborIterator end = net.outEnd(vert);
+            while(it!=end){
+                double val2 = net.continVariableValue(varIndex,*it);
+                // remove old value:
+                this->stats[0] -= std::fabs(val - val2);
+                // add new value:
+                this->stats[0] += std::fabs(newValue - val2);
+                it++;
+            }
+            it = net.inBegin(vert);
+            end = net.inEnd(vert);
+            while(it!=end){
+                double val2 = net.continVariableValue(varIndex,*it);
+                // remove old value:
+                this->stats[0] -= std::fabs(val - val2);
+                // add new value:
+                this->stats[0] += std::fabs(newValue - val2);
+                it++;
+            }
+        }else{
+            NeighborIterator it = net.begin(vert);
+            NeighborIterator end = net.end(vert);
+            while(it!=end){
+                double val2 = net.continVariableValue(varIndex,*it);
+                // remove old value:
+                this->stats[0] -= std::fabs(val - val2);
+                // add new value:
+                this->stats[0] += std::fabs(newValue - val2);
+                it++;
+            }
+        }
+    }
+
+};
+
+typedef Stat<Directed, AbsDiff<Directed> > DirectedAbsDiff;
+typedef Stat<Undirected, AbsDiff<Undirected> > UndirectedAbsDiff;
 
 /*!
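The term added above is the total absolute covariate difference across edges, sum over edges (i,j) of |y_i - y_j|. As a quick illustration of what the statistic should equal, a minimal R sketch against a hand computation on a toy network (the covariate name "age" and the toy network are hypothetical, not part of this change):

    library(network)
    library(ernm)
    set.seed(1)
    # toy 10-node undirected network with a continuous covariate
    net <- as.network(matrix(rnorm(100) > 1, nrow = 10), directed = FALSE)
    set.vertex.attribute(net, "age", rnorm(10))
    el  <- as.edgelist(net)
    age <- net %v% "age"
    sum(abs(age[el[, 1]] - age[el[, 2]]))                   # hand computation
    ernm::calculateStatistics(net ~ absDiff("age") | age)   # should match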
+ yNames[0] + "Y.Y"; + for(int i=0;i& net){ + std::vector vars = net.continVarNames(); + + y_indices = std::vector(yNames.size(),-1); + x_indices = std::vector(xNames.size(),-1); + // Get the y varnames: + for(int i=0;istats = std::vector(nstats,0.0); + if(this->thetas.size()!=nstats){ + // set to negative so as not to explode y**2 + this->thetas = std::vector(nstats,0.0); + for(int i=0;ithetas[i] = -.5; + } + + // Do the intercept: + this->stats[y_indices.size()] = 1.0; + this->thetas[y_indices.size()] = 0.0; + + for(int i=0;istats[1 + y_indices.size() + i] = s; + this->stats[y_indices.size()-1] = ssq; + }else{ + for(int j=0;jstats[1 + y_indices.size() + i] = s; + } + } + } + } + + + void dyadUpdate(const BinaryNet& net, int from, int to){} + + void discreteVertexUpdate(const BinaryNet& net, int vert, + int variable, int newValue){} + + void continVertexUpdate(const BinaryNet& net, int vert, + int variable, double newValue){ + if(y_indices[0] == variable){ + for(int j=0;jstats[1 + y_indices.size() + j] += newValue * net.continVariableValue(x_indices[j], vert) - + net.continVariableValue(variable, vert) * net.continVariableValue(x_indices[j], vert); + } + this->stats[y_indices.size()-1] += pow(newValue, 2.0) - pow(net.continVariableValue(variable, vert), 2.0); + } + for(int i=0;istats[1 + y_indices.size() + i] += newValue * net.continVariableValue(y_indices[0], vert) - + net.continVariableValue(y_indices[0], vert) * net.continVariableValue(variable, vert); + } + } + } +}; + +typedef Stat > DirectedGaussRegression; +typedef Stat > UndirectedGaussRegression; + +template +class RegressNeighbors : public BaseStat< Engine > { +protected: + int nstats; /*!< the number of stats generated */ +typedef typename BinaryNet::NeighborIterator NeighborIterator; +int variableIndex, regIndex; +std::string variableName, regressorName; +bool isDiscrete; +public: + RegressNeighbors(){ + nstats = 1; + variableIndex=regIndex = 0; + } + + RegressNeighbors(std::string out,std::string reg){ + nstats = 1; + variableIndex=regIndex = 0; + variableName = out; + regressorName = reg; + } + + RegressNeighbors(std::string out,std::string reg,std::string base_val){ + nstats = 1; + variableIndex=regIndex = 0; + variableName = out; + regressorName = reg; + } + + RegressNeighbors(List params){ + nstats = 1; + variableIndex=regIndex = 0; + if(params.size() < 2){ + ::Rf_error("RegressNeighbors requires at least two arguments passed"); + } + try{ + variableName = as(params[0]); + }catch(...){ + ::Rf_error("RegressNeighbors requires a formula"); + } + try{ + regressorName = as(params[1]); + }catch(...){ + ::Rf_error("RegressNeighbors requires a formula"); + } + } + + std::string name(){ + return "regressNeighbors"; + } + + std::vector statNames(){ + std::vector statnames(1,"regressNeighbors"); + return statnames; + } + + void calculate(const BinaryNet& net){ + isDiscrete = false; + this->stats = std::vector(1,0.0); + this->thetas = std::vector(1,0.0); + std::vector vars = net.continVarNames(); + regIndex = -1; + for(int i=0;istats[0] += val*val1; + it++; + } + } + } + + void discreteVertexUpdate(const BinaryNet& net, int vert,int variable, int newValue){ + + if(variable != regIndex | !isDiscrete) + return; + double regValue = net.discreteVariableValue(regIndex,vert); + // need to step trough all neighbours and recalculate the effect + NeighborIterator it = net.begin(vert); + NeighborIterator end = net.end(vert); + while(it != end){ + double neighbor_varVal = net.continVariableValue(variableIndex,*it); + this->stats[0] -= 
+template<class Engine>
+class RegressNeighbors : public BaseStat< Engine > {
+protected:
+    typedef typename BinaryNet<Engine>::NeighborIterator NeighborIterator;
+    int nstats; /*!< the number of stats generated */
+    int variableIndex, regIndex;
+    std::string variableName, regressorName;
+    bool isDiscrete;
+public:
+    RegressNeighbors(){
+        nstats = 1;
+        variableIndex = regIndex = 0;
+    }
+
+    RegressNeighbors(std::string out,std::string reg){
+        nstats = 1;
+        variableIndex = regIndex = 0;
+        variableName = out;
+        regressorName = reg;
+    }
+
+    RegressNeighbors(std::string out,std::string reg,std::string base_val){
+        nstats = 1;
+        variableIndex = regIndex = 0;
+        variableName = out;
+        regressorName = reg;
+    }
+
+    RegressNeighbors(List params){
+        nstats = 1;
+        variableIndex = regIndex = 0;
+        if(params.size() < 2){
+            ::Rf_error("RegressNeighbors requires at least two arguments passed");
+        }
+        try{
+            variableName = as<std::string>(params[0]);
+        }catch(...){
+            ::Rf_error("RegressNeighbors requires a nodal variable name");
+        }
+        try{
+            regressorName = as<std::string>(params[1]);
+        }catch(...){
+            ::Rf_error("RegressNeighbors requires a regressor name");
+        }
+    }
+
+    std::string name(){
+        return "regressNeighbors";
+    }
+
+    std::vector<std::string> statNames(){
+        std::vector<std::string> statnames(1,"regressNeighbors");
+        return statnames;
+    }
+
+    void calculate(const BinaryNet<Engine>& net){
+        isDiscrete = false;
+        this->stats = std::vector<double>(1,0.0);
+        this->thetas = std::vector<double>(1,0.0);
+        std::vector<std::string> vars = net.continVarNames();
+        regIndex = -1;
+        for(int i=0;i<vars.size();i++){
+            if(vars[i] == variableName)
+                variableIndex = i;
+            if(vars[i] == regressorName)
+                regIndex = i;
+        }
+        if(regIndex < 0){
+            // fall back to a discrete regressor:
+            std::vector<std::string> dvars = net.discreteVarNames();
+            for(int i=0;i<dvars.size();i++){
+                if(dvars[i] == regressorName){
+                    regIndex = i;
+                    isDiscrete = true;
+                }
+            }
+        }
+        for(int vert=0;vert<net.size();vert++){
+            double val = net.continVariableValue(variableIndex,vert);
+            NeighborIterator it = net.begin(vert);
+            NeighborIterator end = net.end(vert);
+            while(it != end){
+                double val1 = isDiscrete ?
+                        net.discreteVariableValue(regIndex,*it) - 1 :
+                        net.continVariableValue(regIndex,*it);
+                this->stats[0] += val*val1;
+                it++;
+            }
+        }
+    }
+
+    void discreteVertexUpdate(const BinaryNet<Engine>& net, int vert,int variable, int newValue){
+        if(variable != regIndex || !isDiscrete)
+            return;
+        double regValue = net.discreteVariableValue(regIndex,vert);
+        // need to step through all neighbours and recalculate the effect
+        NeighborIterator it = net.begin(vert);
+        NeighborIterator end = net.end(vert);
+        while(it != end){
+            double neighbor_varVal = net.continVariableValue(variableIndex,*it);
+            this->stats[0] -= neighbor_varVal*regValue;
+            this->stats[0] += neighbor_varVal*newValue;
+            it++;
+        }
+    }
+
+    void continVertexUpdate(const BinaryNet<Engine>& net, int vert,int variable, double newValue){
+        if(variable == variableIndex){
+            // need to step through all neighbours and recalculate the effect
+            NeighborIterator it = net.begin(vert);
+            NeighborIterator end = net.end(vert);
+            double old_val = net.continVariableValue(variableIndex,vert);
+
+            double neighbor_regVal;
+            while(it != end){
+                if(isDiscrete){
+                    neighbor_regVal = net.discreteVariableValue(regIndex,*it);
+                }else{
+                    neighbor_regVal = net.continVariableValue(regIndex,*it);
+                }
+                this->stats[0] -= old_val*neighbor_regVal;
+                this->stats[0] += newValue*neighbor_regVal;
+                it++;
+            }
+        }
+        if(variable == regIndex){
+            NeighborIterator it = net.begin(vert);
+            NeighborIterator end = net.end(vert);
+            double old_val = net.continVariableValue(regIndex,vert);
+            while(it != end){
+                double neighbor_varVal = net.continVariableValue(variableIndex,*it);
+                this->stats[0] -= neighbor_varVal*old_val;
+                this->stats[0] += neighbor_varVal*newValue;
+                it++;
+            }
+        }
+    }
+
+    void dyadUpdate(const BinaryNet<Engine>& net, int from, int to){
+        bool addingEdge = !net.hasEdge(from,to);
+        // get the variables
+        double fromVarValue = net.continVariableValue(variableIndex,from);
+        double fromRegValue;
+        if(isDiscrete){
+            fromRegValue = net.discreteVariableValue(regIndex,from)-1;
+        }else{
+            fromRegValue = net.continVariableValue(regIndex,from);
+        }
+        double toVarValue = net.continVariableValue(variableIndex,to);
+        double toRegValue;
+        if(isDiscrete){
+            toRegValue = net.discreteVariableValue(regIndex,to)-1;
+        }else{
+            toRegValue = net.continVariableValue(regIndex,to);
+        }
+
+        // Adding an edge adds a term for both from and to; removing it takes both away:
+        double add = addingEdge ? 1 : -1;
+        this->stats[0] += add*fromVarValue*toRegValue;
+        this->stats[0] += add*toVarValue*fromRegValue;
+    }
+};
+typedef Stat<Directed, RegressNeighbors<Directed> > DirectedRegressNeighbors;
+typedef Stat<Undirected, RegressNeighbors<Undirected> > UndirectedRegressNeighbors;
+
 /*!
 * Gamma sufficient statistics
 */
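RegressNeighbors tracks the cross-product between each vertex's outcome and its neighbors' regressor values, sum over i of y_i times sum over j in N(i) of x_j; when the regressor is discrete, isDiscrete switches the lookup to discreteVariableValue. A minimal R check against a hand computation, in the same spirit as the tests later in this diff (attribute names are hypothetical):

    library(network)
    library(ernm)
    set.seed(1)
    net <- as.network(matrix(rnorm(100) > 1, nrow = 10), directed = FALSE)
    set.vertex.attribute(net, "y", rnorm(10))
    set.vertex.attribute(net, "x", rnorm(10))
    y <- net %v% "y"
    x <- net %v% "x"
    # outcome at i times the regressor at each neighbour of i:
    sum(sapply(1:10, function(i) sum(y[i] * x[get.neighborhood(net, i)])))
    ernm::calculateStatistics(net ~ regressNeighbors("y", "x") | y)  # should match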
diff --git a/src/Makevars b/src/Makevars
index f500fa3..27555f1 100644
--- a/src/Makevars
+++ b/src/Makevars
@@ -1,6 +1,8 @@
 ## Use the R_HOME indirection to support installations of multiple R version
 PKG_LIBS = `$(R_HOME)/bin/Rscript -e "Rcpp:::LdFlags()"`
 PKG_CPPFLAGS=-I../inst/include
+PKG_CXXFLAGS += -Wno-sign-compare
+PKG_CXXFLAGS += -Wno-deprecated-declarations
 ## PKG_CXXFLAGS=
 ## -O0 -fno-inline
 ## As an alternative, one can also add this code in a file 'configure'
diff --git a/src/StatController.cpp b/src/StatController.cpp
index 9749990..5aa7ce2 100644
--- a/src/StatController.cpp
+++ b/src/StatController.cpp
@@ -60,6 +60,9 @@ void initStats(){
    registerStatistic( DirStatPtr( new DirectedGamma() ) );
    registerStatistic( DirStatPtr( new DirectedLogDegreeMoment() ) );
    registerStatistic( DirStatPtr( new DirectedHamming() ) );
+   registerStatistic( DirStatPtr( new DirectedGaussRegression() ) );
+   registerStatistic( DirStatPtr( new DirectedAbsDiff() ) );
+   registerStatistic( DirStatPtr( new DirectedRegressNeighbors() ) );
    //////// Offsets /////////
    //registerOffset( DirOffsetPtr( new DirectedREffectOffset() ) );
    registerOffset( DirOffsetPtr( new DirectedBiasedSeedOffset() ) );
@@ -92,16 +95,19 @@ void initStats(){
    registerStatistic( UndirStatPtr( new UndirectedNodeCov() ) );
    registerStatistic( UndirStatPtr( new UndirectedGwesp() ) );
    registerStatistic( UndirStatPtr( new UndirectedGeoDist() ) );
-   registerStatistic( UndirStatPtr( new UndirectedGwDegree() ) );
-   registerStatistic( UndirStatPtr( new UndirectedGwdsp() ) );
-   registerStatistic( UndirStatPtr( new UndirectedEsp() ) );
-   registerStatistic( UndirStatPtr( new UndirectedSumOfSquares() ) );
-   registerStatistic( UndirStatPtr( new UndirectedGauss() ) );
-   registerStatistic( UndirStatPtr( new UndirectedGamma() ) );
-   registerStatistic( UndirStatPtr( new UndirectedLogDegreeMoment() ) );
-   registerStatistic( UndirStatPtr( new UndirectedDegreeCrossProd() ) );
-   registerStatistic( UndirStatPtr( new UndirectedPreferentialAttachment() ) );
-   registerStatistic( UndirStatPtr( new UndirectedHamming() ) );
+   registerStatistic( UndirStatPtr( new UndirectedGwDegree() ) );
+   registerStatistic( UndirStatPtr( new UndirectedGwdsp() ) );
+   registerStatistic( UndirStatPtr( new UndirectedEsp() ) );
+   registerStatistic( UndirStatPtr( new UndirectedSumOfSquares() ) );
+   registerStatistic( UndirStatPtr( new UndirectedGauss() ) );
+   registerStatistic( UndirStatPtr( new UndirectedGamma() ) );
+   registerStatistic( UndirStatPtr( new UndirectedLogDegreeMoment() ) );
+   registerStatistic( UndirStatPtr( new UndirectedDegreeCrossProd() ) );
+   registerStatistic( UndirStatPtr( new UndirectedPreferentialAttachment() ) );
+   registerStatistic( UndirStatPtr( new UndirectedHamming() ) );
+   registerStatistic( UndirStatPtr( new UndirectedGaussRegression() ) );
+   registerStatistic( UndirStatPtr( new UndirectedAbsDiff() ) );
+   registerStatistic( UndirStatPtr( new UndirectedRegressNeighbors() ) );
    //////// Offsets /////////
    registerOffset( UndirOffsetPtr( new UndirectedREffectOffset() ) );
diff --git a/tests/testthat/test-model.R b/tests/testthat/test-model.R
index 0727a6f..9e72d32 100644
--- a/tests/testthat/test-model.R
+++ b/tests/testthat/test-model.R
@@ -20,44 +20,59 @@ test_that("models", {
    samplike <- as.network(samplike_undir, directed = FALSE)
 
    # Test MRF version of ERNM
+   t_1 <- proc.time()[3]
    MRF <- ernm(samplike_undir ~ edges + homophily("group") +
                    logisticNeighbors('group','group','Loyal') | group,
                tapered = FALSE,
-               verbose = FALSE)
+               verbose = TRUE)
+   t_1 <- proc.time()[3] - t_1
 
    # Test ERGM version of ERNM
+   t_2 <- proc.time()[3]
    ERGM <- ernm(samplike_undir ~ edges + gwesp(0.5) + gwdegree(0.5) +
                    homophily("group") + logisticNeighbors('group','group','Loyal'),
                tapered = FALSE,
-               verbose = FALSE)
+               verbose = TRUE)
+   t_2 <- proc.time()[3] - t_2
 
    # Test ERNM
-   t_1 <- proc.time()[3]
+   t_3 <- proc.time()[3]
    ERNM <- ernm(samplike_undir ~ edges + gwesp(0.5) + gwdegree(0.5) +
                    homophily("group") + logisticNeighbors('group','group','Loyal') | group,
                tapered = FALSE,
-               verbose = FALSE)
-   t_1 <- proc.time()[3] - t_1
+               verbose = TRUE)
+   t_3 <- proc.time()[3] - t_3
 
    # Test tapered ERNM:
    ERNM_formula <- as.formula("samplike_undir ~ edges + gwesp(0.5) + gwdegree(0.5) +
                    homophily('group') + logisticNeighbors('group','group','Loyal') | group")
    stats <- ernm::calculateStatistics(ERNM_formula)
-   t_2 <- proc.time()[3]
+   t_4 <- proc.time()[3]
    ERNM_tapered_1 <- ernm(ERNM_formula,
                           tapered = TRUE,
                           modelArgs = list(tau = 1 / (3^2 * (stats + 5)),
                                            centers = stats,
                                            modelClass = 'TaperedModel'),
-                          verbose = FALSE)
-   t_2 <- proc.time()[3] - t_2
+                          verbose = TRUE)
+   t_4 <- proc.time()[3] - t_4
 
    # Test tapered ERNM:
    # more tapering needed here
    ERNM_formula <- as.formula("samplike_undir ~ edges + triangles() + star(2) +
                    homophily('group') + logisticNeighbors('group','group','Loyal') | group")
    stats <- ernm::calculateStatistics(ERNM_formula)
+   t_5 <- proc.time()[3]
    ERNM_tapered_2 <- ernm(ERNM_formula,
                           tapered = TRUE,
                           modelArgs = list(tau = 1 / (2 * (stats + 5)),
                                            centers = stats,
                                            modelClass = 'TaperedModel'),
-                          verbose = FALSE)
+                          verbose = TRUE)
+   t_5 <- proc.time()[3] - t_5
+
+
+   # Print the time it took to fit each model:
+   print(as.data.frame(cbind(c("MRF",
+                               "ERGM",
+                               "ERNM",
+                               "ERNM_tapered_1",
+                               "ERNM_tapered_2"),
+                             c(t_1, t_2, t_3, t_4, t_5))))
 
    # All models should converge
diff --git a/tests/testthat/test-stats.R b/tests/testthat/test-stats.R
index 10cadb1..06cc0bf 100644
--- a/tests/testthat/test-stats.R
+++ b/tests/testthat/test-stats.R
@@ -10,6 +10,7 @@ test_that("Stats", {
    # Setup
    # ================
    set.seed(1)
+   N = 100
    # make a 100 node network with some variables:
    add_treated_neighs <- function(net,treatment_var){
        tmp <- as.numeric(get.vertex.attribute(net,treatment_var))
@@ -19,11 +20,14 @@ test_that("Stats", {
    }
 
    make_net <- function(...){
-       tmp <- matrix(rnorm(10000)>2,nrow = 100)
+       tmp <- matrix(rnorm(10000)>2,nrow = N)
        net <- as.network(tmp,directed =F)
-       set.vertex.attribute(net,"var_1",as.character(apply(rmultinom(100,1,rep(1/3,3)),2,FUN = function(x){which(x==1)})))
-       set.vertex.attribute(net,"var_2",as.character(apply(rmultinom(100,1,rep(1/2,2)),2,FUN = function(x){which(x==1)})))
-       set.vertex.attribute(net,"var_3",as.character(apply(rmultinom(100,1,rep(1/2,2)),2,FUN = function(x){which(x==1)})))
+       set.vertex.attribute(net,"var_1",as.character(apply(rmultinom(N,1,rep(1/3,3)),2,FUN = function(x){which(x==1)})))
+       set.vertex.attribute(net,"var_2",as.character(apply(rmultinom(N,1,rep(1/2,2)),2,FUN = function(x){which(x==1)})))
+       set.vertex.attribute(net,"var_3",as.character(apply(rmultinom(N,1,rep(1/2,2)),2,FUN = function(x){which(x==1)})))
+       set.vertex.attribute(net,"var_4",rnorm(N))
+       set.vertex.attribute(net,"var_5",rnorm(N))
+       set.vertex.attribute(net,"var_6",rnorm(N))
        summary(as.factor(get.vertex.attribute(net,"var_1")))
 
        net <- add_treated_neighs(net,"var_1")
@@ -444,5 +448,205 @@ test_that("Stats", {
    testthat::expect_true(hamming_test_5)
    testthat::expect_true(hamming_test_6)
    testthat::expect_true(hamming_test_7)
+
+
+   # ========================
+   # gaussRegression
+   # ========================
+   # Show that linear regression Y ~ X gives the same sufficient statistics
+   # as ernm(net ~ gaussRegression(Y,X) | Y)
+   y = (net %v% "var_4")
+   X = (net %v% "var_5")
+
+   r_stat_1 <- sum(y**2)
+   r_stat_2 <- sum(y*X)
+
+   cpp_stat_1 <- as.numeric(ernm::calculateStatistics(net ~ gaussRegression("var_4","var_5") | var_4))
+   cpp_stat_2 <- cpp_stat_1[3]
+   cpp_stat_1 <- cpp_stat_1[2]
+   regression_test_1 <- abs(r_stat_1 - cpp_stat_1) <= 10**-10
+   regression_test_2 <- abs(r_stat_2 - cpp_stat_2) <= 10**-10
+
+   # Check the continVertexUpdate function works: change y at vertex k = 10 to 1
+   k = 10
+   r_stat_3 <- sum(y*X) - y[k]*X[k] + 1*X[k]
+   r_stat_4 <- sum(y**2) - y[k]**2 + 1**2
+   r_stat_5 <- sum(y*X) - y[k]*X[k] + y[k]*1
+
+   model <- ernm(net ~ gaussRegression("var_4","var_5") | var_4,
+                 tapered = FALSE,
+                 maxIter = 2,
+                 mcmcBurnIn = 100,
+                 mcmcInterval = 10,
+                 mcmcSampleSize = 100,
+                 verbose = FALSE)
+   model <- model$m$sampler$getModel()
+   model$setNetwork(ernm::as.BinaryNet(net))
+   model$calculate()
+   model$statistics()
+   model$continVertexUpdate(k, "var_4", 1)
+
+   regression_test_3 <- (abs(r_stat_3 - model$statistics()[3]) <= 10**-10)
+   regression_test_4 <- (abs(r_stat_4 - model$statistics()[2]) <= 10**-10)
+
+   model$calculate()
+   model$continVertexUpdate(k, "var_5", 1)
+   regression_test_5 <- (abs(r_stat_5 - model$statistics()[3]) <= 10**-10)
+
+   # Check multiregression works:
+   model <- ernm(net ~ gaussRegression("var_4",c("var_5","var_6")) | var_4,
+                 tapered = FALSE,
+                 maxIter = 2,
+                 mcmcBurnIn = 100,
+                 mcmcInterval = 10,
+                 mcmcSampleSize = 100,
+                 verbose = FALSE)
+   model <- model$m$sampler$getModel()
+   model$setNetwork(ernm::as.BinaryNet(net))
+   model$calculate()
+   model$statistics()
+
+   testthat::expect_true(regression_test_1)
+   testthat::expect_true(regression_test_2)
+   testthat::expect_true(regression_test_3)
+   testthat::expect_true(regression_test_4)
+   testthat::expect_true(regression_test_5)
+
+   # ========================
+   # regressNeighbors
+   # ========================
+   # Test :
+   #  - Change in Y var
+   #  - Change in X var
+   #  - dyad update off
+   #  - dyad update on
+   k <- 10
+   neighs <- get.neighborhood(net,k)
+   Y <- (net %v% "var_4")
+   X <- (net %v% "var_5")
+   r_stat_1 <- sum(sapply(1:(net %n% 'n'), function(i){
+       neighs <- get.neighborhood(net,i)
+       sum(Y[i]*X[neighs])
+   }
+   ))
+
+   model <- ernm(net ~ regressNeighbors("var_4","var_5") | var_4,
+                 tapered = FALSE,
+                 maxIter = 10,
+                 mcmcBurnIn = 100,
+                 mcmcInterval = 10,
+                 mcmcSampleSize = 100,
+                 nodeSamplingPercentage = 1,
+                 verbose = FALSE)
+   model <- model$m$sampler$getModel()
+   model$setNetwork(ernm::as.BinaryNet(net))
+   model$calculate()
+   model$statistics()
+   regress_neigh_test_1 <- (abs(r_stat_1 - model$statistics()) <= 10**-10)
+
+   k = 10
+   r_stat_2 <- sum(sapply(1:(net %n% 'n'), function(i){
+       neighs <- get.neighborhood(net,i)
+       if(i==k){
+           sum(10*X[neighs])
+       }else{
+           sum(Y[i]*X[neighs])
+       }
+   }))
+
+   X_tmp <- X
+   X_tmp[k] <- 10
+   r_stat_3 <- sum(sapply(1:(net %n% 'n'), function(i){
+       neighs <- get.neighborhood(net,i)
+       sum(Y[i]*X_tmp[neighs])
+   }))
+
+   neighs <- get.neighborhood(net,k)
+   delete.edges(net,get.edgeIDs(net,k,neighs[1]))
+   old_neighs <- neighs
+   neighs <- get.neighborhood(net,k)
+   r_stat_4 <- sum(sapply(1:(net %n% 'n'), function(i){
+       neighs <- get.neighborhood(net,i)
+       sum(Y[i]*X[neighs])
+   }))
+   add.edges(net,k,old_neighs[1])
+   neighs_4 <- old_neighs
+
+   neighs <- get.neighborhood(net,k)
+   add.edges(net,k,(1:N)[-c(neighs,k)][1])
+   old_neighs <- neighs
+   neighs <- get.neighborhood(net,k)
+   r_stat_5 <- sum(sapply(1:(net %n% 'n'), function(i){
+       neighs <- get.neighborhood(net,i)
+       sum(Y[i]*X[neighs])
+   }))
+   delete.edges(net,get.edgeIDs(net,k,(1:N)[-c(old_neighs,k)][1]))
+   neighs_5 <- old_neighs
+
+
+   model$continVertexUpdate(k, "var_4", 10)
+   regress_neigh_test_2 <- (abs(r_stat_2 - model$statistics()) <= 10**-10)
+   model$calculate()
+   model$continVertexUpdate(k, "var_5", 10)
+   regress_neigh_test_3 <- (abs(r_stat_3 - model$statistics()) <= 10**-10)
+   model$calculate()
+   model$dyadUpdate(k,neighs_4[1])
+   regress_neigh_test_4 <- (abs(r_stat_4 - model$statistics()) <= 10**-10)
+   model$calculate()
+   model$dyadUpdate(k,(1:N)[-c(neighs_5,k)][1])
+   regress_neigh_test_5 <- (abs(r_stat_5 - model$statistics()) <= 10**-10)
+
+
+   testthat::expect_true(regress_neigh_test_1)
+   testthat::expect_true(regress_neigh_test_2)
+   testthat::expect_true(regress_neigh_test_3)
+   testthat::expect_true(regress_neigh_test_4)
+   testthat::expect_true(regress_neigh_test_5)
+
+   # ========================
+   # absDiff
+   # ========================
+   # Test :
+   #  - dyad update off
+   #  - dyad update on
+   k <- 10
+   Y <- (net %v% "var_4")
+   tmp <- as.edgelist(net)
+   r_stat_1 <- sum(abs(Y[tmp[,1]] - Y[tmp[,2]]))
+
+   model <- ernm(net ~ absDiff("var_4") | var_4,
+                 tapered = FALSE,
+                 maxIter = 10,
+                 mcmcBurnIn = 100,
+                 mcmcInterval = 10,
+                 mcmcSampleSize = 100,
+                 nodeSamplingPercentage = 1,
+                 verbose = FALSE)
+   model <- model$m$sampler$getModel()
+   model$setNetwork(ernm::as.BinaryNet(net))
+   model$calculate()
+   model$statistics()
+   absDiff_test_1 <- (abs(r_stat_1 - model$statistics()) <= 10**-10)
+
+   neighs <- get.neighborhood(net,k)
+   r_stat_2 <- r_stat_1 - abs(Y[k] - Y[neighs[1]])
+   neighs_2 <- neighs
+
+   neighs <- get.neighborhood(net,k)
+   r_stat_3 <- r_stat_1 + abs(Y[k] - Y[(1:N)[-c(neighs,k)][1]])
+   neighs_3 <- neighs
+
+   model$calculate()
+   model$statistics()
+   model$dyadUpdate(k,neighs_2[1])
+   model$statistics()
+   absDiff_test_2 <- (abs(r_stat_2 - model$statistics()) <= 10**-10)
+   model$calculate()
+   model$dyadUpdate(k,(1:N)[-c(neighs_3,k)][1])
+   absDiff_test_3 <- (abs(r_stat_3 - model$statistics()) <= 10**-10)
+
+
+   testthat::expect_true(absDiff_test_1)
+   testthat::expect_true(absDiff_test_2)
+   testthat::expect_true(absDiff_test_3)
 }
 )
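The three new terms compose with the existing statistics; as a closing sketch, a joint fit on the test network built above (the tuning values are illustrative only, not recommendations):

    model <- ernm(net ~ edges + absDiff("var_4") +
                      gaussRegression("var_4", "var_5") +
                      regressNeighbors("var_4", "var_5") | var_4,
                  tapered = FALSE,
                  mcmcBurnIn = 100,
                  mcmcInterval = 10,
                  mcmcSampleSize = 100,
                  verbose = FALSE)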