#define COMPACTION_ALIGNMENT_PRE  16
#define COMPACTION_ALIGNMENT_POST 0
    : networkName_(name), preferredSimMode_(preferredSimMode), loggerMode_(loggerMode),
      randSeed_(SNN::setRandSeed(randSeed))

    if (!simulatorDeleted)
short int SNN::connect(int grpId1, int grpId2, const std::string& _type, float initWt, float maxWt, float prob,
                       uint8_t minDelay, uint8_t maxDelay, RadiusRF radius,
                       float _mulSynFast, float _mulSynSlow, bool synWtType) {
    assert(grpId1 < numGroups);
    assert(grpId2 < numGroups);
    assert(minDelay <= maxDelay);
    connConfig.grpSrc = grpId1;
    connConfig.initWt = initWt;
    connConfig.maxWt = maxWt;
    connConfig.conn = NULL;

    if (_type.find("random") != std::string::npos) {
    else if (_type.find("full-no-direct") != std::string::npos) {
    else if (_type.find("full") != std::string::npos) {
    else if (_type.find("one-to-one") != std::string::npos) {
    }
    else if (_type.find("gaussian") != std::string::npos) {
        KERNEL_ERROR("Invalid connection type (should be 'random', 'full', 'one-to-one', 'full-no-direct', or 'gaussian')");

    assert(connConfig.connId == -1);
    connConfig.connId = numConnections;

    connectConfigMap[numConnections] = connConfig;

    return (numConnections - 1);
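// Added usage sketch (hypothetical identifiers, not from this file): a caller such as the
// CARLsim interface layer might set up a fixed-weight, 10%-probability random projection
// between two previously created groups roughly like
//   short int cId = snn.connect(gIn, gExc, "random", 0.25f, 0.25f, 0.1f,
//                               1, 20, RadiusRF(-1), 1.0f, 1.0f, SYN_FIXED);
// assuming the SYN_FIXED/SYN_PLASTIC and RadiusRF conventions from the public headers;
// the returned value is the connection ID (numConnections - 1 after the increment).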
    assert(grpId1 < numGroups);
    assert(grpId2 < numGroups);

    connConfig.grpSrc = grpId1;
    connConfig.maxWt = 0.0f;
    connConfig.conn = conn;

    assert(connConfig.connId == -1);
    connConfig.connId = numConnections;

    connectConfigMap[numConnections] = connConfig;

    return (numConnections - 1);
    assert(grpIdLower >= 0 && grpIdLower < numGroups);
    assert(grpIdUpper >= 0 && grpIdUpper < numGroups);
    assert(grpIdLower != grpIdUpper);
    assert(groupConfigMap[grpIdLower].preferredNetId == groupConfigMap[grpIdUpper].preferredNetId);

    sim_with_compartments = true;

    compConnConfig.grpSrc = grpIdLower;
    compConnConfig.grpDest = grpIdUpper;
    compConnConfig.connId = -1;

    assert(compConnConfig.connId == -1);
    compConnConfig.connId = numCompartmentConnections;

    compConnectConfigMap[numCompartmentConnections] = compConnConfig;

    numCompartmentConnections++;

    return (numCompartmentConnections - 1);
    assert(neurType >= 0);
        KERNEL_ERROR("Invalid type using createGroup... Cannot create poisson generators here.");

    grpConfig.type = neurType;
    grpConfig.numN = grid.N;
    grpConfig.grid = grid;
    grpConfig.isLIF = false;

    if (preferredPartition == ANY) {
    }
    else if (preferredBackend == CPU_CORES) {

    grpConfigMD.gGrpId = numGroups;

    groupConfigMap[numGroups] = grpConfig;
    groupConfigMDMap[numGroups] = grpConfigMD;

    return grpConfigMD.gGrpId;
    assert(neurType >= 0);
        KERNEL_ERROR("Invalid type using createGroup... Cannot create poisson generators here.");

    grpConfig.type = neurType;
    grpConfig.numN = grid.N;
    grpConfig.isLIF = true;
    grpConfig.grid = grid;

    if (preferredPartition == ANY) {
    }
    else if (preferredBackend == CPU_CORES) {

    grpConfigMD.gGrpId = numGroups;

    groupConfigMap[numGroups] = grpConfig;
    groupConfigMDMap[numGroups] = grpConfigMD;

    return grpConfigMD.gGrpId;
    assert(neurType >= 0);

    grpConfig.numN = grid.N;
    grpConfig.grid = grid;
    grpConfig.isLIF = false;

    if (preferredPartition == ANY) {
    else if (preferredBackend == CPU_CORES) {

    grpConfigMD.gGrpId = numGroups;

    groupConfigMap[numGroups] = grpConfig;
    groupConfigMDMap[numGroups] = grpConfigMD;

    return grpConfigMD.gGrpId;
    for (int grpId = 0; grpId < numGroups; grpId++) {

    groupConfigMap[gGrpId].withCompartments = true;
    groupConfigMap[gGrpId].compCouplingUp = couplingUp;
    groupConfigMap[gGrpId].compCouplingDown = couplingDown;
    glbNetworkConfig.numComp += groupConfigMap[gGrpId].numN;
void SNN::setConductances(bool isSet, int tdAMPA, int trNMDA, int tdNMDA, int tdGABAa, int trGABAb, int tdGABAb) {
    assert(tdAMPA > 0); assert(tdNMDA > 0); assert(tdGABAa > 0); assert(tdGABAb > 0);
    assert(trNMDA >= 0); assert(trGABAb >= 0);
    assert(trNMDA != tdNMDA); assert(trGABAb != tdGABAb);

    sim_with_conductances |= isSet;
    dAMPA  = 1.0 - 1.0 / tdAMPA;
    dNMDA  = 1.0 - 1.0 / tdNMDA;
    dGABAa = 1.0 - 1.0 / tdGABAa;
    dGABAb = 1.0 - 1.0 / tdGABAb;

        sim_with_NMDA_rise = true;
        rNMDA = 1.0 - 1.0 / trNMDA;

        double tmax = (-tdNMDA * trNMDA * log(1.0 * trNMDA / tdNMDA)) / (tdNMDA - trNMDA);
        sNMDA = 1.0 / (exp(-tmax / tdNMDA) - exp(-tmax / trNMDA));
        assert(!isinf(tmax) && !isnan(tmax) && tmax >= 0);
        assert(!isinf(sNMDA) && !isnan(sNMDA) && sNMDA > 0);

        sim_with_GABAb_rise = true;
        rGABAb = 1.0 - 1.0 / trGABAb;

        double tmax = (-tdGABAb * trGABAb * log(1.0 * trGABAb / tdGABAb)) / (tdGABAb - trGABAb);
        sGABAb = 1.0 / (exp(-tmax / tdGABAb) - exp(-tmax / trGABAb));
        assert(!isinf(tmax) && !isnan(tmax)); assert(!isinf(sGABAb) && !isnan(sGABAb) && sGABAb > 0);
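    // Added reasoning note (not original commentary): with separate rise and decay time
    // constants the conductance follows a difference of exponentials,
    // g(t) ~ exp(-t/tauDecay) - exp(-t/tauRise). Setting dg/dt = 0 gives the peak time
    // tmax = tauDecay*tauRise*ln(tauDecay/tauRise)/(tauDecay - tauRise), which is the tmax
    // computed above; sNMDA and sGABAb are then 1/(exp(-tmax/tauDecay) - exp(-tmax/tauRise)),
    // so the peak of the difference of exponentials is normalized to 1 regardless of the
    // chosen time constants.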
    if (sim_with_conductances) {
        KERNEL_INFO(" - AMPA decay time = %5d ms", tdAMPA);
        KERNEL_INFO(" - NMDA rise time %s = %5d ms", sim_with_NMDA_rise ? " " : "(disabled)", trNMDA);
        KERNEL_INFO(" - GABAa decay time = %5d ms", tdGABAa);
        KERNEL_INFO(" - GABAb rise time %s = %5d ms", sim_with_GABAb_rise ? " " : "(disabled)", trGABAb);
        KERNEL_INFO(" - GABAb decay time = %5d ms", tdGABAb);

        KERNEL_INFO("Running CUBA mode (all synaptic conductances disabled)");
    for (int grpId = 0; grpId < numGroups; grpId++) {

    sim_with_homeostasis |= isSet;
    groupConfigMap[gGrpId].homeoConfig.WithHomeostasis = isSet;
    groupConfigMap[gGrpId].homeoConfig.homeostasisScale = homeoScale;
    groupConfigMap[gGrpId].homeoConfig.avgTimeScale = avgTimeScale;
    groupConfigMap[gGrpId].homeoConfig.avgTimeScaleInv = 1.0f / avgTimeScale;
    groupConfigMap[gGrpId].homeoConfig.avgTimeScaleDecay = (avgTimeScale * 1000.0f - 1.0f) / (avgTimeScale * 1000.0f);
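    // Added note: avgTimeScaleDecay = (T*1000 - 1)/(T*1000) = 1 - 1/(1000*T) is the
    // per-millisecond leak applied to the running average firing rate, i.e. a first-order
    // low-pass with time constant avgTimeScale (given in seconds, hence the factor 1000).
    // For avgTimeScale = 10 s this evaluates to 9999/10000 = 0.9999.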
    KERNEL_INFO("Homeostasis parameters %s for %d (%s):\thomeoScale: %f, avgTimeScale: %f",
        isSet ? "enabled" : "disabled", gGrpId, groupConfigMap[gGrpId].grpName.c_str(), homeoScale, avgTimeScale);
    for (int grpId = 0; grpId < numGroups; grpId++) {

    groupConfigMap[gGrpId].homeoConfig.baseFiring = baseFiring;
    groupConfigMap[gGrpId].homeoConfig.baseFiringSD = baseFiringSD;

    KERNEL_INFO("Homeostatic base firing rate set for %d (%s):\tbaseFiring: %3.3f, baseFiringStd: %3.3f",
        gGrpId, groupConfigMap[gGrpId].grpName.c_str(), baseFiring, baseFiringSD);
    assert(numStepsPerMs >= 1 && numStepsPerMs <= 100);

    glbNetworkConfig.timeStep = 1.0f / numStepsPerMs;
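    // Added note: numStepsPerMs selects the integration step as a fraction of the 1 ms
    // simulation tick; e.g. numStepsPerMs = 2 gives timeStep = 0.5 ms, so the neuron state
    // is advanced twice per simulated millisecond.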
                               float izh_c, float izh_c_sd, float izh_d, float izh_d_sd)
    assert(gGrpId >= -1);
    assert(izh_a_sd >= 0); assert(izh_b_sd >= 0); assert(izh_c_sd >= 0); assert(izh_d_sd >= 0);

    for (int grpId = 0; grpId < numGroups; grpId++) {
        setNeuronParameters(grpId, izh_a, izh_a_sd, izh_b, izh_b_sd, izh_c, izh_c_sd, izh_d, izh_d_sd);

    groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_a = izh_a;
    groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_a_sd = izh_a_sd;
    groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_b = izh_b;
    groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_b_sd = izh_b_sd;
    groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_c = izh_c;
    groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_c_sd = izh_c_sd;
    groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_d = izh_d;
    groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_d_sd = izh_d_sd;
    groupConfigMap[gGrpId].withParamModel_9 = 0;
    groupConfigMap[gGrpId].isLIF = 0;
                               float izh_vr, float izh_vr_sd, float izh_vt, float izh_vt_sd,
                               float izh_a, float izh_a_sd, float izh_b, float izh_b_sd,
                               float izh_vpeak, float izh_vpeak_sd, float izh_c, float izh_c_sd,
                               float izh_d, float izh_d_sd)
    assert(gGrpId >= -1);
    assert(izh_C_sd >= 0); assert(izh_k_sd >= 0); assert(izh_vr_sd >= 0);
    assert(izh_vt_sd >= 0); assert(izh_a_sd >= 0); assert(izh_b_sd >= 0); assert(izh_vpeak_sd >= 0);
    assert(izh_c_sd >= 0); assert(izh_d_sd >= 0);

    for (int grpId = 0; grpId < numGroups; grpId++) {
        setNeuronParameters(grpId, izh_C, izh_C_sd, izh_k, izh_k_sd, izh_vr, izh_vr_sd, izh_vt, izh_vt_sd,
                            izh_a, izh_a_sd, izh_b, izh_b_sd, izh_vpeak, izh_vpeak_sd, izh_c, izh_c_sd,
                            izh_d, izh_d_sd); // trailing arguments restored from the parameter list above

    groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_a = izh_a;
    groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_a_sd = izh_a_sd;
    groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_b = izh_b;
    groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_b_sd = izh_b_sd;
    groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_c = izh_c;
    groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_c_sd = izh_c_sd;
    groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_d = izh_d;
    groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_d_sd = izh_d_sd;
    groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_C = izh_C;
    groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_C_sd = izh_C_sd;
    groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_k = izh_k;
    groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_k_sd = izh_k_sd;
    groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vr = izh_vr;
    groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vr_sd = izh_vr_sd;
    groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vt = izh_vt;
    groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vt_sd = izh_vt_sd;
    groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vpeak = izh_vpeak;
    groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vpeak_sd = izh_vpeak_sd;
    groupConfigMap[gGrpId].withParamModel_9 = 1;
    groupConfigMap[gGrpId].isLIF = 0;
    assert(gGrpId >= -1);
    assert(tau_m >= 0); assert(tau_ref >= 0); assert(vReset < vTh);
    assert(minRmem >= 0.0f); assert(minRmem <= maxRmem);

    for (int grpId = 0; grpId < numGroups; grpId++) {

    groupConfigMap[gGrpId].neuralDynamicsConfig.lif_tau_m = tau_m;
    groupConfigMap[gGrpId].neuralDynamicsConfig.lif_tau_ref = tau_ref;
    groupConfigMap[gGrpId].neuralDynamicsConfig.lif_vTh = vTh;
    groupConfigMap[gGrpId].neuralDynamicsConfig.lif_vReset = vReset;
    groupConfigMap[gGrpId].neuralDynamicsConfig.lif_minRmem = minRmem;
    groupConfigMap[gGrpId].neuralDynamicsConfig.lif_maxRmem = maxRmem;
    groupConfigMap[gGrpId].withParamModel_9 = 0;
    groupConfigMap[gGrpId].isLIF = 1;
                           float tauACh, float baseNE, float tauNE) {
    assert(gGrpId >= -1);
    assert(baseDP > 0.0f); assert(base5HT > 0.0f); assert(baseACh > 0.0f); assert(baseNE > 0.0f);
    assert(tauDP > 0); assert(tau5HT > 0); assert(tauACh > 0); assert(tauNE > 0);

    for (int grpId = 0; grpId < numGroups; grpId++) {
        setNeuromodulator(grpId, baseDP, tauDP, base5HT, tau5HT, baseACh, tauACh, baseNE, tauNE);

    groupConfigMap[gGrpId].neuromodulatorConfig.baseDP = baseDP;
    groupConfigMap[gGrpId].neuromodulatorConfig.decayDP = 1.0f - (1.0f / tauDP);
    groupConfigMap[gGrpId].neuromodulatorConfig.base5HT = base5HT;
    groupConfigMap[gGrpId].neuromodulatorConfig.decay5HT = 1.0f - (1.0f / tau5HT);
    groupConfigMap[gGrpId].neuromodulatorConfig.baseACh = baseACh;
    groupConfigMap[gGrpId].neuromodulatorConfig.decayACh = 1.0f - (1.0f / tauACh);
    groupConfigMap[gGrpId].neuromodulatorConfig.baseNE = baseNE;
    groupConfigMap[gGrpId].neuromodulatorConfig.decayNE = 1.0f - (1.0f / tauNE);
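    // Added note: decayX = 1 - 1/tauX is the per-millisecond multiplicative decay of the
    // corresponding neuromodulator concentration toward its baseline, i.e. the one-step
    // Euler approximation of exp(-1/tauX) for a first-order process with time constant
    // tauX ms.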
    assert(gGrpId >= -1);
    assert(tauPlus > 0.0f); assert(tauMinus > 0.0f); assert(gamma >= 0.0f);

    for (int grpId = 0; grpId < numGroups; grpId++) {
        setESTDP(grpId, isSet, type, curve, alphaPlus, tauPlus, alphaMinus, tauMinus, gamma);

    groupConfigMap[gGrpId].stdpConfig.ALPHA_PLUS_EXC = alphaPlus;
    groupConfigMap[gGrpId].stdpConfig.ALPHA_MINUS_EXC = alphaMinus;
    groupConfigMap[gGrpId].stdpConfig.TAU_PLUS_INV_EXC = 1.0f / tauPlus;
    groupConfigMap[gGrpId].stdpConfig.TAU_MINUS_INV_EXC = 1.0f / tauMinus;
    groupConfigMap[gGrpId].stdpConfig.GAMMA = gamma;
    groupConfigMap[gGrpId].stdpConfig.KAPPA = (1 + exp(-gamma / tauPlus)) / (1 - exp(-gamma / tauPlus));
    groupConfigMap[gGrpId].stdpConfig.OMEGA = alphaPlus * (1 - groupConfigMap[gGrpId].stdpConfig.KAPPA);
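    // Added note: KAPPA and OMEGA are precomputed helper constants used only by the
    // timing-based E-STDP curve, where gamma sets the width of the near-origin segment;
    // the plain exponential curve (typically configured with gamma = 0) ignores them.
    // See the STDP update kernels for how they enter the weight change.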
    groupConfigMap[gGrpId].stdpConfig.WithESTDPtype = type;
    groupConfigMap[gGrpId].stdpConfig.WithESTDPcurve = curve;
    groupConfigMap[gGrpId].stdpConfig.WithESTDP = isSet;
    groupConfigMap[gGrpId].stdpConfig.WithSTDP |= groupConfigMap[gGrpId].stdpConfig.WithESTDP;
    sim_with_stdp |= groupConfigMap[gGrpId].stdpConfig.WithSTDP;

    KERNEL_INFO("E-STDP %s for %s(%d)", isSet ? "enabled" : "disabled", groupConfigMap[gGrpId].grpName.c_str(), gGrpId);
    assert(gGrpId >= -1);
    assert(tau1 > 0); assert(tau2 > 0);

    for (int grpId = 0; grpId < numGroups; grpId++) {
        setISTDP(grpId, isSet, type, curve, ab1, ab2, tau1, tau2);

    groupConfigMap[gGrpId].stdpConfig.ALPHA_PLUS_INB = ab1;
    groupConfigMap[gGrpId].stdpConfig.ALPHA_MINUS_INB = ab2;
    groupConfigMap[gGrpId].stdpConfig.TAU_PLUS_INV_INB = 1.0f / tau1;
    groupConfigMap[gGrpId].stdpConfig.TAU_MINUS_INV_INB = 1.0f / tau2;
    groupConfigMap[gGrpId].stdpConfig.BETA_LTP = 0.0f;
    groupConfigMap[gGrpId].stdpConfig.BETA_LTD = 0.0f;
    groupConfigMap[gGrpId].stdpConfig.LAMBDA = 1.0f;
    groupConfigMap[gGrpId].stdpConfig.DELTA = 1.0f;

    groupConfigMap[gGrpId].stdpConfig.ALPHA_PLUS_INB = 0.0f;
    groupConfigMap[gGrpId].stdpConfig.ALPHA_MINUS_INB = 0.0f;
    groupConfigMap[gGrpId].stdpConfig.TAU_PLUS_INV_INB = 1.0f;
    groupConfigMap[gGrpId].stdpConfig.TAU_MINUS_INV_INB = 1.0f;
    groupConfigMap[gGrpId].stdpConfig.BETA_LTP = ab1;
    groupConfigMap[gGrpId].stdpConfig.BETA_LTD = ab2;
    groupConfigMap[gGrpId].stdpConfig.LAMBDA = tau1;
    groupConfigMap[gGrpId].stdpConfig.DELTA = tau2;

    groupConfigMap[gGrpId].stdpConfig.WithISTDPtype = type;
    groupConfigMap[gGrpId].stdpConfig.WithISTDPcurve = curve;
    groupConfigMap[gGrpId].stdpConfig.WithISTDP = isSet;
    groupConfigMap[gGrpId].stdpConfig.WithSTDP |= groupConfigMap[gGrpId].stdpConfig.WithISTDP;
    sim_with_stdp |= groupConfigMap[gGrpId].stdpConfig.WithSTDP;

    KERNEL_INFO("I-STDP %s for %s(%d)", isSet ? "enabled" : "disabled", groupConfigMap[gGrpId].grpName.c_str(), gGrpId);
void SNN::setSTP(int gGrpId, bool isSet, float STP_U, float STP_tau_u, float STP_tau_x) {
    assert(gGrpId >= -1);
    assert(STP_U > 0 && STP_U <= 1); assert(STP_tau_u > 0); assert(STP_tau_x > 0);

    for (int grpId = 0; grpId < numGroups; grpId++) {
        setSTP(grpId, isSet, STP_U, STP_tau_u, STP_tau_x);

    sim_with_stp |= isSet;
    groupConfigMap[gGrpId].stpConfig.WithSTP = isSet;
    groupConfigMap[gGrpId].stpConfig.STP_A = (STP_U > 0.0f) ? 1.0 / STP_U : 1.0f;
    groupConfigMap[gGrpId].stpConfig.STP_U = STP_U;
    groupConfigMap[gGrpId].stpConfig.STP_tau_u_inv = 1.0f / STP_tau_u;
    groupConfigMap[gGrpId].stpConfig.STP_tau_x_inv = 1.0f / STP_tau_x;

    KERNEL_INFO("STP %s for %d (%s):\tA: %1.4f, U: %1.4f, tau_u: %4.0f, tau_x: %4.0f", isSet ? "enabled" : "disabled",
        gGrpId, groupConfigMap[gGrpId].grpName.c_str(), groupConfigMap[gGrpId].stpConfig.STP_A, STP_U, STP_tau_u, STP_tau_x);
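    // Added note: STP_A = 1/U rescales the release so that the first spike after a long
    // silent period transmits with full efficacy: at rest x = 1 and u jumps to U, hence
    // A*u*x = (1/U)*U*1 = 1. The time constants are stored as inverses (STP_tau_u_inv,
    // STP_tau_x_inv) so the runtime update can multiply instead of divide.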
    assert(wtChangeDecay > 0.0f && wtChangeDecay < 1.0f);

    switch (wtANDwtChangeUpdateInterval) {
            wtANDwtChangeUpdateInterval_ = 10;
            wtANDwtChangeUpdateInterval_ = 100;
            wtANDwtChangeUpdateInterval_ = 1000;

    if (enableWtChangeDecay) {
        switch (wtANDwtChangeUpdateInterval) {
                stdpScaleFactor_ = 0.005f;
                stdpScaleFactor_ = 0.05f;
                stdpScaleFactor_ = 0.5f;
        wtChangeDecay_ = wtChangeDecay;

        stdpScaleFactor_ = 1.0f;
        wtChangeDecay_ = 0.0f;
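    // Added interpretation (not stated in the original source): the three scale factors
    // (0.005, 0.05, 0.5) are proportional to the chosen update interval (10, 100, 1000 ms),
    // which keeps the weight change applied per unit of simulated time roughly independent
    // of how often the accumulated STDP updates are flushed into the weights.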
    KERNEL_INFO("Update weight and weight change every %d ms", wtANDwtChangeUpdateInterval_);
    KERNEL_INFO("Weight Change Decay is %s", enableWtChangeDecay ? "enabled" : "disabled");
    KERNEL_INFO("STDP scale factor = %1.3f, wtChangeDecay = %1.3f", stdpScaleFactor_, wtChangeDecay_);
    generateRuntimeSNN();

    assert(_nmsec >= 0 && _nmsec < 1000);
    int runDurationMs = _nsec * 1000 + _nmsec;
    KERNEL_DEBUG("runNetwork: runDur=%dms, printRunSummary=%s", runDurationMs, printRunSummary ? "y" : "n");

    printRunSummary = (loggerMode_ == SILENT) ? false : printRunSummary;

    if (simTime == 0 && printRunSummary) {
        KERNEL_INFO("******************** Running the simulation on %d GPU(s) and %d CPU(s) ***************************", numGPUs, numCores);

    simTimeRunStart = simTime;
    simTimeRunStop = simTime + runDurationMs;
    assert(simTimeRunStop >= simTimeRunStart);
    if (simTime == 0 && numConnectionMonitor) {

    CUDA_RESET_TIMER(timer);
    CUDA_START_TIMER(timer);

    for (int i = 0; i < runDurationMs; i++) {

        if (!sim_with_fixedwts && wtANDwtChangeUpdateInterval_ == ++wtANDwtChangeUpdateIntervalCnt_) {
            wtANDwtChangeUpdateIntervalCnt_ = 0;
            if (!sim_in_testing) {

    if (numSpikeMonitor) {
    if (numGroupMonitor) {
    if (numConnectionMonitor) {
    if (numNeuronMonitor) {

    fetchNeuronSpikeCount(ALL);

    if (printRunSummary) {
        if (numSpikeMonitor) {
            printStatusSpikeMonitor(ALL);
        if (numConnectionMonitor) {
            printStatusConnectionMonitor(ALL);
        if (numGroupMonitor) {
            printStatusGroupMonitor(ALL);

        simTimeLastRunSummary = simTime;

    CUDA_STOP_TIMER(timer);
    lastExecutionTime = CUDA_GET_TIMER_VALUE(timer);
    cumExecutionTime += lastExecutionTime;
    assert(connId >= 0 && connId < numConnections);

    int netId = groupConfigMDMap[connectConfigMap[connId].grpDest].netId;
    int lGrpId = groupConfigMDMap[connectConfigMap[connId].grpDest].lGrpId;

    fetchPreConnectionInfo(netId);
    fetchConnIdsLookupArray(netId);
    fetchSynapseState(netId);

    for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++) {
        unsigned int cumIdx = managerRuntimeData.cumulativePre[lNId];

        unsigned int pos_ij = cumIdx;
        for (int j = 0; j < managerRuntimeData.Npre[lNId]; pos_ij++, j++) {
            float weight = managerRuntimeData.wt[pos_ij] + bias;

            bool needToPrintDebug = (weight > connectConfigMap[connId].maxWt || weight < 0.0f);

            if (updateWeightRange) {
                connectConfigMap[connId].maxWt = std::max(connectConfigMap[connId].maxWt, weight);
                if (needToPrintDebug) {
                    KERNEL_DEBUG("biasWeights(%d,%f,%s): updated weight ranges to [%f,%f]", connId, bias,
                        (updateWeightRange ? "true" : "false"), 0.0f, connectConfigMap[connId].maxWt);

                weight = std::min(weight, connectConfigMap[connId].maxWt);
                weight = std::max(weight, 0.0f);
                if (needToPrintDebug) {
                    KERNEL_DEBUG("biasWeights(%d,%f,%s): constrained weight %f to [%f,%f]", connId, bias,
                        (updateWeightRange ? "true" : "false"), weight, 0.0f, connectConfigMap[connId].maxWt);

            managerRuntimeData.wt[pos_ij] = weight;
            managerRuntimeData.maxSynWt[pos_ij] = connectConfigMap[connId].maxWt;

        CUDA_CHECK_ERRORS(cudaMemcpy(&(runtimeData[netId].wt[cumIdx]), &(managerRuntimeData.wt[cumIdx]),
            sizeof(float) * managerRuntimeData.Npre[lNId], cudaMemcpyHostToDevice));

        if (runtimeData[netId].maxSynWt != NULL) {
            CUDA_CHECK_ERRORS(cudaMemcpy(&(runtimeData[netId].maxSynWt[cumIdx]), &(managerRuntimeData.maxSynWt[cumIdx]),
                sizeof(float) * managerRuntimeData.Npre[lNId], cudaMemcpyHostToDevice));

        memcpy(&runtimeData[netId].wt[cumIdx], &managerRuntimeData.wt[cumIdx],
            sizeof(float) * managerRuntimeData.Npre[lNId]);

        if (runtimeData[netId].maxSynWt != NULL) {
            memcpy(&runtimeData[netId].maxSynWt[cumIdx], &managerRuntimeData.maxSynWt[cumIdx],
                sizeof(float) * managerRuntimeData.Npre[lNId]);
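        // Added note: the loop above edits only the host-side manager copy of the weights;
        // the cudaMemcpy branch then pushes the per-neuron slice (Npre[lNId] floats starting
        // at cumIdx) to a GPU partition, while the memcpy branch covers partitions that run
        // on a CPU backend, where runtimeData lives in host memory.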
    assert(connId >= 0 && connId < numConnections);
    assert(scale >= 0.0f);

    int netId = groupConfigMDMap[connectConfigMap[connId].grpDest].netId;
    int lGrpId = groupConfigMDMap[connectConfigMap[connId].grpDest].lGrpId;

    fetchPreConnectionInfo(netId);
    fetchConnIdsLookupArray(netId);
    fetchSynapseState(netId);

    for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++) {
        unsigned int cumIdx = managerRuntimeData.cumulativePre[lNId];

        unsigned int pos_ij = cumIdx;
        for (int j = 0; j < managerRuntimeData.Npre[lNId]; pos_ij++, j++) {
            float weight = managerRuntimeData.wt[pos_ij] * scale;

            bool needToPrintDebug = (weight > connectConfigMap[connId].maxWt || weight < 0.0f);

            if (updateWeightRange) {
                connectConfigMap[connId].maxWt = std::max(connectConfigMap[connId].maxWt, weight);
                if (needToPrintDebug) {
                    KERNEL_DEBUG("scaleWeights(%d,%f,%s): updated weight ranges to [%f,%f]", connId, scale,
                        (updateWeightRange ? "true" : "false"), 0.0f, connectConfigMap[connId].maxWt);

                weight = std::min(weight, connectConfigMap[connId].maxWt);
                weight = std::max(weight, 0.0f);
                if (needToPrintDebug) {
                    KERNEL_DEBUG("scaleWeights(%d,%f,%s): constrained weight %f to [%f,%f]", connId, scale,
                        (updateWeightRange ? "true" : "false"), weight, 0.0f, connectConfigMap[connId].maxWt);

            managerRuntimeData.wt[pos_ij] = weight;
            managerRuntimeData.maxSynWt[pos_ij] = connectConfigMap[connId].maxWt;

        CUDA_CHECK_ERRORS(cudaMemcpy(&runtimeData[netId].wt[cumIdx], &managerRuntimeData.wt[cumIdx],
            sizeof(float) * managerRuntimeData.Npre[lNId], cudaMemcpyHostToDevice));

        if (runtimeData[netId].maxSynWt != NULL) {
            CUDA_CHECK_ERRORS(cudaMemcpy(&runtimeData[netId].maxSynWt[cumIdx], &managerRuntimeData.maxSynWt[cumIdx],
                sizeof(float) * managerRuntimeData.Npre[lNId], cudaMemcpyHostToDevice));

        memcpy(&runtimeData[netId].wt[cumIdx], &managerRuntimeData.wt[cumIdx],
            sizeof(float) * managerRuntimeData.Npre[lNId]);

        if (runtimeData[netId].maxSynWt != NULL) {
            memcpy(&runtimeData[netId].maxSynWt[cumIdx], &managerRuntimeData.maxSynWt[cumIdx],
                sizeof(float) * managerRuntimeData.Npre[lNId]);
    int netId = groupConfigMDMap[gGrpId].netId;
    int lGrpId = groupConfigMDMap[gGrpId].lGrpId;

    if (groupConfigMDMap[gGrpId].groupMonitorId >= 0) {
        KERNEL_ERROR("setGroupMonitor has already been called on Group %d (%s).", gGrpId, groupConfigMap[gGrpId].grpName.c_str());

    groupMonCoreList[numGroupMonitor] = grpMonCoreObj;
    groupMonList[numGroupMonitor] = grpMonObj;

    groupConfigMDMap[gGrpId].groupMonitorId = numGroupMonitor;

    KERNEL_INFO("GroupMonitor set for group %d (%s)", gGrpId, groupConfigMap[gGrpId].grpName.c_str());
    if (connectConfigMap[connId].connectionMonitorId >= 0) {
        KERNEL_ERROR("setConnectionMonitor has already been called on Connection %d (MonitorId=%d)", connId, connectConfigMap[connId].connectionMonitorId);

    connectConfigMap[connId].connectionMonitorId = numConnectionMonitor;

        grpIdPre, grpIdPost);
    connMonCoreList[numConnectionMonitor] = connMonCoreObj;
    connMonList[numConnectionMonitor] = connMonObj;

    connMonCoreObj->init();

    numConnectionMonitor++;
    KERNEL_INFO("ConnectionMonitor %d set for Connection %d: %d(%s) => %d(%s)", connectConfigMap[connId].connectionMonitorId, connId, grpIdPre, getGroupName(grpIdPre).c_str(),
    assert(spikeGenFunc);
    assert(groupConfigMap[gGrpId].isSpikeGenerator);
    groupConfigMap[gGrpId].spikeGenFunc = spikeGenFunc;

    if (groupConfigMDMap[gGrpId].spikeMonitorId >= 0) {
        KERNEL_INFO("SpikeMonitor updated for group %d (%s)", gGrpId, groupConfigMap[gGrpId].grpName.c_str());

    spikeMonCoreList[numSpikeMonitor] = spkMonCoreObj;
    spikeMonList[numSpikeMonitor] = spkMonObj;

    groupConfigMDMap[gGrpId].spikeMonitorId = numSpikeMonitor;

    KERNEL_INFO("SpikeMonitor set for group %d (%s)", gGrpId, groupConfigMap[gGrpId].grpName.c_str());
    int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
    int netId = groupConfigMDMap[gGrpId].netId;

        KERNEL_WARN("Due to limited memory space, only the first 128 neurons can be monitored by NeuronMonitor");

    if (groupConfigMDMap[gGrpId].neuronMonitorId >= 0) {
        KERNEL_INFO("NeuronMonitor updated for group %d (%s)", gGrpId, groupConfigMap[gGrpId].grpName.c_str());

    neuronMonCoreList[numNeuronMonitor] = nrnMonCoreObj;
    neuronMonList[numNeuronMonitor] = nrnMonObj;

    groupConfigMDMap[gGrpId].neuronMonitorId = numNeuronMonitor;

    KERNEL_INFO("NeuronMonitor set for group %d (%s)", gGrpId, groupConfigMap[gGrpId].grpName.c_str());
    int netId = groupConfigMDMap[gGrpId].netId;
    int lGrpId = groupConfigMDMap[gGrpId].lGrpId;

    assert(gGrpId >= 0 && lGrpId < networkConfigs[netId].numGroups);
    assert(groupConfigMap[gGrpId].isSpikeGenerator);
    assert(ratePtr->getNumNeurons() == groupConfigMap[gGrpId].numN);
    assert(refPeriod >= 1);

    groupConfigMDMap[gGrpId].ratePtr = ratePtr;
    groupConfigMDMap[gGrpId].refractPeriod = refPeriod;
    spikeRateUpdated = true;
void SNN::setWeight(short int connId, int neurIdPre, int neurIdPost, float weight, bool updateWeightRange) {
    assert(weight >= 0.0f);
    assert(neurIdPre >= 0 && neurIdPre < getGroupNumNeurons(connectConfigMap[connId].grpSrc));
    assert(neurIdPost >= 0 && neurIdPost < getGroupNumNeurons(connectConfigMap[connId].grpDest));

    float maxWt = fabs(connectConfigMap[connId].maxWt);

    bool needToPrintDebug = (weight > maxWt || weight < minWt);

    int netId = groupConfigMDMap[connectConfigMap[connId].grpDest].netId;
    int postlGrpId = groupConfigMDMap[connectConfigMap[connId].grpDest].lGrpId;
    int prelGrpId = groupConfigMDMap[connectConfigMap[connId].grpSrc].lGrpId;

    fetchPreConnectionInfo(netId);
    fetchConnIdsLookupArray(netId);
    fetchSynapseState(netId);

    if (updateWeightRange) {
        maxWt = fmax(maxWt, weight);
        if (needToPrintDebug) {
            KERNEL_DEBUG("setWeight(%d,%d,%d,%f,%s): updated weight ranges to [%f,%f]", connId, neurIdPre, neurIdPost,
                weight, (updateWeightRange ? "true" : "false"), minWt, maxWt);

        weight = fmin(weight, maxWt);
        weight = fmax(weight, minWt);
        if (needToPrintDebug) {
            KERNEL_DEBUG("setWeight(%d,%d,%d,%f,%s): constrained weight %f to [%f,%f]", connId, neurIdPre, neurIdPost,
                weight, (updateWeightRange ? "true" : "false"), weight, minWt, maxWt);

    int neurIdPreReal = groupConfigs[netId][prelGrpId].lStartN + neurIdPre;
    int neurIdPostReal = groupConfigs[netId][postlGrpId].lStartN + neurIdPost;

    bool synapseFound = false;
    int pos_ij = managerRuntimeData.cumulativePre[neurIdPostReal];
    for (int j = 0; j < managerRuntimeData.Npre[neurIdPostReal]; pos_ij++, j++) {

            managerRuntimeData.wt[pos_ij] = isExcitatoryGroup(connectConfigMap[connId].grpSrc) ? weight : -1.0 * weight;

            CUDA_CHECK_ERRORS(cudaMemcpy(&runtimeData[netId].wt[pos_ij], &managerRuntimeData.wt[pos_ij],
                sizeof(float), cudaMemcpyHostToDevice));
            if (runtimeData[netId].maxSynWt != NULL) {
                CUDA_CHECK_ERRORS(cudaMemcpy(&runtimeData[netId].maxSynWt[pos_ij], &managerRuntimeData.maxSynWt[pos_ij],
                    sizeof(float), cudaMemcpyHostToDevice));

            memcpy(&runtimeData[netId].wt[pos_ij], &managerRuntimeData.wt[pos_ij], sizeof(float));
            if (runtimeData[netId].maxSynWt != NULL) {
                memcpy(&runtimeData[netId].maxSynWt[pos_ij], &managerRuntimeData.maxSynWt[pos_ij], sizeof(float));

            synapseFound = true;

    if (!synapseFound) {
        KERNEL_WARN("setWeight(%d,%d,%d,%f,%s): Synapse does not exist, not updated.", connId, neurIdPre, neurIdPost,
            weight, (updateWeightRange ? "true" : "false"));
    assert(grpId >= 0); assert(grpId < numGroups);

    int netId = groupConfigMDMap[grpId].netId;
    int lGrpId = groupConfigMDMap[grpId].lGrpId;

    for (int lNId = groupConfigs[netId][lGrpId].lStartN, j = 0; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++, j++) {
        managerRuntimeData.extCurrent[lNId] = current[j];

        copyExternalCurrent(netId, lGrpId, &runtimeData[netId], cudaMemcpyHostToDevice, false);
        copyExternalCurrent(netId, lGrpId, &runtimeData[netId], false);
    if (!fwrite(&tmpInt, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");

    if (!fwrite(&tmpFloat, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");

    tmpFloat = ((float)simTimeSec) + ((float)simTimeMs) / 1000.0f;
    if (!fwrite(&tmpFloat, sizeof(float), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");

    tmpFloat = executionTime / 1000.0f;
    if (!fwrite(&tmpFloat, sizeof(float), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");

    if (!fwrite(&glbNetworkConfig.numN, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");

    if (!fwrite(&numGroups, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");

    for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
        if (!fwrite(&groupConfigMDMap[gGrpId].gStartN, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
        if (!fwrite(&groupConfigMDMap[gGrpId].gEndN, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");

        if (!fwrite(&groupConfigMap[gGrpId].grid.numX, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
        if (!fwrite(&groupConfigMap[gGrpId].grid.numY, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
        if (!fwrite(&groupConfigMap[gGrpId].grid.numZ, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");

        strncpy(name, groupConfigMap[gGrpId].grpName.c_str(), 100);
        if (!fwrite(name, 1, 100, fid)) KERNEL_ERROR("saveSimulation fwrite error");
    if (!saveSynapseInfo) return;

        if (!groupPartitionLists[netId].empty()) {

    if (!fwrite(&net_count, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");

        if (!groupPartitionLists[netId].empty()) {
            fetchPreConnectionInfo(netId);
            fetchPostConnectionInfo(netId);
            fetchConnIdsLookupArray(netId);
            fetchSynapseState(netId);

            int numSynToSave = 0;
            for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
                if (grpIt->netId == netId) {
                    numSynToSave += grpIt->numPostSynapses;

            if (!fwrite(&numSynToSave, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
            int numSynSaved = 0;
            for (int lNId = 0; lNId < networkConfigs[netId].numNAssigned; lNId++) {

                for (int t = 0; t < glbNetworkConfig.maxDelay; t++) {

                        int pre_pos = managerRuntimeData.cumulativePre[lNIdPost] + preSynId;

                        float weight = managerRuntimeData.wt[pre_pos];
                        float maxWeight = managerRuntimeData.maxSynWt[pre_pos];

                        int gGrpIdPre = groupConfigs[netId][lGrpIdPre].gGrpId;
                        int gGrpIdPost = groupConfigs[netId][lGrpIdPost].gGrpId;
                        int grpNIdPre = lNId - groupConfigs[netId][lGrpIdPre].lStartN;
                        int grpNIdPost = lNIdPost - groupConfigs[netId][lGrpIdPost].lStartN;

                        if (groupConfigMDMap[gGrpIdPre].netId == netId) {
                            if (!fwrite(&gGrpIdPre, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
                            if (!fwrite(&gGrpIdPost, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
                            if (!fwrite(&grpNIdPre, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
                            if (!fwrite(&grpNIdPost, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
                            if (!fwrite(&connId, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
                            if (!fwrite(&weight, sizeof(float), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
                            if (!fwrite(&maxWeight, sizeof(float), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
                            if (!fwrite(&delay, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");

            assert(numSynSaved == numSynToSave);
    if (fpInf_ != NULL && fpInf_ != stdout && fpInf_ != stderr)

    if (fpErr_ != NULL && fpErr_ != stdout && fpErr_ != stderr)

    if (fpDeb_ != NULL && fpDeb_ != stdout && fpDeb_ != stderr)

    if (fpLog_ != NULL && fpLog_ != stdout && fpLog_ != stderr)

    short int connId = -1;

    for (std::map<int, ConnectConfig>::iterator it = connectConfigMap.begin(); it != connectConfigMap.end(); it++) {
        if (it->second.grpSrc == grpIdPre && it->second.grpDest == grpIdPost) {
            connId = it->second.connId;

    if (connectConfigMap.find(connId) == connectConfigMap.end()) {
        KERNEL_ERROR("Total Connections = %d", numConnections);
        KERNEL_ERROR("ConnectId (%d) cannot be recognized", connId);

    return connectConfigMap[connId];
    fetchConductanceAMPA(gGrpId);

    std::vector<float> gAMPAvec;
    for (int gNId = groupConfigMDMap[gGrpId].gStartN; gNId <= groupConfigMDMap[gGrpId].gEndN; gNId++) {
        gAMPAvec.push_back(managerRuntimeData.gAMPA[gNId]);

    fetchConductanceNMDA(gGrpId);

    std::vector<float> gNMDAvec;
        for (int gNId = groupConfigMDMap[gGrpId].gStartN; gNId <= groupConfigMDMap[gGrpId].gEndN; gNId++) {
            gNMDAvec.push_back(managerRuntimeData.gNMDA_d[gNId] - managerRuntimeData.gNMDA_r[gNId]);

        for (int gNId = groupConfigMDMap[gGrpId].gStartN; gNId <= groupConfigMDMap[gGrpId].gEndN; gNId++) {
            gNMDAvec.push_back(managerRuntimeData.gNMDA[gNId]);

    fetchConductanceGABAa(gGrpId);

    std::vector<float> gGABAaVec;
    for (int gNId = groupConfigMDMap[gGrpId].gStartN; gNId <= groupConfigMDMap[gGrpId].gEndN; gNId++) {
        gGABAaVec.push_back(managerRuntimeData.gGABAa[gNId]);

    fetchConductanceGABAb(gGrpId);

    std::vector<float> gGABAbVec;
        for (int gNId = groupConfigMDMap[gGrpId].gStartN; gNId <= groupConfigMDMap[gGrpId].gEndN; gNId++) {
            gGABAbVec.push_back(managerRuntimeData.gGABAb_d[gNId] - managerRuntimeData.gGABAb_r[gNId]);

        for (int gNId = groupConfigMDMap[gGrpId].gStartN; gNId <= groupConfigMDMap[gGrpId].gEndN; gNId++) {
            gGABAbVec.push_back(managerRuntimeData.gGABAb[gNId]);

    assert(connId >= 0 && connId < numConnections);

    return RangeDelay(connectConfigMap[connId].minDelay, connectConfigMap[connId].maxDelay);
uint8_t* SNN::getDelays(int gGrpIdPre, int gGrpIdPost, int& numPreN, int& numPostN) {
    int netIdPost = groupConfigMDMap[gGrpIdPost].netId;
    int lGrpIdPost = groupConfigMDMap[gGrpIdPost].lGrpId;

    for (int lGrpId = 0; lGrpId < networkConfigs[netIdPost].numGroupsAssigned; lGrpId++)
        if (groupConfigs[netIdPost][lGrpId].gGrpId == gGrpIdPre) {

    assert(lGrpIdPre != -1);

    numPreN = groupConfigMap[gGrpIdPre].numN;
    numPostN = groupConfigMap[gGrpIdPost].numN;

    delays = new uint8_t[numPreN * numPostN];
    memset(delays, 0, numPreN * numPostN);

    fetchPostConnectionInfo(netIdPost);

    for (int lNIdPre = groupConfigs[netIdPost][lGrpIdPre].lStartN; lNIdPre < groupConfigs[netIdPost][lGrpIdPre].lEndN; lNIdPre++) {
        unsigned int offset = managerRuntimeData.cumulativePost[lNIdPre];

        for (int t = 0; t < glbNetworkConfig.maxDelay; t++) {

                assert(lNIdPost < glbNetworkConfig.numN);

                if (lNIdPost >= groupConfigs[netIdPost][lGrpIdPost].lStartN && lNIdPost <= groupConfigs[netIdPost][lGrpIdPost].lEndN) {
                    delays[(lNIdPre - groupConfigs[netIdPost][lGrpIdPre].lStartN) + numPreN * (lNIdPost - groupConfigs[netIdPost][lGrpIdPost].lStartN)] = t + 1;
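    // Added note: the returned matrix is dense with numPreN*numPostN entries addressed as
    // delays[preIdx + numPreN * postIdx]; a stored value of t+1 is the axonal delay in ms,
    // and 0 (from the memset above) means "no synapse between this pair".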
    assert(gGrpId >= 0 && gGrpId < numGroups);

    return groupConfigMap[gGrpId].grid;

    for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
        if (groupConfigMap[gGrpId].grpName.compare(grpName) == 0) {

    assert(gGrpId >= -1 && gGrpId < numGroups);

    return groupConfigMap[gGrpId].grpName;
    gInfo.WithSTDP = groupConfigMap[gGrpId].stdpConfig.WithSTDP;
    gInfo.WithESTDP = groupConfigMap[gGrpId].stdpConfig.WithESTDP;
    gInfo.WithISTDP = groupConfigMap[gGrpId].stdpConfig.WithISTDP;
    gInfo.WithESTDPtype = groupConfigMap[gGrpId].stdpConfig.WithESTDPtype;
    gInfo.WithISTDPtype = groupConfigMap[gGrpId].stdpConfig.WithISTDPtype;
    gInfo.WithESTDPcurve = groupConfigMap[gGrpId].stdpConfig.WithESTDPcurve;
    gInfo.WithISTDPcurve = groupConfigMap[gGrpId].stdpConfig.WithISTDPcurve;
    gInfo.ALPHA_MINUS_EXC = groupConfigMap[gGrpId].stdpConfig.ALPHA_MINUS_EXC;
    gInfo.ALPHA_PLUS_EXC = groupConfigMap[gGrpId].stdpConfig.ALPHA_PLUS_EXC;
    gInfo.TAU_PLUS_INV_EXC = groupConfigMap[gGrpId].stdpConfig.TAU_PLUS_INV_EXC;
    gInfo.ALPHA_MINUS_INB = groupConfigMap[gGrpId].stdpConfig.ALPHA_MINUS_INB;
    gInfo.ALPHA_PLUS_INB = groupConfigMap[gGrpId].stdpConfig.ALPHA_PLUS_INB;
    gInfo.TAU_PLUS_INV_INB = groupConfigMap[gGrpId].stdpConfig.TAU_PLUS_INV_INB;
    gInfo.GAMMA = groupConfigMap[gGrpId].stdpConfig.GAMMA;
    gInfo.BETA_LTP = groupConfigMap[gGrpId].stdpConfig.BETA_LTP;
    gInfo.BETA_LTD = groupConfigMap[gGrpId].stdpConfig.BETA_LTD;
    gInfo.LAMBDA = groupConfigMap[gGrpId].stdpConfig.LAMBDA;
    gInfo.DELTA = groupConfigMap[gGrpId].stdpConfig.DELTA;

    gInfo.baseDP = groupConfigMap[gGrpId].neuromodulatorConfig.baseDP;
    gInfo.base5HT = groupConfigMap[gGrpId].neuromodulatorConfig.base5HT;
    gInfo.baseACh = groupConfigMap[gGrpId].neuromodulatorConfig.baseACh;
    gInfo.baseNE = groupConfigMap[gGrpId].neuromodulatorConfig.baseNE;
    gInfo.decayDP = groupConfigMap[gGrpId].neuromodulatorConfig.decayDP;
    gInfo.decay5HT = groupConfigMap[gGrpId].neuromodulatorConfig.decay5HT;
    gInfo.decayACh = groupConfigMap[gGrpId].neuromodulatorConfig.decayACh;
    gInfo.decayNE = groupConfigMap[gGrpId].neuromodulatorConfig.decayNE;
    assert(gNId >= 0 && gNId < glbNetworkConfig.numN);

    for (std::map<int, GroupConfigMD>::iterator grpIt = groupConfigMDMap.begin(); grpIt != groupConfigMDMap.end(); grpIt++) {
        if (gNId >= grpIt->second.gStartN && gNId <= grpIt->second.gEndN)
            gGrpId = grpIt->second.gGrpId;

    int neurId = gNId - groupConfigMDMap[gGrpId].gStartN;

    Grid3D grid = groupConfigMap[gGrpId].grid;
    assert(gGrpId >= 0 && gGrpId < numGroups);

    int intX = relNeurId % grid.numX;
    int intY = (relNeurId / grid.numX) % grid.numY;
    int intZ = relNeurId / (grid.numX * grid.numY);

    return Point3D(coordX, coordY, coordZ);
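    // Added note: the neuron's linear index is unpacked in row-major order over the group's
    // Grid3D with x varying fastest; e.g. for a 4x3x2 grid, relNeurId 17 gives
    // intX = 17 % 4 = 1, intY = (17/4) % 3 = 1, intZ = 17/12 = 1.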
    if (connectConfigMap.find(connId) == connectConfigMap.end()) {
        KERNEL_ERROR("Connection ID was not found. Quitting.");

    return connectConfigMap[connId].numberOfConnections;

    if (groupConfigMDMap[gGrpId].spikeMonitorId >= 0) {
        return spikeMonList[(groupConfigMDMap[gGrpId].spikeMonitorId)];

    if (groupConfigMDMap[gGrpId].spikeMonitorId >= 0) {
        return spikeMonCoreList[(groupConfigMDMap[gGrpId].spikeMonitorId)];

    if (groupConfigMDMap[gGrpId].neuronMonitorId >= 0) {
        return neuronMonList[(groupConfigMDMap[gGrpId].neuronMonitorId)];

    if (groupConfigMDMap[gGrpId].neuronMonitorId >= 0) {
        return neuronMonCoreList[(groupConfigMDMap[gGrpId].neuronMonitorId)];

    assert(connId >= 0 && connId < numConnections);

    return RangeWeight(0.0f, connectConfigMap[connId].initWt, connectConfigMap[connId].maxWt);
void SNN::SNNinit() {
    switch (loggerMode_) {
#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
            fpDeb_ = fopen("nul", "w");
            fpDeb_ = fopen("/dev/null", "w");
#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
            fpInf_ = fopen("nul", "w");
            fpInf_ = fopen("/dev/null", "w");
#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
            fpDeb_ = fopen("nul", "w");
            fpDeb_ = fopen("/dev/null", "w");
#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
            fpInf_ = fopen("nul", "w");
            fpErr_ = fopen("nul", "w");
            fpDeb_ = fopen("nul", "w");
            fpInf_ = fopen("/dev/null", "w");
            fpErr_ = fopen("/dev/null", "w");
            fpDeb_ = fopen("/dev/null", "w");
#if defined(WIN32) || defined(WIN64)
    CreateDirectory("results", NULL);
    fpLog_ = fopen("results/carlsim.log", "w");

    if (stat("results", &sb) == -1 || !S_ISDIR(sb.st_mode)) {
        createDir = mkdir("results", 0777);

    if (createDir == -1) {
        fprintf(stderr, "Could not create directory \"results/\", which is required to "
            "store simulation results. Aborting simulation...\n");

    fpLog_ = fopen("results/carlsim.log", "w");

    if (createDir == 0) {
        KERNEL_INFO("Created results directory \"results/\".");

    if (fpLog_ == NULL) {
        fprintf(stderr, "Could not create the directory \"results/\" or the log file \"results/carlsim.log\""
            ", which is required to store simulation results. Aborting simulation...\n");
    KERNEL_INFO("*********************************************************************************");
    KERNEL_INFO("******************** Welcome to CARLsim %d.%d ***************************",
    KERNEL_INFO("*********************************************************************************\n");

    KERNEL_INFO("***************************** Configuring Network ********************************");
    KERNEL_INFO("Starting CARLsim simulation \"%s\" in %s mode", networkName_.c_str(),

    struct tm* timeinfo;
    timeinfo = localtime(&rawtime);
    KERNEL_DEBUG("Current local time and date: %s", asctime(timeinfo));
    simTimeRunStart = 0; simTimeRunStop = 0;
    simTimeLastRunSummary = 0;
    simTimeMs = 0; simTimeSec = 0; simTime = 0;

    numCompartmentConnections = 0;
    numSpikeGenGrps = 0;
    simulatorDeleted = false;

    cumExecutionTime = 0.0;
    executionTime = 0.0;

    spikeRateUpdated = false;
    numSpikeMonitor = 0;
    numNeuronMonitor = 0;
    numGroupMonitor = 0;
    numConnectionMonitor = 0;

    sim_with_compartments = false;
    sim_with_fixedwts = true;
    sim_with_conductances = false;
    sim_with_stdp = false;
    sim_with_modulated_stdp = false;
    sim_with_homeostasis = false;
    sim_with_stp = false;
    sim_in_testing = false;

    sim_with_NMDA_rise = false;
    sim_with_GABAb_rise = false;
    dAMPA = 1.0 - 1.0 / 5.0;
    rNMDA = 1.0 - 1.0 / 10.0;
    dNMDA = 1.0 - 1.0 / 150.0;
    dGABAa = 1.0 - 1.0 / 6.0;
    rGABAb = 1.0 - 1.0 / 100.0;
    dGABAb = 1.0 - 1.0 / 150.0;

    resetMonitors(false);
    resetGroupConfigs(false);
    resetConnectionConfigs(false);

        runtimeData[netId].allocated = false;

    memset(&managerRuntimeData, 0, sizeof(RuntimeData));

    wtANDwtChangeUpdateInterval_ = 1000;
    wtANDwtChangeUpdateIntervalCnt_ = 0;
    stdpScaleFactor_ = 1.0f;
    wtChangeDecay_ = 0.0f;

    CUDA_CREATE_TIMER(timer);
    CUDA_RESET_TIMER(timer);
void SNN::advSimStep() {
    doSTPUpdateAndDecayCond();

    spikeGeneratorUpdate();

    updateTimingTable();

    globalStateUpdate();

    clearExtFiringTable();
void SNN::doSTPUpdateAndDecayCond() {
#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
    pthread_t threads[numCores + 1];

    int threadCount = 0;

        if (!groupPartitionLists[netId].empty()) {
            assert(runtimeData[netId].allocated);
                doSTPUpdateAndDecayCond_GPU(netId);
#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
                doSTPUpdateAndDecayCond_CPU(netId);
#else // Linux or MAC
                pthread_attr_t attr;
                pthread_attr_init(&attr);
                pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

                argsThreadRoutine[threadCount].snn_pointer = this;
                argsThreadRoutine[threadCount].netId = netId;
                argsThreadRoutine[threadCount].lGrpId = 0;
                argsThreadRoutine[threadCount].startIdx = 0;
                argsThreadRoutine[threadCount].endIdx = 0;
                argsThreadRoutine[threadCount].GtoLOffset = 0;

                pthread_create(&threads[threadCount], &attr, &SNN::helperDoSTPUpdateAndDecayCond_CPU, (void*)&argsThreadRoutine[threadCount]);
                pthread_attr_destroy(&attr);

#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
    for (int i = 0; i < threadCount; i++) {
        pthread_join(threads[i], NULL);
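// Added note: this per-network dispatch pattern repeats for every stage of advSimStep()
// below: GPU partitions call the corresponding *_GPU kernel, while CPU partitions either
// run the *_CPU routine inline (Windows/macOS builds) or on a pthread pinned to a core via
// pthread_attr_setaffinity_np, with all spawned threads joined before the stage returns.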
void SNN::spikeGeneratorUpdate() {
    if (spikeRateUpdated) {
#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
        pthread_t threads[numCores + 1];

        int threadCount = 0;

            if (!groupPartitionLists[netId].empty()) {
                    assignPoissonFiringRate_GPU(netId);
#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
                    assignPoissonFiringRate_CPU(netId);
#else // Linux or MAC
                    pthread_attr_t attr;
                    pthread_attr_init(&attr);
                    pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

                    argsThreadRoutine[threadCount].snn_pointer = this;
                    argsThreadRoutine[threadCount].netId = netId;
                    argsThreadRoutine[threadCount].lGrpId = 0;
                    argsThreadRoutine[threadCount].startIdx = 0;
                    argsThreadRoutine[threadCount].endIdx = 0;
                    argsThreadRoutine[threadCount].GtoLOffset = 0;

                    pthread_create(&threads[threadCount], &attr, &SNN::helperAssignPoissonFiringRate_CPU, (void*)&argsThreadRoutine[threadCount]);
                    pthread_attr_destroy(&attr);

#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
        for (int i = 0; i < threadCount; i++) {
            pthread_join(threads[i], NULL);

        spikeRateUpdated = false;

    generateUserDefinedSpikes();

#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
    pthread_t threads[numCores + 1];

    int threadCount = 0;

        if (!groupPartitionLists[netId].empty()) {
                spikeGeneratorUpdate_GPU(netId);
#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
                spikeGeneratorUpdate_CPU(netId);
#else // Linux or MAC
                pthread_attr_t attr;
                pthread_attr_init(&attr);
                pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

                argsThreadRoutine[threadCount].snn_pointer = this;
                argsThreadRoutine[threadCount].netId = netId;
                argsThreadRoutine[threadCount].lGrpId = 0;
                argsThreadRoutine[threadCount].startIdx = 0;
                argsThreadRoutine[threadCount].endIdx = 0;
                argsThreadRoutine[threadCount].GtoLOffset = 0;

                pthread_create(&threads[threadCount], &attr, &SNN::helperSpikeGeneratorUpdate_CPU, (void*)&argsThreadRoutine[threadCount]);
                pthread_attr_destroy(&attr);

#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
    for (int i = 0; i < threadCount; i++) {
        pthread_join(threads[i], NULL);
void SNN::findFiring() {
#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
    pthread_t threads[numCores + 1];

    int threadCount = 0;

        if (!groupPartitionLists[netId].empty()) {
                findFiring_GPU(netId);
#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
                findFiring_CPU(netId);
#else // Linux or MAC
                pthread_attr_t attr;
                pthread_attr_init(&attr);
                pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

                argsThreadRoutine[threadCount].snn_pointer = this;
                argsThreadRoutine[threadCount].netId = netId;
                argsThreadRoutine[threadCount].lGrpId = 0;
                argsThreadRoutine[threadCount].startIdx = 0;
                argsThreadRoutine[threadCount].endIdx = 0;
                argsThreadRoutine[threadCount].GtoLOffset = 0;

                pthread_create(&threads[threadCount], &attr, &SNN::helperFindFiring_CPU, (void*)&argsThreadRoutine[threadCount]);
                pthread_attr_destroy(&attr);

#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
    for (int i = 0; i < threadCount; i++) {
        pthread_join(threads[i], NULL);
void SNN::doCurrentUpdate() {
#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
    pthread_t threads[numCores + 1];

    int threadCount = 0;

        if (!groupPartitionLists[netId].empty()) {
                doCurrentUpdateD2_GPU(netId);
#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
                doCurrentUpdateD2_CPU(netId);
#else // Linux or MAC
                pthread_attr_t attr;
                pthread_attr_init(&attr);
                pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

                argsThreadRoutine[threadCount].snn_pointer = this;
                argsThreadRoutine[threadCount].netId = netId;
                argsThreadRoutine[threadCount].lGrpId = 0;
                argsThreadRoutine[threadCount].startIdx = 0;
                argsThreadRoutine[threadCount].endIdx = 0;
                argsThreadRoutine[threadCount].GtoLOffset = 0;

                pthread_create(&threads[threadCount], &attr, &SNN::helperDoCurrentUpdateD2_CPU, (void*)&argsThreadRoutine[threadCount]);
                pthread_attr_destroy(&attr);

#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
    for (int i = 0; i < threadCount; i++) {
        pthread_join(threads[i], NULL);

        if (!groupPartitionLists[netId].empty()) {
                doCurrentUpdateD1_GPU(netId);
#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
                doCurrentUpdateD1_CPU(netId);
#else // Linux or MAC
                pthread_attr_t attr;
                pthread_attr_init(&attr);
                pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

                argsThreadRoutine[threadCount].snn_pointer = this;
                argsThreadRoutine[threadCount].netId = netId;
                argsThreadRoutine[threadCount].lGrpId = 0;
                argsThreadRoutine[threadCount].startIdx = 0;
                argsThreadRoutine[threadCount].endIdx = 0;
                argsThreadRoutine[threadCount].GtoLOffset = 0;

                pthread_create(&threads[threadCount], &attr, &SNN::helperDoCurrentUpdateD1_CPU, (void*)&argsThreadRoutine[threadCount]);
                pthread_attr_destroy(&attr);

#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
    for (int i = 0; i < threadCount; i++) {
        pthread_join(threads[i], NULL);
void SNN::updateTimingTable() {
#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
    pthread_t threads[numCores + 1];

    int threadCount = 0;

        if (!groupPartitionLists[netId].empty()) {
                updateTimingTable_GPU(netId);
#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
                updateTimingTable_CPU(netId);
#else // Linux or MAC
                pthread_attr_t attr;
                pthread_attr_init(&attr);
                pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

                argsThreadRoutine[threadCount].snn_pointer = this;
                argsThreadRoutine[threadCount].netId = netId;
                argsThreadRoutine[threadCount].lGrpId = 0;
                argsThreadRoutine[threadCount].startIdx = 0;
                argsThreadRoutine[threadCount].endIdx = 0;
                argsThreadRoutine[threadCount].GtoLOffset = 0;

                pthread_create(&threads[threadCount], &attr, &SNN::helperUpdateTimingTable_CPU, (void*)&argsThreadRoutine[threadCount]);
                pthread_attr_destroy(&attr);

#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
    for (int i = 0; i < threadCount; i++) {
        pthread_join(threads[i], NULL);
void SNN::globalStateUpdate() {
#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
    pthread_t threads[numCores + 1];

    int threadCount = 0;

        if (!groupPartitionLists[netId].empty()) {
                globalStateUpdate_C_GPU(netId);
#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
                globalStateUpdate_CPU(netId);
#else // Linux or MAC
                pthread_attr_t attr;
                pthread_attr_init(&attr);
                pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

                argsThreadRoutine[threadCount].snn_pointer = this;
                argsThreadRoutine[threadCount].netId = netId;
                argsThreadRoutine[threadCount].lGrpId = 0;
                argsThreadRoutine[threadCount].startIdx = 0;
                argsThreadRoutine[threadCount].endIdx = 0;
                argsThreadRoutine[threadCount].GtoLOffset = 0;

                pthread_create(&threads[threadCount], &attr, &SNN::helperGlobalStateUpdate_CPU, (void*)&argsThreadRoutine[threadCount]);
                pthread_attr_destroy(&attr);

#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
    for (int i = 0; i < threadCount; i++) {
        pthread_join(threads[i], NULL);

        if (!groupPartitionLists[netId].empty()) {
            globalStateUpdate_N_GPU(netId);

        if (!groupPartitionLists[netId].empty()) {
            globalStateUpdate_G_GPU(netId);
void SNN::clearExtFiringTable() {
#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
    pthread_t threads[numCores + 1];

    int threadCount = 0;

        if (!groupPartitionLists[netId].empty()) {
                clearExtFiringTable_GPU(netId);
#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
                clearExtFiringTable_CPU(netId);
#else // Linux or MAC
                pthread_attr_t attr;
                pthread_attr_init(&attr);
                pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

                argsThreadRoutine[threadCount].snn_pointer = this;
                argsThreadRoutine[threadCount].netId = netId;
                argsThreadRoutine[threadCount].lGrpId = 0;
                argsThreadRoutine[threadCount].startIdx = 0;
                argsThreadRoutine[threadCount].endIdx = 0;
                argsThreadRoutine[threadCount].GtoLOffset = 0;

                pthread_create(&threads[threadCount], &attr, &SNN::helperClearExtFiringTable_CPU, (void*)&argsThreadRoutine[threadCount]);
                pthread_attr_destroy(&attr);

#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
    for (int i = 0; i < threadCount; i++) {
        pthread_join(threads[i], NULL);
void SNN::updateWeights() {
#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
    pthread_t threads[numCores + 1];

    int threadCount = 0;

        if (!groupPartitionLists[netId].empty()) {
                updateWeights_GPU(netId);
#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
                updateWeights_CPU(netId);
#else // Linux or MAC
                pthread_attr_t attr;
                pthread_attr_init(&attr);
                pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

                argsThreadRoutine[threadCount].snn_pointer = this;
                argsThreadRoutine[threadCount].netId = netId;
                argsThreadRoutine[threadCount].lGrpId = 0;
                argsThreadRoutine[threadCount].startIdx = 0;
                argsThreadRoutine[threadCount].endIdx = 0;
                argsThreadRoutine[threadCount].GtoLOffset = 0;

                pthread_create(&threads[threadCount], &attr, &SNN::helperUpdateWeights_CPU, (void*)&argsThreadRoutine[threadCount]);
                pthread_attr_destroy(&attr);

#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
    for (int i = 0; i < threadCount; i++) {
        pthread_join(threads[i], NULL);

void SNN::updateNetworkConfig(int netId) {
        copyNetworkConfig(netId, cudaMemcpyHostToDevice);
        copyNetworkConfig(netId);
void SNN::shiftSpikeTables() {
#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
    pthread_t threads[numCores + 1];

    int threadCount = 0;

        if (!groupPartitionLists[netId].empty()) {
                shiftSpikeTables_F_GPU(netId);
#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
                shiftSpikeTables_CPU(netId);
#else // Linux or MAC
                pthread_attr_t attr;
                pthread_attr_init(&attr);
                pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

                argsThreadRoutine[threadCount].snn_pointer = this;
                argsThreadRoutine[threadCount].netId = netId;
                argsThreadRoutine[threadCount].lGrpId = 0;
                argsThreadRoutine[threadCount].startIdx = 0;
                argsThreadRoutine[threadCount].endIdx = 0;
                argsThreadRoutine[threadCount].GtoLOffset = 0;

                pthread_create(&threads[threadCount], &attr, &SNN::helperShiftSpikeTables_CPU, (void*)&argsThreadRoutine[threadCount]);
                pthread_attr_destroy(&attr);

#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
    for (int i = 0; i < threadCount; i++) {
        pthread_join(threads[i], NULL);

        if (!groupPartitionLists[netId].empty()) {
            shiftSpikeTables_T_GPU(netId);

void SNN::allocateSNN(int netId) {
        allocateSNN_GPU(netId);
        allocateSNN_CPU(netId);
void SNN::allocateManagerRuntimeData() {
    managerRuntimeData.voltage = new float[managerRTDSize.maxNumNReg];
    managerRuntimeData.nextVoltage = new float[managerRTDSize.maxNumNReg];
    managerRuntimeData.recovery = new float[managerRTDSize.maxNumNReg];
    managerRuntimeData.Izh_a = new float[managerRTDSize.maxNumNReg];
    managerRuntimeData.Izh_b = new float[managerRTDSize.maxNumNReg];
    managerRuntimeData.Izh_c = new float[managerRTDSize.maxNumNReg];
    managerRuntimeData.Izh_d = new float[managerRTDSize.maxNumNReg];
    managerRuntimeData.Izh_C = new float[managerRTDSize.maxNumNReg];
    managerRuntimeData.Izh_k = new float[managerRTDSize.maxNumNReg];
    managerRuntimeData.Izh_vr = new float[managerRTDSize.maxNumNReg];
    managerRuntimeData.Izh_vt = new float[managerRTDSize.maxNumNReg];
    managerRuntimeData.Izh_vpeak = new float[managerRTDSize.maxNumNReg];
    managerRuntimeData.lif_tau_m = new int[managerRTDSize.maxNumNReg];
    managerRuntimeData.lif_tau_ref = new int[managerRTDSize.maxNumNReg];
    managerRuntimeData.lif_tau_ref_c = new int[managerRTDSize.maxNumNReg];
    managerRuntimeData.lif_vTh = new float[managerRTDSize.maxNumNReg];
    managerRuntimeData.lif_vReset = new float[managerRTDSize.maxNumNReg];
    managerRuntimeData.lif_gain = new float[managerRTDSize.maxNumNReg];
    managerRuntimeData.lif_bias = new float[managerRTDSize.maxNumNReg];
    managerRuntimeData.current = new float[managerRTDSize.maxNumNReg];
    managerRuntimeData.extCurrent = new float[managerRTDSize.maxNumNReg];
    managerRuntimeData.totalCurrent = new float[managerRTDSize.maxNumNReg];
    managerRuntimeData.curSpike = new bool[managerRTDSize.maxNumNReg];
    memset(managerRuntimeData.voltage, 0, sizeof(float) * managerRTDSize.maxNumNReg);
    memset(managerRuntimeData.nextVoltage, 0, sizeof(float) * managerRTDSize.maxNumNReg);
    memset(managerRuntimeData.recovery, 0, sizeof(float) * managerRTDSize.maxNumNReg);
    memset(managerRuntimeData.Izh_a, 0, sizeof(float) * managerRTDSize.maxNumNReg);
    memset(managerRuntimeData.Izh_b, 0, sizeof(float) * managerRTDSize.maxNumNReg);
    memset(managerRuntimeData.Izh_c, 0, sizeof(float) * managerRTDSize.maxNumNReg);
    memset(managerRuntimeData.Izh_d, 0, sizeof(float) * managerRTDSize.maxNumNReg);
    memset(managerRuntimeData.Izh_C, 0, sizeof(float) * managerRTDSize.maxNumNReg);
    memset(managerRuntimeData.Izh_k, 0, sizeof(float) * managerRTDSize.maxNumNReg);
    memset(managerRuntimeData.Izh_vr, 0, sizeof(float) * managerRTDSize.maxNumNReg);
    memset(managerRuntimeData.Izh_vt, 0, sizeof(float) * managerRTDSize.maxNumNReg);
    memset(managerRuntimeData.Izh_vpeak, 0, sizeof(float) * managerRTDSize.maxNumNReg);
    memset(managerRuntimeData.lif_tau_m, 0, sizeof(int) * managerRTDSize.maxNumNReg);
    memset(managerRuntimeData.lif_tau_ref, 0, sizeof(int) * managerRTDSize.maxNumNReg);
    memset(managerRuntimeData.lif_tau_ref_c, 0, sizeof(int) * managerRTDSize.maxNumNReg);
    memset(managerRuntimeData.lif_vTh, 0, sizeof(float) * managerRTDSize.maxNumNReg);
    memset(managerRuntimeData.lif_vReset, 0, sizeof(float) * managerRTDSize.maxNumNReg);
    memset(managerRuntimeData.lif_gain, 0, sizeof(float) * managerRTDSize.maxNumNReg);
    memset(managerRuntimeData.lif_bias, 0, sizeof(float) * managerRTDSize.maxNumNReg);
    memset(managerRuntimeData.current, 0, sizeof(float) * managerRTDSize.maxNumNReg);
    memset(managerRuntimeData.extCurrent, 0, sizeof(float) * managerRTDSize.maxNumNReg);
    memset(managerRuntimeData.totalCurrent, 0, sizeof(float) * managerRTDSize.maxNumNReg);
    memset(managerRuntimeData.curSpike, 0, sizeof(bool) * managerRTDSize.maxNumNReg);
2831 managerRuntimeData.
gAMPA =
new float[managerRTDSize.glbNumNReg];
2832 managerRuntimeData.
gNMDA_r =
new float[managerRTDSize.glbNumNReg];
2833 managerRuntimeData.
gNMDA_d =
new float[managerRTDSize.glbNumNReg];
2834 managerRuntimeData.
gNMDA =
new float[managerRTDSize.glbNumNReg];
2835 memset(managerRuntimeData.
gAMPA, 0,
sizeof(
float) * managerRTDSize.glbNumNReg);
2836 memset(managerRuntimeData.
gNMDA_r, 0,
sizeof(
float) * managerRTDSize.glbNumNReg);
2837 memset(managerRuntimeData.
gNMDA_d, 0,
sizeof(
float) * managerRTDSize.glbNumNReg);
2838 memset(managerRuntimeData.
gNMDA, 0,
sizeof(
float) * managerRTDSize.glbNumNReg);
2840 managerRuntimeData.
gGABAa =
new float[managerRTDSize.glbNumNReg];
2841 managerRuntimeData.
gGABAb_r =
new float[managerRTDSize.glbNumNReg];
2842 managerRuntimeData.
gGABAb_d =
new float[managerRTDSize.glbNumNReg];
2843 managerRuntimeData.
gGABAb =
new float[managerRTDSize.glbNumNReg];
2844 memset(managerRuntimeData.
gGABAa, 0,
sizeof(
float) * managerRTDSize.glbNumNReg);
2845 memset(managerRuntimeData.
gGABAb_r, 0,
sizeof(
float) * managerRTDSize.glbNumNReg);
2846 memset(managerRuntimeData.
gGABAb_d, 0,
sizeof(
float) * managerRTDSize.glbNumNReg);
2847 memset(managerRuntimeData.
gGABAb, 0,
sizeof(
float) * managerRTDSize.glbNumNReg);
2850 managerRuntimeData.
grpDA =
new float[managerRTDSize.maxNumGroups];
2851 managerRuntimeData.
grp5HT =
new float[managerRTDSize.maxNumGroups];
2852 managerRuntimeData.
grpACh =
new float[managerRTDSize.maxNumGroups];
2853 managerRuntimeData.
grpNE =
new float[managerRTDSize.maxNumGroups];
2854 memset(managerRuntimeData.
grpDA, 0,
sizeof(
float) * managerRTDSize.maxNumGroups);
2855 memset(managerRuntimeData.
grp5HT, 0,
sizeof(
float) * managerRTDSize.maxNumGroups);
2856 memset(managerRuntimeData.
grpACh, 0,
sizeof(
float) * managerRTDSize.maxNumGroups);
2857 memset(managerRuntimeData.
grpNE, 0,
sizeof(
float) * managerRTDSize.maxNumGroups);
2860 managerRuntimeData.
grpDABuffer =
new float[managerRTDSize.maxNumGroups * 1000];
2861 managerRuntimeData.
grp5HTBuffer =
new float[managerRTDSize.maxNumGroups * 1000];
2862 managerRuntimeData.
grpAChBuffer =
new float[managerRTDSize.maxNumGroups * 1000];
2863 managerRuntimeData.
grpNEBuffer =
new float[managerRTDSize.maxNumGroups * 1000];
2864 memset(managerRuntimeData.
grpDABuffer, 0, managerRTDSize.maxNumGroups *
sizeof(
float) * 1000);
2865 memset(managerRuntimeData.
grp5HTBuffer, 0, managerRTDSize.maxNumGroups *
sizeof(
float) * 1000);
2866 memset(managerRuntimeData.
grpAChBuffer, 0, managerRTDSize.maxNumGroups *
sizeof(
float) * 1000);
2867 memset(managerRuntimeData.
grpNEBuffer, 0, managerRTDSize.maxNumGroups *
sizeof(
float) * 1000);
2869 managerRuntimeData.
lastSpikeTime =
new int[managerRTDSize.maxNumNAssigned];
2870 memset(managerRuntimeData.
lastSpikeTime, 0,
sizeof(
int) * managerRTDSize.maxNumNAssigned);
2872 managerRuntimeData.
nSpikeCnt =
new int[managerRTDSize.glbNumN];
2873 memset(managerRuntimeData.
nSpikeCnt, 0,
sizeof(
int) * managerRTDSize.glbNumN);
2876 managerRuntimeData.
avgFiring =
new float[managerRTDSize.maxNumN];
2877 managerRuntimeData.
baseFiring =
new float[managerRTDSize.maxNumN];
2878 memset(managerRuntimeData.
avgFiring, 0,
sizeof(
float) * managerRTDSize.maxNumN);
2879 memset(managerRuntimeData.
baseFiring, 0,
sizeof(
float) * managerRTDSize.maxNumN);
2884 managerRuntimeData.
stpu =
new float[managerRTDSize.maxNumN * (glbNetworkConfig.
maxDelay + 1)];
2885 managerRuntimeData.
stpx =
new float[managerRTDSize.maxNumN * (glbNetworkConfig.
maxDelay + 1)];
2886 memset(managerRuntimeData.
stpu, 0,
sizeof(
float) * managerRTDSize.maxNumN * (glbNetworkConfig.
maxDelay + 1));
2887 memset(managerRuntimeData.
stpx, 0,
sizeof(
float) * managerRTDSize.maxNumN * (glbNetworkConfig.
maxDelay + 1));
2889 managerRuntimeData.
Npre =
new unsigned short[managerRTDSize.maxNumNAssigned];
2890 managerRuntimeData.
Npre_plastic =
new unsigned short[managerRTDSize.maxNumNAssigned];
2891 managerRuntimeData.
Npost =
new unsigned short[managerRTDSize.maxNumNAssigned];
2892 managerRuntimeData.
cumulativePost =
new unsigned int[managerRTDSize.maxNumNAssigned];
2893 managerRuntimeData.
cumulativePre =
new unsigned int[managerRTDSize.maxNumNAssigned];
2894 memset(managerRuntimeData.
Npre, 0,
sizeof(
short) * managerRTDSize.maxNumNAssigned);
2895 memset(managerRuntimeData.
Npre_plastic, 0,
sizeof(
short) * managerRTDSize.maxNumNAssigned);
2896 memset(managerRuntimeData.
Npost, 0,
sizeof(
short) * managerRTDSize.maxNumNAssigned);
2897 memset(managerRuntimeData.
cumulativePost, 0,
sizeof(
int) * managerRTDSize.maxNumNAssigned);
2898 memset(managerRuntimeData.
cumulativePre, 0,
sizeof(
int) * managerRTDSize.maxNumNAssigned);
2908 managerRuntimeData.
wt =
new float[managerRTDSize.maxNumPreSynNet];
2909 managerRuntimeData.
wtChange =
new float[managerRTDSize.maxNumPreSynNet];
2910 managerRuntimeData.
maxSynWt =
new float[managerRTDSize.maxNumPreSynNet];
2911 managerRuntimeData.
synSpikeTime =
new int[managerRTDSize.maxNumPreSynNet];
2912 memset(managerRuntimeData.
wt, 0,
sizeof(
float) * managerRTDSize.maxNumPreSynNet);
2913 memset(managerRuntimeData.
wtChange, 0,
sizeof(
float) * managerRTDSize.maxNumPreSynNet);
2914 memset(managerRuntimeData.
maxSynWt, 0,
sizeof(
float) * managerRTDSize.maxNumPreSynNet);
2915 memset(managerRuntimeData.
synSpikeTime, 0,
sizeof(
int) * managerRTDSize.maxNumPreSynNet);
2917 mulSynFast =
new float[managerRTDSize.maxNumConnections];
2918 mulSynSlow =
new float[managerRTDSize.maxNumConnections];
2919 memset(mulSynFast, 0,
sizeof(
float) * managerRTDSize.maxNumConnections);
2920 memset(mulSynSlow, 0,
sizeof(
float) * managerRTDSize.maxNumConnections);
2922 managerRuntimeData.
connIdsPreIdx =
new short int[managerRTDSize.maxNumPreSynNet];
2923 memset(managerRuntimeData.
connIdsPreIdx, 0,
sizeof(
short int) * managerRTDSize.maxNumPreSynNet);
2925 managerRuntimeData.
grpIds =
new short int[managerRTDSize.maxNumNAssigned];
2926 memset(managerRuntimeData.
grpIds, 0,
sizeof(
short int) * managerRTDSize.maxNumNAssigned);
2928 managerRuntimeData.
spikeGenBits =
new unsigned int[managerRTDSize.maxNumNSpikeGen / 32 + 1];
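// spikeGenBits above is a bit mask with one bit per spike-generator neuron, which is why it is
// sized maxNumNSpikeGen / 32 + 1 unsigned ints. The two helpers below are an illustrative sketch
// (hypothetical, not part of CARLsim) of the 32-bit packing implied by that allocation size.
static inline void setSpikeGenBitSketch(unsigned int* spikeGenBits, int nIdx) {
    spikeGenBits[nIdx / 32] |= (1u << (nIdx % 32));        // set the bit for neuron nIdx
}
static inline bool testSpikeGenBitSketch(const unsigned int* spikeGenBits, int nIdx) {
    return (spikeGenBits[nIdx / 32] >> (nIdx % 32)) & 1u;  // query the bit for neuron nIdx
}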
int SNN::assignGroup(int gGrpId, int availableNeuronId) {
    int newAvailableNeuronId;
    assert(groupConfigMDMap[gGrpId].gStartN == -1); // the group must not have been assigned yet
    groupConfigMDMap[gGrpId].gStartN = availableNeuronId;
    groupConfigMDMap[gGrpId].gEndN = availableNeuronId + groupConfigMap[gGrpId].numN - 1;
    // ...
    KERNEL_DEBUG( // format string elided in this extract
        gGrpId, groupConfigMap[gGrpId].grpName.c_str(), groupConfigMDMap[gGrpId].gStartN, groupConfigMDMap[gGrpId].gEndN);
    // ...
    newAvailableNeuronId = availableNeuronId + groupConfigMap[gGrpId].numN;
    // ...
    return newAvailableNeuronId;
}

int SNN::assignGroup(std::list<GroupConfigMD>::iterator grpIt, int localGroupId, int availableNeuronId) {
    int newAvailableNeuronId;
    assert(grpIt->lGrpId == -1); // the group must not yet have a local id
    grpIt->lGrpId = localGroupId;
    grpIt->lStartN = availableNeuronId;
    grpIt->lEndN = availableNeuronId + groupConfigMap[grpIt->gGrpId].numN - 1;

    grpIt->LtoGOffset = grpIt->gStartN - grpIt->lStartN;
    grpIt->GtoLOffset = grpIt->lStartN - grpIt->gStartN;

    KERNEL_DEBUG("Allocation for group (%s) [id:%d, local id:%d], St=%d, End=%d", groupConfigMap[grpIt->gGrpId].grpName.c_str(),
        grpIt->gGrpId, grpIt->lGrpId, grpIt->lStartN, grpIt->lEndN);

    newAvailableNeuronId = availableNeuronId + groupConfigMap[grpIt->gGrpId].numN;

    return newAvailableNeuronId;
}
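// Worked example for the two offsets computed above (hypothetical numbers): if a group occupies
// global neuron ids 100..149 (gStartN = 100) and is placed at local ids 20..69 on its partition
// (lStartN = 20), then LtoGOffset = gStartN - lStartN = 80 and GtoLOffset = lStartN - gStartN = -80,
// so localId + LtoGOffset recovers the global id (25 -> 105) and globalId + GtoLOffset recovers
// the local id (105 -> 25).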
void SNN::generateGroupRuntime(int netId, int lGrpId) {
    resetNeuromodulator(netId, lGrpId);

    for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++)
        resetNeuron(netId, lGrpId, lNId);
}
void SNN::generateRuntimeGroupConfigs() {
    // ...
    for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
        // ...
        int gGrpId = grpIt->gGrpId;
        int lGrpId = grpIt->lGrpId;

        // copy the global group configuration into the flat runtime struct of this partition
        groupConfigs[netId][lGrpId].netId = grpIt->netId;
        groupConfigs[netId][lGrpId].gGrpId = grpIt->gGrpId;
        groupConfigs[netId][lGrpId].gStartN = grpIt->gStartN;
        groupConfigs[netId][lGrpId].gEndN = grpIt->gEndN;
        groupConfigs[netId][lGrpId].lGrpId = grpIt->lGrpId;
        groupConfigs[netId][lGrpId].lStartN = grpIt->lStartN;
        groupConfigs[netId][lGrpId].lEndN = grpIt->lEndN;
        groupConfigs[netId][lGrpId].LtoGOffset = grpIt->LtoGOffset;
        groupConfigs[netId][lGrpId].GtoLOffset = grpIt->GtoLOffset;
        groupConfigs[netId][lGrpId].Type = groupConfigMap[gGrpId].type;
        groupConfigs[netId][lGrpId].numN = groupConfigMap[gGrpId].numN;
        groupConfigs[netId][lGrpId].numPreSynapses = grpIt->numPreSynapses;
        groupConfigs[netId][lGrpId].isSpikeGenerator = groupConfigMap[gGrpId].isSpikeGenerator;
        groupConfigs[netId][lGrpId].isSpikeGenFunc = groupConfigMap[gGrpId].spikeGenFunc != NULL ? true : false;
        groupConfigs[netId][lGrpId].WithSTP = groupConfigMap[gGrpId].stpConfig.WithSTP;
        groupConfigs[netId][lGrpId].WithSTDP = groupConfigMap[gGrpId].stdpConfig.WithSTDP;
        groupConfigs[netId][lGrpId].WithESTDP = groupConfigMap[gGrpId].stdpConfig.WithESTDP;
        groupConfigs[netId][lGrpId].WithISTDP = groupConfigMap[gGrpId].stdpConfig.WithISTDP;
        groupConfigs[netId][lGrpId].WithESTDPtype = groupConfigMap[gGrpId].stdpConfig.WithESTDPtype;
        groupConfigs[netId][lGrpId].WithISTDPtype = groupConfigMap[gGrpId].stdpConfig.WithISTDPtype;
        groupConfigs[netId][lGrpId].WithESTDPcurve = groupConfigMap[gGrpId].stdpConfig.WithESTDPcurve;
        groupConfigs[netId][lGrpId].WithISTDPcurve = groupConfigMap[gGrpId].stdpConfig.WithISTDPcurve;
        groupConfigs[netId][lGrpId].WithHomeostasis = groupConfigMap[gGrpId].homeoConfig.WithHomeostasis;
        groupConfigs[netId][lGrpId].FixedInputWts = grpIt->fixedInputWts;
        groupConfigs[netId][lGrpId].Noffset = grpIt->Noffset;
        groupConfigs[netId][lGrpId].MaxDelay = grpIt->maxOutgoingDelay;

        // short-term plasticity parameters
        groupConfigs[netId][lGrpId].STP_A = groupConfigMap[gGrpId].stpConfig.STP_A;
        groupConfigs[netId][lGrpId].STP_U = groupConfigMap[gGrpId].stpConfig.STP_U;
        groupConfigs[netId][lGrpId].STP_tau_u_inv = groupConfigMap[gGrpId].stpConfig.STP_tau_u_inv;
        groupConfigs[netId][lGrpId].STP_tau_x_inv = groupConfigMap[gGrpId].stpConfig.STP_tau_x_inv;

        // STDP parameters
        groupConfigs[netId][lGrpId].TAU_PLUS_INV_EXC = groupConfigMap[gGrpId].stdpConfig.TAU_PLUS_INV_EXC;
        groupConfigs[netId][lGrpId].TAU_MINUS_INV_EXC = groupConfigMap[gGrpId].stdpConfig.TAU_MINUS_INV_EXC;
        groupConfigs[netId][lGrpId].ALPHA_PLUS_EXC = groupConfigMap[gGrpId].stdpConfig.ALPHA_PLUS_EXC;
        groupConfigs[netId][lGrpId].ALPHA_MINUS_EXC = groupConfigMap[gGrpId].stdpConfig.ALPHA_MINUS_EXC;
        groupConfigs[netId][lGrpId].GAMMA = groupConfigMap[gGrpId].stdpConfig.GAMMA;
        groupConfigs[netId][lGrpId].KAPPA = groupConfigMap[gGrpId].stdpConfig.KAPPA;
        groupConfigs[netId][lGrpId].OMEGA = groupConfigMap[gGrpId].stdpConfig.OMEGA;
        groupConfigs[netId][lGrpId].TAU_PLUS_INV_INB = groupConfigMap[gGrpId].stdpConfig.TAU_PLUS_INV_INB;
        groupConfigs[netId][lGrpId].TAU_MINUS_INV_INB = groupConfigMap[gGrpId].stdpConfig.TAU_MINUS_INV_INB;
        groupConfigs[netId][lGrpId].ALPHA_PLUS_INB = groupConfigMap[gGrpId].stdpConfig.ALPHA_PLUS_INB;
        groupConfigs[netId][lGrpId].ALPHA_MINUS_INB = groupConfigMap[gGrpId].stdpConfig.ALPHA_MINUS_INB;
        groupConfigs[netId][lGrpId].BETA_LTP = groupConfigMap[gGrpId].stdpConfig.BETA_LTP;
        groupConfigs[netId][lGrpId].BETA_LTD = groupConfigMap[gGrpId].stdpConfig.BETA_LTD;
        groupConfigs[netId][lGrpId].LAMBDA = groupConfigMap[gGrpId].stdpConfig.LAMBDA;
        groupConfigs[netId][lGrpId].DELTA = groupConfigMap[gGrpId].stdpConfig.DELTA;

        // compartmental parameters
        groupConfigs[netId][lGrpId].withCompartments = groupConfigMap[gGrpId].withCompartments;
        groupConfigs[netId][lGrpId].compCouplingUp = groupConfigMap[gGrpId].compCouplingUp;
        groupConfigs[netId][lGrpId].compCouplingDown = groupConfigMap[gGrpId].compCouplingDown;
        memset(&groupConfigs[netId][lGrpId].compNeighbors, 0, sizeof(groupConfigs[netId][lGrpId].compNeighbors[0]) * MAX_NUM_COMP_CONN);
        memset(&groupConfigs[netId][lGrpId].compCoupling, 0, sizeof(groupConfigs[netId][lGrpId].compCoupling[0]) * MAX_NUM_COMP_CONN);

        // homeostasis parameters
        groupConfigs[netId][lGrpId].avgTimeScale = groupConfigMap[gGrpId].homeoConfig.avgTimeScale;
        groupConfigs[netId][lGrpId].avgTimeScale_decay = groupConfigMap[gGrpId].homeoConfig.avgTimeScaleDecay;
        groupConfigs[netId][lGrpId].avgTimeScaleInv = groupConfigMap[gGrpId].homeoConfig.avgTimeScaleInv;
        groupConfigs[netId][lGrpId].homeostasisScale = groupConfigMap[gGrpId].homeoConfig.homeostasisScale;

        // neuromodulator baselines and decay rates
        groupConfigs[netId][lGrpId].baseDP = groupConfigMap[gGrpId].neuromodulatorConfig.baseDP;
        groupConfigs[netId][lGrpId].base5HT = groupConfigMap[gGrpId].neuromodulatorConfig.base5HT;
        groupConfigs[netId][lGrpId].baseACh = groupConfigMap[gGrpId].neuromodulatorConfig.baseACh;
        groupConfigs[netId][lGrpId].baseNE = groupConfigMap[gGrpId].neuromodulatorConfig.baseNE;
        groupConfigs[netId][lGrpId].decayDP = groupConfigMap[gGrpId].neuromodulatorConfig.decayDP;
        groupConfigs[netId][lGrpId].decay5HT = groupConfigMap[gGrpId].neuromodulatorConfig.decay5HT;
        groupConfigs[netId][lGrpId].decayACh = groupConfigMap[gGrpId].neuromodulatorConfig.decayACh;
        groupConfigs[netId][lGrpId].decayNE = groupConfigMap[gGrpId].neuromodulatorConfig.decayNE;

        // mirror the local placement back into the global metadata map
        if (netId == grpIt->netId) {
            groupConfigMDMap[gGrpId].netId = grpIt->netId;
            groupConfigMDMap[gGrpId].gGrpId = grpIt->gGrpId;
            groupConfigMDMap[gGrpId].gStartN = grpIt->gStartN;
            groupConfigMDMap[gGrpId].gEndN = grpIt->gEndN;
            groupConfigMDMap[gGrpId].lGrpId = grpIt->lGrpId;
            groupConfigMDMap[gGrpId].lStartN = grpIt->lStartN;
            groupConfigMDMap[gGrpId].lEndN = grpIt->lEndN;
            groupConfigMDMap[gGrpId].numPostSynapses = grpIt->numPostSynapses;
            groupConfigMDMap[gGrpId].numPreSynapses = grpIt->numPreSynapses;
            groupConfigMDMap[gGrpId].LtoGOffset = grpIt->LtoGOffset;
            groupConfigMDMap[gGrpId].GtoLOffset = grpIt->GtoLOffset;
            groupConfigMDMap[gGrpId].fixedInputWts = grpIt->fixedInputWts;
            groupConfigMDMap[gGrpId].hasExternalConnect = grpIt->hasExternalConnect;
            groupConfigMDMap[gGrpId].Noffset = grpIt->Noffset;
            groupConfigMDMap[gGrpId].maxOutgoingDelay = grpIt->maxOutgoingDelay;
        }

        groupConfigs[netId][lGrpId].withParamModel_9 = groupConfigMap[gGrpId].withParamModel_9;
        groupConfigs[netId][lGrpId].isLIF = groupConfigMap[gGrpId].isLIF;
        // ...
    }
}
void SNN::generateRuntimeConnectConfigs() {
    // ...
    for (std::list<ConnectConfig>::iterator connIt = localConnectLists[netId].begin(); connIt != localConnectLists[netId].end(); connIt++) {
        connectConfigMap[connIt->connId] = *connIt;
    }
    // ...
    for (std::list<ConnectConfig>::iterator connIt = externalConnectLists[netId].begin(); connIt != externalConnectLists[netId].end(); connIt++) {
        connectConfigMap[connIt->connId] = *connIt;
    }
    // ...
}
void SNN::generateRuntimeNetworkConfigs() {
    // ...
    if (!groupPartitionLists[netId].empty()) {
        // ...
        for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
            if (grpIt->netId == netId && grpIt->neuronMonitorId >= 0) {
                // ...
            }
        }
        // ...
        // synaptic conductance kinetics, shared across the whole network
        networkConfigs[netId].dAMPA = dAMPA;
        networkConfigs[netId].rNMDA = rNMDA;
        networkConfigs[netId].dNMDA = dNMDA;
        networkConfigs[netId].sNMDA = sNMDA;
        networkConfigs[netId].dGABAa = dGABAa;
        networkConfigs[netId].rGABAb = rGABAb;
        networkConfigs[netId].dGABAb = dGABAb;
        networkConfigs[netId].sGABAb = sGABAb;
        // ...
        // neuron counts per category
        findNumN(netId, networkConfigs[netId].numN, networkConfigs[netId].numNExternal, networkConfigs[netId].numNAssigned,
            networkConfigs[netId].numNReg, networkConfigs[netId].numNExcReg, networkConfigs[netId].numNInhReg,
            networkConfigs[netId].numNPois, networkConfigs[netId].numNExcPois, networkConfigs[netId].numNInhPois);
        // ...
        for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
            if (grpIt->netId == netId) {
                // ...
            }
        }
        // ...
        findMaxNumSynapsesNeurons(netId, networkConfigs[netId].maxNumPostSynN, networkConfigs[netId].maxNumPreSynN);
        // ...
        findMaxSpikesD1D2(netId, networkConfigs[netId].maxSpikesD1, networkConfigs[netId].maxSpikesD2);
        // ...
        findNumSynapsesNetwork(netId, networkConfigs[netId].numPostSynNet, networkConfigs[netId].numPreSynNet);
        // ...
        findNumNSpikeGenAndOffset(netId);
    }

    // size the manager buffers so they can hold the data of the largest partition
    memset(&managerRTDSize, 0, sizeof(ManagerRuntimeDataSize));
    // ...
    if (!groupPartitionLists[netId].empty()) {
        // ...
        if (networkConfigs[netId].numNReg > managerRTDSize.maxNumNReg) managerRTDSize.maxNumNReg = networkConfigs[netId].numNReg;
        if (networkConfigs[netId].numN > managerRTDSize.maxNumN) managerRTDSize.maxNumN = networkConfigs[netId].numN;
        if (networkConfigs[netId].numNAssigned > managerRTDSize.maxNumNAssigned) managerRTDSize.maxNumNAssigned = networkConfigs[netId].numNAssigned;
        // ...
        if (networkConfigs[netId].numNSpikeGen > managerRTDSize.maxNumNSpikeGen) managerRTDSize.maxNumNSpikeGen = networkConfigs[netId].numNSpikeGen;
        // ...
        if (networkConfigs[netId].numGroups > managerRTDSize.maxNumGroups) managerRTDSize.maxNumGroups = networkConfigs[netId].numGroups;
        if (networkConfigs[netId].numConnections > managerRTDSize.maxNumConnections) managerRTDSize.maxNumConnections = networkConfigs[netId].numConnections;
        // ...
        for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
            if (groupConfigMap[grpIt->gGrpId].numN > managerRTDSize.maxNumNPerGroup) managerRTDSize.maxNumNPerGroup = groupConfigMap[grpIt->gGrpId].numN;
        }
        // ...
        if (networkConfigs[netId].maxSpikesD1 > managerRTDSize.maxMaxSpikeD1) managerRTDSize.maxMaxSpikeD1 = networkConfigs[netId].maxSpikesD1;
        if (networkConfigs[netId].maxSpikesD2 > managerRTDSize.maxMaxSpikeD2) managerRTDSize.maxMaxSpikeD2 = networkConfigs[netId].maxSpikesD2;
        // ...
        if (networkConfigs[netId].numPreSynNet > managerRTDSize.maxNumPreSynNet) managerRTDSize.maxNumPreSynNet = networkConfigs[netId].numPreSynNet;
        if (networkConfigs[netId].numPostSynNet > managerRTDSize.maxNumPostSynNet) managerRTDSize.maxNumPostSynNet = networkConfigs[netId].numPostSynNet;
        // ...
        // global (summed over all partitions) sizes
        managerRTDSize.glbNumN += networkConfigs[netId].numN;
        managerRTDSize.glbNumNReg += networkConfigs[netId].numNReg;
    }
    // ...
}
void SNN::generateConnectionRuntime(int netId) {
    std::map<int, int> GLoffset; // global-to-local neuron id offset, per group
    std::map<int, int> GLgrpId;  // global-to-local group id, per group

    for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
        GLoffset[grpIt->gGrpId] = grpIt->GtoLOffset;
        GLgrpId[grpIt->gGrpId] = grpIt->lGrpId;
    }
    // ...
    for (std::map<int, ConnectConfig>::iterator connIt = connectConfigMap.begin(); connIt != connectConfigMap.end(); connIt++) {
        // ...
        mulSynFast[connIt->second.connId] = connIt->second.mulSynFast;
        mulSynSlow[connIt->second.connId] = connIt->second.mulSynSlow;
    }

    // first pass over the connection list: count pre- and post-synapses per neuron
    int parsedConnections = 0;
    memset(managerRuntimeData.Npost, 0, sizeof(short) * networkConfigs[netId].numNAssigned);
    memset(managerRuntimeData.Npre, 0, sizeof(short) * networkConfigs[netId].numNAssigned);
    memset(managerRuntimeData.Npre_plastic, 0, sizeof(short) * networkConfigs[netId].numNAssigned);
    for (std::list<ConnectionInfo>::iterator connIt = connectionLists[netId].begin(); connIt != connectionLists[netId].end(); connIt++) {
        connIt->srcGLoffset = GLoffset[connIt->grpSrc];
        if (managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]] == SYNAPSE_ID_MASK) {
            KERNEL_ERROR("Error: the number of synapses exceeds maximum limit (%d) for neuron %d (group %d)",
                SYNAPSE_ID_MASK, connIt->nSrc, connIt->grpSrc);
            // ...
        }
        if (managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]] == SYNAPSE_ID_MASK) {
            KERNEL_ERROR("Error: the number of synapses exceeds maximum limit (%d) for neuron %d (group %d)",
                SYNAPSE_ID_MASK, connIt->nDest, connIt->grpDest);
            // ...
        }

        managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]]++;
        managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]]++;
        // ...
            sim_with_fixedwts = false; // at least one connection has plastic synapses
            managerRuntimeData.Npre_plastic[connIt->nDest + GLoffset[connIt->grpDest]]++;
        // ...
        // remember the first neuron of a homeostatic group as its reference
        if (groupConfigMap[connIt->grpDest].homeoConfig.WithHomeostasis && groupConfigMDMap[connIt->grpDest].homeoId == -1)
            groupConfigMDMap[connIt->grpDest].homeoId = connIt->nDest + GLoffset[connIt->grpDest];
        // ...
        parsedConnections++;
    }
    assert(parsedConnections == networkConfigs[netId].numPostSynNet && parsedConnections == networkConfigs[netId].numPreSynNet);

    // build the cumulative (prefix-sum) index arrays
    for (int lNId = 1; lNId < networkConfigs[netId].numNAssigned; lNId++) {
        // ...
    }

    // second pass: fill preSynapticIds (two loops over the list; the distinguishing conditions are elided)
    memset(managerRuntimeData.Npre, 0, sizeof(short) * networkConfigs[netId].numNAssigned);
    parsedConnections = 0;
    for (std::list<ConnectionInfo>::iterator connIt = connectionLists[netId].begin(); connIt != connectionLists[netId].end(); connIt++) {
        // ...
        int pre_pos = managerRuntimeData.cumulativePre[connIt->nDest + GLoffset[connIt->grpDest]] + managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]];
        assert(pre_pos < networkConfigs[netId].numPreSynNet);
        // ...
        managerRuntimeData.preSynapticIds[pre_pos] = SET_CONN_ID((connIt->nSrc + GLoffset[connIt->grpSrc]), 0, (GLgrpId[connIt->grpSrc]));
        connIt->preSynId = managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]];
        // ...
        managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]]++;
        parsedConnections++;
    }
    // ...
    for (std::list<ConnectionInfo>::iterator connIt = connectionLists[netId].begin(); connIt != connectionLists[netId].end(); connIt++) {
        // ...
        int pre_pos = managerRuntimeData.cumulativePre[connIt->nDest + GLoffset[connIt->grpDest]] + managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]];
        assert(pre_pos < networkConfigs[netId].numPreSynNet);
        // ...
        managerRuntimeData.preSynapticIds[pre_pos] = SET_CONN_ID((connIt->nSrc + GLoffset[connIt->grpSrc]), 0, (GLgrpId[connIt->grpSrc]));
        connIt->preSynId = managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]];
        // ...
        managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]]++;
        parsedConnections++;
    }
    // ...
    assert(parsedConnections == networkConfigs[netId].numPreSynNet);

    // generate postSynapticIds, grouped by source neuron and sorted by delay
    for (int lNId = 0; lNId < networkConfigs[netId].numNAssigned; lNId++) {
        if (managerRuntimeData.Npost[lNId] > 0) {
            std::list<ConnectionInfo> postConnectionList;
            // ...
            targetConn.nSrc = lNId;
            // ...
            std::list<ConnectionInfo>::iterator firstPostConn = std::find(connectionLists[netId].begin(), connectionLists[netId].end(), targetConn);
            std::list<ConnectionInfo>::iterator lastPostConn = firstPostConn;
            std::advance(lastPostConn, managerRuntimeData.Npost[lNId]);
            managerRuntimeData.Npost[lNId] = 0;
            // ...
            postConnectionList.splice(postConnectionList.begin(), connectionLists[netId], firstPostConn, lastPostConn);
            // ...
            int post_pos, pre_pos, lastDelay = 0;
            parsedConnections = 0;
            // ...
            for (std::list<ConnectionInfo>::iterator connIt = postConnectionList.begin(); connIt != postConnectionList.end(); connIt++) {
                assert(connIt->nSrc + GLoffset[connIt->grpSrc] == lNId);
                post_pos = managerRuntimeData.cumulativePost[connIt->nSrc + GLoffset[connIt->grpSrc]] + managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]];
                pre_pos  = managerRuntimeData.cumulativePre[connIt->nDest + GLoffset[connIt->grpDest]] + connIt->preSynId;
                // ...
                assert(post_pos < networkConfigs[netId].numPostSynNet);
                // ...
                managerRuntimeData.postSynapticIds[post_pos] = SET_CONN_ID((connIt->nDest + GLoffset[connIt->grpDest]), connIt->preSynId, (GLgrpId[connIt->grpDest]));
                // ...
                assert(connIt->delay > 0);
                if (connIt->delay > lastDelay) {
                    // ...
                } else if (connIt->delay == lastDelay) {
                    // ...
                } else {
                    KERNEL_ERROR("Post-synaptic delays not sorted correctly... pre_id=%d, delay[%d]=%d, delay[%d]=%d",
                        lNId, managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]], connIt->delay,
                        managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]] - 1, lastDelay);
                }
                lastDelay = connIt->delay;

                // fill the matching pre-synaptic entry, weight, and maximum weight
                managerRuntimeData.preSynapticIds[pre_pos] = SET_CONN_ID((connIt->nSrc + GLoffset[connIt->grpSrc]), managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]], (GLgrpId[connIt->grpSrc]));
                managerRuntimeData.wt[pre_pos] = connIt->initWt;
                managerRuntimeData.maxSynWt[pre_pos] = connIt->maxWt;
                // ...
                managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]]++;
                parsedConnections++;
            }
            // ...
            assert(parsedConnections == managerRuntimeData.Npost[lNId]);
        }
    }
    // ...
    assert(connectionLists[netId].empty()); // every connection has been consumed
}
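// SET_CONN_ID packs a neuron id, a synapse index, and a group id into a single word so that one
// entry of preSynapticIds/postSynapticIds carries all three. The struct below is a generic
// illustration of that idea with made-up field widths (hypothetical, not CARLsim's actual layout);
// the real widths and masks, e.g. SYNAPSE_ID_MASK, are defined in the library headers.
struct PackedSynIdSketch {
    static unsigned int pack(unsigned int nId, unsigned int synIdx, unsigned int grpId) {
        return ((grpId & 0xFFu) << 24) | ((synIdx & 0xFFu) << 16) | (nId & 0xFFFFu);
    }
    static unsigned int neuronId(unsigned int packed) { return packed & 0xFFFFu; }
    static unsigned int synIndex(unsigned int packed) { return (packed >> 16) & 0xFFu; }
    static unsigned int groupId(unsigned int packed)  { return (packed >> 24) & 0xFFu; }
};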
void SNN::generateCompConnectionRuntime(int netId) {
    std::map<int, int> GLgrpId; // global-to-local group id

    for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
        GLgrpId[grpIt->gGrpId] = grpIt->lGrpId;
    }
    // ...
    for (std::list<compConnectConfig>::iterator connIt = localCompConnectLists[netId].begin(); connIt != localCompConnectLists[netId].end(); connIt++) {
        // ...
        int grpLower = connIt->grpSrc;
        int grpUpper = connIt->grpDest;
        // ...
            KERNEL_ERROR("Group %s(%d) exceeds max number of allowed compartmental connections (%d)." /* args elided */);
        // ...
        groupConfigs[netId][GLgrpId[grpLower]].compNeighbors[i] = grpUpper;
        // ...
            KERNEL_ERROR("Group %s(%d) exceeds max number of allowed compartmental connections (%d)." /* args elided */);
        // ...
        groupConfigs[netId][GLgrpId[grpUpper]].compNeighbors[j] = grpLower;
        // ...
    }
}
void SNN::generatePoissonGroupRuntime(int netId, int lGrpId) {
    for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++)
        resetPoissonNeuron(netId, lGrpId, lNId);
}
void SNN::collectGlobalNetworkConfigC() {
    // find the maximum axonal delay over all connect configs
    for (std::map<int, ConnectConfig>::iterator connIt = connectConfigMap.begin(); connIt != connectConfigMap.end(); connIt++) {
        if (connIt->second.maxDelay > glbNetworkConfig.maxDelay)
            glbNetworkConfig.maxDelay = connIt->second.maxDelay;
    }
    assert(connectConfigMap.size() > 0 || glbNetworkConfig.maxDelay != -1);

    // count neurons per category (the individual type checks are elided)
    for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
        // ...
            glbNetworkConfig.numNExcPois += groupConfigMap[gGrpId].numN;  // excitatory Poisson
        // ...
            glbNetworkConfig.numNInhPois += groupConfigMap[gGrpId].numN;  // inhibitory Poisson
        // ...
            glbNetworkConfig.numNExcReg += groupConfigMap[gGrpId].numN;   // excitatory regular
        // ...
            glbNetworkConfig.numNInhReg += groupConfigMap[gGrpId].numN;   // inhibitory regular
        // ...
        if (groupConfigMDMap[gGrpId].maxOutgoingDelay == 1)
            glbNetworkConfig.numN1msDelay += groupConfigMap[gGrpId].numN;
        else if (groupConfigMDMap[gGrpId].maxOutgoingDelay >= 2)
            glbNetworkConfig.numN2msDelay += groupConfigMap[gGrpId].numN;
    }
    // ...
}

void SNN::collectGlobalNetworkConfigP() {
    // sum synapse counts over all local and external connect lists
    // ...
    if (!localConnectLists[netId].empty() || !externalConnectLists[netId].empty()) {
        for (std::list<ConnectConfig>::iterator connIt = localConnectLists[netId].begin(); connIt != localConnectLists[netId].end(); connIt++)
            glbNetworkConfig.numSynNet += connIt->numberOfConnections;

        for (std::list<ConnectConfig>::iterator connIt = externalConnectLists[netId].begin(); connIt != externalConnectLists[netId].end(); connIt++)
            glbNetworkConfig.numSynNet += connIt->numberOfConnections;
    }
    // ...
}
void SNN::compileSNN() {
    KERNEL_DEBUG("Beginning compilation of the network....");
    // ...
    compileGroupConfig();
    // ...
    compileConnectConfig();
    // ...
    collectGlobalNetworkConfigC();
    // ...
    KERNEL_INFO("************************** Global Network Configuration *******************************");
    KERNEL_INFO("The number of neurons in the network (numN) = %d", glbNetworkConfig.numN);
    KERNEL_INFO("The number of regular neurons in the network (numNReg:numNExcReg:numNInhReg) = %d:%d:%d",
        glbNetworkConfig.numNReg, glbNetworkConfig.numNExcReg, glbNetworkConfig.numNInhReg);
    // ...
    KERNEL_INFO("The maximum axonal delay in the network (maxDelay) = %d", glbNetworkConfig.maxDelay);
    // ...
}

void SNN::compileConnectConfig() {
    // ...
}

void SNN::compileGroupConfig() {
    // ...
    // find the maximum outgoing delay of each source group and mark groups with plastic input weights
    for (std::map<int, ConnectConfig>::iterator connIt = connectConfigMap.begin(); connIt != connectConfigMap.end(); connIt++) {
        // ...
        grpSrc = connIt->second.grpSrc;
        if (connIt->second.maxDelay > groupConfigMDMap[grpSrc].maxOutgoingDelay)
            groupConfigMDMap[grpSrc].maxOutgoingDelay = connIt->second.maxDelay;
        // ...
        groupConfigMDMap[connIt->second.grpDest].fixedInputWts = false;
    }
    // ...
    // assign global neuron id ranges to groups in a fixed order (the per-order conditions are elided)
    int assignedGroup = 0;
    int availableNeuronId = 0;
    for (int order = 0; order < 4; order++) {
        for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
            // ...
                availableNeuronId = assignGroup(gGrpId, availableNeuronId);
            // ...
                availableNeuronId = assignGroup(gGrpId, availableNeuronId);
            // ...
                availableNeuronId = assignGroup(gGrpId, availableNeuronId);
            // ...
                availableNeuronId = assignGroup(gGrpId, availableNeuronId);
            // ...
        }
    }
    assert(assignedGroup == numGroups);
}
void SNN::connectNetwork() {
    // local connections
    // ...
    for (std::list<ConnectConfig>::iterator connIt = localConnectLists[netId].begin(); connIt != localConnectLists[netId].end(); connIt++) {
        switch (connIt->type) {
        // ...
            connectRandom(netId, connIt, false);
        // ...
            connectFull(netId, connIt, false);
        // ...
            connectFull(netId, connIt, false);
        // ...
            connectOneToOne(netId, connIt, false);
        // ...
            connectGaussian(netId, connIt, false);
        // ...
            connectUserDefined(netId, connIt, false);
        // ...
            KERNEL_ERROR("Invalid connection type( should be 'random', 'full', 'one-to-one', 'full-no-direct', or 'gaussian')");
        }
    }

    // external (cross-partition) connections
    // ...
    for (std::list<ConnectConfig>::iterator connIt = externalConnectLists[netId].begin(); connIt != externalConnectLists[netId].end(); connIt++) {
        switch (connIt->type) {
        // ...
            connectRandom(netId, connIt, true);
        // ...
            connectFull(netId, connIt, true);
        // ...
            connectFull(netId, connIt, true);
        // ...
            connectOneToOne(netId, connIt, true);
        // ...
            connectGaussian(netId, connIt, true);
        // ...
            connectUserDefined(netId, connIt, true);
        // ...
            KERNEL_ERROR("Invalid connection type( should be 'random', 'full', 'one-to-one', 'full-no-direct', or 'gaussian')");
        }
    }
}
inline void SNN::connectNeurons(int netId, int _grpSrc, int _grpDest, int _nSrc, int _nDest, short int _connId, int externalNetId) {
    ConnectionInfo connInfo;
    connInfo.grpSrc = _grpSrc;
    // ...
    connInfo.nSrc = _nSrc;
    connInfo.nDest = _nDest;
    // ...
    connInfo.connId = _connId;
    // ...
    connInfo.maxWt = 0.0f;
    // ...
    // draw the axonal delay uniformly from [minDelay, maxDelay]
    connInfo.delay = connectConfigMap[_connId].minDelay + rand() % (connectConfigMap[_connId].maxDelay - connectConfigMap[_connId].minDelay + 1);
    assert((connInfo.delay >= connectConfigMap[_connId].minDelay) && (connInfo.delay <= connectConfigMap[_connId].maxDelay));
    // ...
    float initWt = connectConfigMap[_connId].initWt;
    float maxWt = connectConfigMap[_connId].maxWt;
    // ...
    connectionLists[netId].push_back(connInfo);

    // also record the connection on the destination partition if it crosses partitions
    if (externalNetId >= 0)
        connectionLists[externalNetId].push_back(connInfo);
}
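// The delay expression above draws an integer uniformly from [minDelay, maxDelay]: the modulus
// (maxDelay - minDelay + 1) is the number of admissible values, so for minDelay = 1 and
// maxDelay = 5 the result is uniform over {1,...,5} (up to the small modulo bias of rand()).
// The helper below is a hypothetical stand-alone sketch of that draw, not part of CARLsim.
static inline uint8_t drawUniformDelaySketch(uint8_t minDelay, uint8_t maxDelay) {
    return minDelay + rand() % (maxDelay - minDelay + 1);
}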
inline void SNN::connectNeurons(int netId, int _grpSrc, int _grpDest, int _nSrc, int _nDest, short int _connId,
                                float initWt, float maxWt, uint8_t delay, int externalNetId) {
    ConnectionInfo connInfo;
    connInfo.grpSrc = _grpSrc;
    // ...
    connInfo.nSrc = _nSrc;
    connInfo.nDest = _nDest;
    // ...
    connInfo.connId = _connId;
    // ...
    connInfo.delay = delay;
    // ...
    connectionLists[netId].push_back(connInfo);

    if (externalNetId >= 0)
        connectionLists[externalNetId].push_back(connInfo);
}
void SNN::connectFull(int netId, std::list<ConnectConfig>::iterator connIt, bool isExternal) {
    int grpSrc = connIt->grpSrc;
    int grpDest = connIt->grpDest;
    // ...
    int externalNetId = -1;
    // ...
    if (isExternal) {
        externalNetId = groupConfigMDMap[grpDest].netId;
        assert(netId != externalNetId);
    }

    int gPreStart = groupConfigMDMap[grpSrc].gStartN;
    for (int gPreN = groupConfigMDMap[grpSrc].gStartN; gPreN <= groupConfigMDMap[grpSrc].gEndN; gPreN++) {
        // ...
        int gPostStart = groupConfigMDMap[grpDest].gStartN;
        for (int gPostN = groupConfigMDMap[grpDest].gStartN; gPostN <= groupConfigMDMap[grpDest].gEndN; gPostN++) {
            // skip self-connections for "full-no-direct"
            if (noDirect && gPreN == gPostN)
                continue;
            // ...
            connectNeurons(netId, grpSrc, grpDest, gPreN, gPostN, connIt->connId, externalNetId);
            connIt->numberOfConnections++;
        }
    }

    // update the pre-/post-synapse counts of both groups
    std::list<GroupConfigMD>::iterator grpIt;
    GroupConfigMD targetGrp;

    targetGrp.gGrpId = grpSrc;
    grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
    assert(grpIt != groupPartitionLists[netId].end());
    grpIt->numPostSynapses += connIt->numberOfConnections;

    targetGrp.gGrpId = grpDest;
    grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
    assert(grpIt != groupPartitionLists[netId].end());
    grpIt->numPreSynapses += connIt->numberOfConnections;

    if (isExternal) {
        targetGrp.gGrpId = grpSrc;
        grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
        assert(grpIt != groupPartitionLists[externalNetId].end());
        grpIt->numPostSynapses += connIt->numberOfConnections;

        targetGrp.gGrpId = grpDest;
        grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
        assert(grpIt != groupPartitionLists[externalNetId].end());
        grpIt->numPreSynapses += connIt->numberOfConnections;
    }
}
void SNN::connectGaussian(int netId, std::list<ConnectConfig>::iterator connIt, bool isExternal) {
    // ...
    int grpSrc = connIt->grpSrc;
    int grpDest = connIt->grpDest;
    // ...
    int externalNetId = -1;
    // ...
    if (isExternal) {
        externalNetId = groupConfigMDMap[grpDest].netId;
        assert(netId != externalNetId);
    }

    for (int i = groupConfigMDMap[grpSrc].gStartN; i <= groupConfigMDMap[grpSrc].gEndN; i++) {
        // ...
        for (int j = groupConfigMDMap[grpDest].gStartN; j <= groupConfigMDMap[grpDest].gEndN; j++) {
            // ...
            // normalized 3D distance of the pre-neuron from the center of the post-neuron's receptive field
            double rfDist = getRFDist3D(connIt->connRadius, loc_i, loc_j);
            if (rfDist < 0.0 || rfDist > 1.0)
                continue;
            // ...
            // Gaussian fall-off of the weight with the normalized distance
            double gauss = exp(-2.3026 * rfDist);
            // ...
            if (drand48() < connIt->connProbability) {
                float initWt = gauss * connIt->initWt;
                float maxWt = connIt->maxWt;
                uint8_t delay = connIt->minDelay + rand() % (connIt->maxDelay - connIt->minDelay + 1);
                assert((delay >= connIt->minDelay) && (delay <= connIt->maxDelay));

                connectNeurons(netId, grpSrc, grpDest, i, j, connIt->connId, initWt, maxWt, delay, externalNetId);
                connIt->numberOfConnections++;
            }
        }
    }

    std::list<GroupConfigMD>::iterator grpIt;
    GroupConfigMD targetGrp;

    targetGrp.gGrpId = grpSrc;
    grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
    assert(grpIt != groupPartitionLists[netId].end());
    grpIt->numPostSynapses += connIt->numberOfConnections;

    targetGrp.gGrpId = grpDest;
    grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
    assert(grpIt != groupPartitionLists[netId].end());
    grpIt->numPreSynapses += connIt->numberOfConnections;

    if (isExternal) {
        targetGrp.gGrpId = grpSrc;
        grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
        assert(grpIt != groupPartitionLists[externalNetId].end());
        grpIt->numPostSynapses += connIt->numberOfConnections;

        targetGrp.gGrpId = grpDest;
        grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
        assert(grpIt != groupPartitionLists[externalNetId].end());
        grpIt->numPreSynapses += connIt->numberOfConnections;
    }
}
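// The factor exp(-2.3026 * rfDist) used above equals 10^(-rfDist), since 2.3026 ~ ln(10): the
// synaptic weight decays from the full initWt at the center of the receptive field (rfDist = 0)
// to roughly 10% of initWt at its border (rfDist = 1); pre-neurons outside the normalized field
// (rfDist > 1) are skipped before any weight is computed.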
void SNN::connectOneToOne(int netId, std::list<ConnectConfig>::iterator connIt, bool isExternal) {
    int grpSrc = connIt->grpSrc;
    int grpDest = connIt->grpDest;
    int externalNetId = -1;
    // ...
    if (isExternal) {
        externalNetId = groupConfigMDMap[grpDest].netId;
        assert(netId != externalNetId);
    }
    // ...
    assert(groupConfigMap[grpDest].numN == groupConfigMap[grpSrc].numN);
    // ...
    for (int gPreN = groupConfigMDMap[grpSrc].gStartN, gPostN = groupConfigMDMap[grpDest].gStartN; gPreN <= groupConfigMDMap[grpSrc].gEndN; gPreN++, gPostN++) {
        connectNeurons(netId, grpSrc, grpDest, gPreN, gPostN, connIt->connId, externalNetId);
        connIt->numberOfConnections++;
    }

    std::list<GroupConfigMD>::iterator grpIt;
    GroupConfigMD targetGrp;

    targetGrp.gGrpId = grpSrc;
    grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
    assert(grpIt != groupPartitionLists[netId].end());
    grpIt->numPostSynapses += connIt->numberOfConnections;

    targetGrp.gGrpId = grpDest;
    grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
    assert(grpIt != groupPartitionLists[netId].end());
    grpIt->numPreSynapses += connIt->numberOfConnections;

    if (isExternal) {
        targetGrp.gGrpId = grpSrc;
        grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
        assert(grpIt != groupPartitionLists[externalNetId].end());
        grpIt->numPostSynapses += connIt->numberOfConnections;

        targetGrp.gGrpId = grpDest;
        grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
        assert(grpIt != groupPartitionLists[externalNetId].end());
        grpIt->numPreSynapses += connIt->numberOfConnections;
    }
}
void SNN::connectRandom(int netId, std::list<ConnectConfig>::iterator connIt, bool isExternal) {
    int grpSrc = connIt->grpSrc;
    int grpDest = connIt->grpDest;
    int externalNetId = -1;
    // ...
    if (isExternal) {
        externalNetId = groupConfigMDMap[grpDest].netId;
        assert(netId != externalNetId);
    }

    int gPreStart = groupConfigMDMap[grpSrc].gStartN;
    for (int gPreN = groupConfigMDMap[grpSrc].gStartN; gPreN <= groupConfigMDMap[grpSrc].gEndN; gPreN++) {
        // ...
        int gPostStart = groupConfigMDMap[grpDest].gStartN;
        for (int gPostN = groupConfigMDMap[grpDest].gStartN; gPostN <= groupConfigMDMap[grpDest].gEndN; gPostN++) {
            // ...
            // create each candidate synapse with probability connProbability
            if (drand48() < connIt->connProbability) {
                connectNeurons(netId, grpSrc, grpDest, gPreN, gPostN, connIt->connId, externalNetId);
                connIt->numberOfConnections++;
            }
        }
    }

    std::list<GroupConfigMD>::iterator grpIt;
    GroupConfigMD targetGrp;

    targetGrp.gGrpId = grpSrc;
    grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
    assert(grpIt != groupPartitionLists[netId].end());
    grpIt->numPostSynapses += connIt->numberOfConnections;

    targetGrp.gGrpId = grpDest;
    grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
    assert(grpIt != groupPartitionLists[netId].end());
    grpIt->numPreSynapses += connIt->numberOfConnections;

    if (isExternal) {
        targetGrp.gGrpId = grpSrc;
        grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
        assert(grpIt != groupPartitionLists[externalNetId].end());
        grpIt->numPostSynapses += connIt->numberOfConnections;

        targetGrp.gGrpId = grpDest;
        grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
        assert(grpIt != groupPartitionLists[externalNetId].end());
        grpIt->numPreSynapses += connIt->numberOfConnections;
    }
}
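// Back-of-the-envelope check for the probabilistic rule above: a single connectRandom() call
// over a source group of size Nsrc and a destination group of size Ndst creates on average
// Nsrc * Ndst * connProbability synapses (the realized count is binomially distributed), which
// is why numberOfConnections is tallied per created synapse rather than precomputed. Nsrc and
// Ndst here are just shorthand for the two group sizes, not identifiers from the source.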
void SNN::connectUserDefined(int netId, std::list<ConnectConfig>::iterator connIt, bool isExternal) {
    int grpSrc = connIt->grpSrc;
    int grpDest = connIt->grpDest;
    int externalNetId = -1;
    // ...
    if (isExternal) {
        externalNetId = groupConfigMDMap[grpDest].netId;
        assert(netId != externalNetId);
    }

    connIt->maxDelay = 0;
    int preStartN = groupConfigMDMap[grpSrc].gStartN;
    int postStartN = groupConfigMDMap[grpDest].gStartN;
    for (int pre_nid = groupConfigMDMap[grpSrc].gStartN; pre_nid <= groupConfigMDMap[grpSrc].gEndN; pre_nid++) {
        // ...
        for (int post_nid = groupConfigMDMap[grpDest].gStartN; post_nid <= groupConfigMDMap[grpDest].gEndN; post_nid++) {
            float weight, maxWt, delay;
            // ...
            // let the user callback decide whether and how to connect this pair
            connIt->conn->connect(this, grpSrc, pre_nid - preStartN, grpDest, post_nid - postStartN, weight, maxWt, delay, connected);
            // ...
            if (connected) {
                assert(abs(weight) <= abs(maxWt));
                // ...
                // track the largest weight magnitude and delay reported by the callback
                if (fabs(maxWt) > connIt->maxWt)
                    connIt->maxWt = fabs(maxWt);
                // ...
                if (delay > connIt->maxDelay)
                    connIt->maxDelay = delay;
                // ...
                connectNeurons(netId, grpSrc, grpDest, pre_nid, post_nid, connIt->connId, weight, maxWt, delay, externalNetId);
                connIt->numberOfConnections++;
            }
        }
    }

    std::list<GroupConfigMD>::iterator grpIt;
    GroupConfigMD targetGrp;

    targetGrp.gGrpId = grpSrc;
    grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
    assert(grpIt != groupPartitionLists[netId].end());
    grpIt->numPostSynapses += connIt->numberOfConnections;

    targetGrp.gGrpId = grpDest;
    grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
    assert(grpIt != groupPartitionLists[netId].end());
    grpIt->numPreSynapses += connIt->numberOfConnections;

    if (isExternal) {
        targetGrp.gGrpId = grpSrc;
        grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
        assert(grpIt != groupPartitionLists[externalNetId].end());
        grpIt->numPostSynapses += connIt->numberOfConnections;

        targetGrp.gGrpId = grpDest;
        grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
        assert(grpIt != groupPartitionLists[externalNetId].end());
        grpIt->numPreSynapses += connIt->numberOfConnections;
    }
}
void SNN::deleteRuntimeData() {
    // ...
    CUDA_CHECK_ERRORS(cudaThreadSynchronize());
    // ...
#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
    pthread_t threads[numCores + 1];
    // ...
    int threadCount = 0;
#endif
    // ...
    if (!groupPartitionLists[netId].empty()) {
        // ...
        deleteRuntimeData_GPU(netId);
        // ...
#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
        deleteRuntimeData_CPU(netId);
#else // Linux or MAC
        pthread_attr_t attr;
        pthread_attr_init(&attr);
        // ...
        pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

        argsThreadRoutine[threadCount].snn_pointer = this;
        argsThreadRoutine[threadCount].netId = netId;
        argsThreadRoutine[threadCount].lGrpId = 0;
        argsThreadRoutine[threadCount].startIdx = 0;
        argsThreadRoutine[threadCount].endIdx = 0;
        argsThreadRoutine[threadCount].GtoLOffset = 0;

        pthread_create(&threads[threadCount], &attr, &SNN::helperDeleteRuntimeData_CPU, (void*)&argsThreadRoutine[threadCount]);
        pthread_attr_destroy(&attr);
#endif
    }
    // ...
#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
    for (int i = 0; i < threadCount; i++) {
        pthread_join(threads[i], NULL);
    }
#endif
    // ...
    CUDA_DELETE_TIMER(timer);
}
void SNN::deleteObjects() {
    if (simulatorDeleted)
        return;
    // ...
    resetMonitors(true);
    resetConnectionConfigs(true);
    // ...
    deleteManagerRuntimeData();
    deleteRuntimeData();

    // close the log files unless they alias stdout/stderr or are managed by the user (CUSTOM mode)
    if (loggerMode_ != CUSTOM) {
        if (fpInf_ != NULL && fpInf_ != stdout && fpInf_ != stderr)
            fclose(fpInf_);
        if (fpErr_ != NULL && fpErr_ != stdout && fpErr_ != stderr)
            fclose(fpErr_);
        if (fpDeb_ != NULL && fpDeb_ != stdout && fpDeb_ != stderr)
            fclose(fpDeb_);
        if (fpLog_ != NULL && fpLog_ != stdout && fpLog_ != stderr)
            fclose(fpLog_);
    }

    simulatorDeleted = true;
}
void SNN::findMaxNumSynapsesGroups(int* _maxNumPostSynGrp, int* _maxNumPreSynGrp) {
    *_maxNumPostSynGrp = 0;
    *_maxNumPreSynGrp = 0;

    for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
        // ...
        if (groupConfigMDMap[gGrpId].numPostSynapses > *_maxNumPostSynGrp)
            *_maxNumPostSynGrp = groupConfigMDMap[gGrpId].numPostSynapses;
        if (groupConfigMDMap[gGrpId].numPreSynapses > *_maxNumPreSynGrp)
            *_maxNumPreSynGrp = groupConfigMDMap[gGrpId].numPreSynapses;
    }
}

void SNN::findMaxNumSynapsesNeurons(int _netId, int& _maxNumPostSynN, int& _maxNumPreSynN) {
    int *tempNpre, *tempNpost;
    int nSrc, nDest, numNeurons;
    std::map<int, int> globalToLocalOffset;
    // ...
    tempNpre = new int[numNeurons];
    tempNpost = new int[numNeurons];
    memset(tempNpre, 0, sizeof(int) * numNeurons);
    memset(tempNpost, 0, sizeof(int) * numNeurons);

    // load the global-to-local offsets
    for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[_netId].begin(); grpIt != groupPartitionLists[_netId].end(); grpIt++) {
        globalToLocalOffset[grpIt->gGrpId] = grpIt->GtoLOffset;
    }

    // tally synapses per (local) neuron
    for (std::list<ConnectionInfo>::iterator connIt = connectionLists[_netId].begin(); connIt != connectionLists[_netId].end(); connIt++) {
        nSrc = connIt->nSrc + globalToLocalOffset[connIt->grpSrc];
        nDest = connIt->nDest + globalToLocalOffset[connIt->grpDest];
        assert(nSrc < numNeurons); assert(nDest < numNeurons);
        // ...
    }

    _maxNumPostSynN = 0;
    // ...
    for (int nId = 0; nId < networkConfigs[_netId].numN; nId++) {
        if (tempNpost[nId] > _maxNumPostSynN) _maxNumPostSynN = tempNpost[nId];
        if (tempNpre[nId] > _maxNumPreSynN) _maxNumPreSynN = tempNpre[nId];
    }
    // ...
    delete[] tempNpost;
    // ...
}
void SNN::findMaxSpikesD1D2(int _netId, unsigned int& _maxSpikesD1, unsigned int& _maxSpikesD2) {
    _maxSpikesD1 = 0; _maxSpikesD2 = 0;
    for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[_netId].begin(); grpIt != groupPartitionLists[_netId].end(); grpIt++) {
        // groups whose maximum outgoing delay is 1 ms contribute to the D1 budget, all others to D2
        if (grpIt->maxOutgoingDelay == 1) {
            // ...
        } else {
            // ...
        }
    }
}

void SNN::findNumN(int _netId, int& _numN, int& _numNExternal, int& _numNAssigned,
                   int& _numNReg, int& _numNExcReg, int& _numNInhReg,
                   int& _numNPois, int& _numNExcPois, int& _numNInhPois) {
    _numN = 0; _numNExternal = 0; _numNAssigned = 0;
    _numNReg = 0; _numNExcReg = 0; _numNInhReg = 0;
    _numNPois = 0; _numNExcPois = 0; _numNInhPois = 0;
    for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[_netId].begin(); grpIt != groupPartitionLists[_netId].end(); grpIt++) {
        int sizeN = groupConfigMap[grpIt->gGrpId].numN;
        unsigned int type = groupConfigMap[grpIt->gGrpId].type;
        // classify the group by its type flags (the individual conditions are elided)
        // ...
            _numN += sizeN; _numNPois += sizeN; _numNExcPois += sizeN;  // excitatory Poisson
        // ...
            _numN += sizeN; _numNPois += sizeN; _numNInhPois += sizeN;  // inhibitory Poisson
        // ...
            _numN += sizeN; _numNReg += sizeN; _numNExcReg += sizeN;    // excitatory regular
        // ...
            _numN += sizeN; _numNReg += sizeN; _numNInhReg += sizeN;    // inhibitory regular
        } else if (grpIt->netId != _netId) {
            _numNExternal += sizeN;
        } else {
            KERNEL_ERROR("Can't find category for the group [%d] ", grpIt->gGrpId);
            // ...
        }
        _numNAssigned += sizeN;
    }

    assert(_numNReg == _numNExcReg + _numNInhReg);
    assert(_numNPois == _numNExcPois + _numNInhPois);
    assert(_numN == _numNReg + _numNPois);
    assert(_numNAssigned == _numN + _numNExternal);
}
void SNN::findNumNSpikeGenAndOffset(int _netId) {
    // ...
    for (int lGrpId = 0; lGrpId < networkConfigs[_netId].numGroups; lGrpId++) {
        if (_netId == groupConfigs[_netId][lGrpId].netId && groupConfigs[_netId][lGrpId].isSpikeGenerator && groupConfigs[_netId][lGrpId].isSpikeGenFunc) {
            // ...
        }
    }
    // ...
    assert(networkConfigs[_netId].numNSpikeGen <= networkConfigs[_netId].numNPois);
}

void SNN::findNumSynapsesNetwork(int _netId, int& _numPostSynNet, int& _numPreSynNet) {
    // ...
    for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[_netId].begin(); grpIt != groupPartitionLists[_netId].end(); grpIt++) {
        _numPostSynNet += grpIt->numPostSynapses;
        _numPreSynNet += grpIt->numPreSynapses;
        assert(_numPostSynNet < INT_MAX);
        assert(_numPreSynNet < INT_MAX);
    }
    // ...
    assert(_numPreSynNet == _numPostSynNet);
}
void SNN::fetchGroupState(int netId, int lGrpId) {
    // ...
    copyGroupState(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false); // GPU runtime
    // ...
    copyGroupState(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false);                         // CPU runtime
}

void SNN::fetchWeightState(int netId, int lGrpId) {
    // ...
    copyWeightState(netId, lGrpId, cudaMemcpyDeviceToHost); // GPU runtime
    // ...
    copyWeightState(netId, lGrpId);                         // CPU runtime
}

void SNN::fetchNeuronSpikeCount(int gGrpId) {
    if (gGrpId == ALL) {
        for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
            fetchNeuronSpikeCount(gGrpId);
        }
    } else {
        int netId = groupConfigMDMap[gGrpId].netId;
        int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
        int LtoGOffset = groupConfigMDMap[gGrpId].LtoGOffset;
        // ...
        copyNeuronSpikeCount(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false, LtoGOffset); // GPU runtime
        // ...
        copyNeuronSpikeCount(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false, LtoGOffset);                         // CPU runtime
    }
}

void SNN::fetchSTPState(int gGrpId) {
    // ...
}
void SNN::fetchConductanceAMPA(int gGrpId) {
    if (gGrpId == ALL) {
        for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
            fetchConductanceAMPA(gGrpId);
        }
    } else {
        int netId = groupConfigMDMap[gGrpId].netId;
        int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
        int LtoGOffset = groupConfigMDMap[gGrpId].LtoGOffset;
        // ...
        copyConductanceAMPA(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false, LtoGOffset);
        // ...
        copyConductanceAMPA(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false, LtoGOffset);
    }
}

void SNN::fetchConductanceNMDA(int gGrpId) {
    if (gGrpId == ALL) {
        for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
            fetchConductanceNMDA(gGrpId);
        }
    } else {
        int netId = groupConfigMDMap[gGrpId].netId;
        int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
        int LtoGOffset = groupConfigMDMap[gGrpId].LtoGOffset;
        // ...
        copyConductanceNMDA(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false, LtoGOffset);
        // ...
        copyConductanceNMDA(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false, LtoGOffset);
    }
}

void SNN::fetchConductanceGABAa(int gGrpId) {
    if (gGrpId == ALL) {
        for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
            fetchConductanceGABAa(gGrpId);
        }
    } else {
        int netId = groupConfigMDMap[gGrpId].netId;
        int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
        int LtoGOffset = groupConfigMDMap[gGrpId].LtoGOffset;
        // ...
        copyConductanceGABAa(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false, LtoGOffset);
        // ...
        copyConductanceGABAa(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false, LtoGOffset);
    }
}

void SNN::fetchConductanceGABAb(int gGrpId) {
    if (gGrpId == ALL) {
        for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
            fetchConductanceGABAb(gGrpId);
        }
    } else {
        int netId = groupConfigMDMap[gGrpId].netId;
        int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
        int LtoGOffset = groupConfigMDMap[gGrpId].LtoGOffset;
        // ...
        copyConductanceGABAb(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false, LtoGOffset);
        // ...
        copyConductanceGABAb(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false, LtoGOffset);
    }
}
void SNN::fetchGrpIdsLookupArray(int netId) {
    // ...
    copyGrpIdsLookupArray(netId, cudaMemcpyDeviceToHost);
    // ...
    copyGrpIdsLookupArray(netId);
}

void SNN::fetchConnIdsLookupArray(int netId) {
    // ...
    copyConnIdsLookupArray(netId, cudaMemcpyDeviceToHost);
    // ...
    copyConnIdsLookupArray(netId);
}

void SNN::fetchLastSpikeTime(int netId) {
    // ...
    copyLastSpikeTime(netId, cudaMemcpyDeviceToHost);
    // ...
    copyLastSpikeTime(netId);
}

void SNN::fetchPreConnectionInfo(int netId) {
    // ...
    copyPreConnectionInfo(netId, ALL, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false);
    // ...
    copyPreConnectionInfo(netId, ALL, &managerRuntimeData, &runtimeData[netId], false);
}

void SNN::fetchPostConnectionInfo(int netId) {
    // ...
    copyPostConnectionInfo(netId, ALL, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false);
    // ...
    copyPostConnectionInfo(netId, ALL, &managerRuntimeData, &runtimeData[netId], false);
}

void SNN::fetchSynapseState(int netId) {
    // ...
    copySynapseState(netId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false);
    // ...
    copySynapseState(netId, &managerRuntimeData, &runtimeData[netId], false);
}

void SNN::fetchNetworkSpikeCount() {
    unsigned int spikeCountD1, spikeCountD2, spikeCountExtD1, spikeCountExtD2;
    // ...
    if (!groupPartitionLists[netId].empty()) {
        // ...
        copyNetworkSpikeCount(netId, cudaMemcpyDeviceToHost,
            &spikeCountD1, &spikeCountD2, &spikeCountExtD1, &spikeCountExtD2); // GPU runtime
        // ...
        copyNetworkSpikeCount(netId,
            &spikeCountD1, &spikeCountD2, &spikeCountExtD1, &spikeCountExtD2); // CPU runtime
        // ...
        // exclude externally routed spikes from the global tally
        managerRuntimeData.spikeCountD2 += spikeCountD2 - spikeCountExtD2;
        managerRuntimeData.spikeCountD1 += spikeCountD1 - spikeCountExtD1;
    }
    // ...
}
void SNN::fetchSpikeTables(int netId) {
    // ...
    copySpikeTables(netId, cudaMemcpyDeviceToHost);
    // ...
    copySpikeTables(netId);
}

void SNN::fetchNeuronStateBuffer(int netId, int lGrpId) {
    // ...
    copyNeuronStateBuffer(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false);
    // ...
    copyNeuronStateBuffer(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false);
}

void SNN::fetchExtFiringTable(int netId) {
    // ...
    copyExtFiringTable(netId, cudaMemcpyDeviceToHost);
    // ...
    copyExtFiringTable(netId);
}

void SNN::fetchTimeTable(int netId) {
    // ...
    copyTimeTable(netId, cudaMemcpyDeviceToHost);
    // ...
    copyTimeTable(netId, true);
}

void SNN::writeBackTimeTable(int netId) {
    // ...
    copyTimeTable(netId, cudaMemcpyHostToDevice);
    // ...
    copyTimeTable(netId, false);
}
void SNN::transferSpikes(void* dest, int destNetId, void* src, int srcNetId, int size) {
    // ...
    // device -> device (peer-to-peer copy between GPUs)
    checkAndSetGPUDevice(destNetId);
    CUDA_CHECK_ERRORS(cudaMemcpyPeer(dest, destNetId, src, srcNetId, size));
    // ...
    // host -> device
    checkAndSetGPUDevice(destNetId);
    CUDA_CHECK_ERRORS(cudaMemcpy(dest, src, size, cudaMemcpyHostToDevice));
    // ...
    // device -> host
    checkAndSetGPUDevice(srcNetId);
    CUDA_CHECK_ERRORS(cudaMemcpy(dest, src, size, cudaMemcpyDeviceToHost));
    // ...
    // host -> host
    memcpy(dest, src, size);
    // ...
    memcpy(dest, src, size);
}

void SNN::convertExtSpikesD2(int netId, int startIdx, int endIdx, int GtoLOffset) {
    // ...
    convertExtSpikesD2_GPU(netId, startIdx, endIdx, GtoLOffset); // GPU runtime
    // ...
    convertExtSpikesD2_CPU(netId, startIdx, endIdx, GtoLOffset); // CPU runtime
}

void SNN::convertExtSpikesD1(int netId, int startIdx, int endIdx, int GtoLOffset) {
    // ...
    convertExtSpikesD1_GPU(netId, startIdx, endIdx, GtoLOffset); // GPU runtime
    // ...
    convertExtSpikesD1_CPU(netId, startIdx, endIdx, GtoLOffset); // CPU runtime
}
void SNN::routeSpikes() {
	int firingTableIdxD2, firingTableIdxD1;
	// ...
	for (std::list<RoutingTableEntry>::iterator rteItr = spikeRoutingTable.begin(); rteItr != spikeRoutingTable.end(); rteItr++) {
		int srcNetId = rteItr->srcNetId;
		int destNetId = rteItr->destNetId;

		fetchExtFiringTable(srcNetId);

		fetchTimeTable(destNetId);
		firingTableIdxD2 = managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1];
		firingTableIdxD1 = managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1];

#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
		pthread_t threads[(2 * networkConfigs[srcNetId].numGroups) + 1];
		// ...
		int threadCount = 0;
#endif
		// ...
		for (int lGrpId = 0; lGrpId < networkConfigs[srcNetId].numGroups; lGrpId++) {
			if (groupConfigs[srcNetId][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD2[lGrpId] > 0) {
				bool isFound = false;
				for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[destNetId].begin(); grpIt != groupPartitionLists[destNetId].end(); grpIt++) {
					if (grpIt->gGrpId == groupConfigs[srcNetId][lGrpId].gGrpId) {
						// ...
					}
				}
				// ...
				transferSpikes(runtimeData[destNetId].firingTableD2 + firingTableIdxD2, destNetId, /* ... */);
				// ...
				convertExtSpikesD2_GPU(destNetId, firingTableIdxD2, /* ... */);
				// ...
#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
				convertExtSpikesD2_CPU(destNetId, firingTableIdxD2, /* ... */);
#else // Linux or MAC
				pthread_attr_t attr;
				pthread_attr_init(&attr);
				// ...
				pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);
				// ...
				argsThreadRoutine[threadCount].snn_pointer = this;
				argsThreadRoutine[threadCount].netId = destNetId;
				argsThreadRoutine[threadCount].lGrpId = 0;
				argsThreadRoutine[threadCount].startIdx = firingTableIdxD2;
				// ...
				argsThreadRoutine[threadCount].GtoLOffset = GtoLOffset;
				// ...
				pthread_create(&threads[threadCount], &attr, &SNN::helperConvertExtSpikesD2_CPU, (void*)&argsThreadRoutine[threadCount]);
				pthread_attr_destroy(&attr);
#endif
				// ...
			}

			if (groupConfigs[srcNetId][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD1[lGrpId] > 0) {
				bool isFound = false;
				for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[destNetId].begin(); grpIt != groupPartitionLists[destNetId].end(); grpIt++) {
					if (grpIt->gGrpId == groupConfigs[srcNetId][lGrpId].gGrpId) {
						GtoLOffset = grpIt->GtoLOffset;
						// ...
					}
				}
				// ...
				transferSpikes(runtimeData[destNetId].firingTableD1 + firingTableIdxD1, destNetId, /* ... */);
				// ...
				convertExtSpikesD1_GPU(destNetId, firingTableIdxD1, /* ... */);
				// ...
#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
				convertExtSpikesD1_CPU(destNetId, firingTableIdxD1, /* ... */);
#else // Linux or MAC
				pthread_attr_t attr;
				pthread_attr_init(&attr);
				// ...
				pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);
				// ...
				argsThreadRoutine[threadCount].snn_pointer = this;
				argsThreadRoutine[threadCount].netId = destNetId;
				argsThreadRoutine[threadCount].lGrpId = 0;
				argsThreadRoutine[threadCount].startIdx = firingTableIdxD1;
				// ...
				argsThreadRoutine[threadCount].GtoLOffset = GtoLOffset;
				// ...
				pthread_create(&threads[threadCount], &attr, &SNN::helperConvertExtSpikesD1_CPU, (void*)&argsThreadRoutine[threadCount]);
				pthread_attr_destroy(&attr);
#endif
				// ...
			}
		}

#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
		// ...
		for (int i = 0; i < threadCount; i++) {
			pthread_join(threads[i], NULL);
		}
#endif
		// ...
		managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD2;
		managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD1;
		writeBackTimeTable(destNetId);
	}
}
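routeSpikes reads the current end-of-table index from timeTableD2/timeTableD1 at slot simTimeMs + maxDelay + 1, appends the routed spikes to the destination firing table, and writes the updated index back. A self-contained sketch of that bookkeeping follows; the struct, its member names, and the fixed horizon are illustrative only, not CARLsim's actual runtime buffers.

// Sketch: a firing table whose per-millisecond end indices live in a time
// table offset by maxDelay, mirroring the index bookkeeping in routeSpikes().
#include <vector>
#include <utility>

struct FiringBook {
	int maxDelay;
	std::vector<int> firingTable;  // neuron IDs in the order they fired
	std::vector<int> timeTable;    // timeTable[t + maxDelay + 1] = end index after ms t

	FiringBook(int maxDelay_, int horizonMs)
		: maxDelay(maxDelay_), timeTable(horizonMs + maxDelay_ + 2, 0) {}

	// Call once per millisecond t, in increasing order (possibly with an empty
	// list), so the stored indices stay cumulative.
	void append(int t, const std::vector<int>& spikes) {
		firingTable.insert(firingTable.end(), spikes.begin(), spikes.end());
		timeTable[t + maxDelay + 1] = (int)firingTable.size();  // write back the new end index
	}

	// Spikes of millisecond t occupy [timeTable[t+maxDelay], timeTable[t+maxDelay+1]).
	std::pair<int, int> rangeOfMs(int t) const {
		return std::make_pair(timeTable[t + maxDelay], timeTable[t + maxDelay + 1]);
	}
};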
float SNN::generateWeight(int connProp, float initWt, float maxWt, int nid, int grpId) {
	// ...
}

// ...
	assert(connId != ALL);
	assert(connId < numConnections);
	// ...
	return (groupConfigMap[grpId].homeoConfig.WithHomeostasis);
// ...

void SNN::verifyNetwork() {
	// ...
	verifyCompartments();
	// ...
	verifyHomeostasis();
	// ...
	if (sim_with_stp && glbNetworkConfig.maxDelay > 1) {
		KERNEL_ERROR("STP with delays > 1 ms is currently not supported.");
		// ...
	}
	// ...
	KERNEL_ERROR("You are using a synaptic delay (%d) greater than MAX_SYN_DELAY defined in config.h", glbNetworkConfig.maxDelay);
	// ...
}
void SNN::verifyCompartments() {
	for (std::map<int, compConnectConfig>::iterator it = compConnectConfigMap.begin(); it != compConnectConfigMap.end(); it++) {
		int grpLower = it->second.grpSrc;
		int grpUpper = it->second.grpDest;

		if (!groupConfigMap[grpLower].withCompartments) {
			KERNEL_ERROR("Group %s(%d) is not compartmentally enabled, cannot be part of a compartmental connection.",
				groupConfigMap[grpLower].grpName.c_str(), grpLower);
			// ...
		}
		if (!groupConfigMap[grpUpper].withCompartments) {
			KERNEL_ERROR("Group %s(%d) is not compartmentally enabled, cannot be part of a compartmental connection.",
				groupConfigMap[grpUpper].grpName.c_str(), grpUpper);
			// ...
		}
	}
}

void SNN::verifySTDP() {
	// ...
	if (groupConfigMap[gGrpId].stdpConfig.WithSTDP) {
		bool isAnyPlastic = false;
		for (std::map<int, ConnectConfig>::iterator it = connectConfigMap.begin(); it != connectConfigMap.end(); it++) {
			if (it->second.grpDest == gGrpId) {
				// ...
			}
		}
		if (!isAnyPlastic) {
			KERNEL_ERROR("If STDP on group %d (%s) is set, group must have some incoming plastic connections.",
				gGrpId, groupConfigMap[gGrpId].grpName.c_str());
			// ...
		}
	}
}

void SNN::verifyHomeostasis() {
	// ...
	if (groupConfigMap[gGrpId].homeoConfig.WithHomeostasis) {
		if (!groupConfigMap[gGrpId].stdpConfig.WithSTDP) {
			KERNEL_ERROR("If homeostasis is enabled on group %d (%s), then STDP must be enabled, too.",
				gGrpId, groupConfigMap[gGrpId].grpName.c_str());
			// ...
		}
	}
}
// ...
	return (rfDist >= 0.0 && rfDist <= 1.0);
// ...

	double rfDist = -1.0;
	// ...
	if (radius.radX == 0 && pre.x != post.x || radius.radY == 0 && pre.y != post.y || radius.radZ == 0 && pre.z != post.z) {
		// ...
	} else {
		double xTerm = (radius.radX <= 0) ? 0.0 : pow(pre.x - post.x, 2) / pow(radius.radX, 2);
		double yTerm = (radius.radY <= 0) ? 0.0 : pow(pre.y - post.y, 2) / pow(radius.radY, 2);
		double zTerm = (radius.radZ <= 0) ? 0.0 : pow(pre.z - post.z, 2) / pow(radius.radZ, 2);
		rfDist = xTerm + yTerm + zTerm;
	}
	// ...
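The receptive-field test above computes a normalized ellipsoidal distance, (Δx/radX)² + (Δy/radY)² + (Δz/radZ)², and accepts a pre/post pair when that value lies in [0, 1]; a zero radius demands an exact coordinate match in that dimension, and a negative radius ignores the dimension. The sketch below restates the same rule with plain stand-in structs (not CARLsim's Point3D/RadiusRF types).

// Sketch of the ellipsoidal receptive-field rule shown above.
#include <cmath>

struct Point  { double x, y, z; };
struct Radius { double radX, radY, radZ; };

double rfDistance(const Point& pre, const Point& post, const Radius& r) {
	// a zero radius means the coordinate must match exactly in that dimension
	if ((r.radX == 0 && pre.x != post.x) ||
	    (r.radY == 0 && pre.y != post.y) ||
	    (r.radZ == 0 && pre.z != post.z))
		return -1.0;  // negative distance marks "outside the RF"

	// a negative radius means the dimension is ignored (its term is zero)
	double xTerm = (r.radX <= 0) ? 0.0 : std::pow(pre.x - post.x, 2) / std::pow(r.radX, 2);
	double yTerm = (r.radY <= 0) ? 0.0 : std::pow(pre.y - post.y, 2) / std::pow(r.radY, 2);
	double zTerm = (r.radZ <= 0) ? 0.0 : std::pow(pre.z - post.z, 2) / std::pow(r.radZ, 2);
	return xTerm + yTerm + zTerm;
}

bool isPointWithinRF(const Point& pre, const Point& post, const Radius& r) {
	double d = rfDistance(pre, post, r);
	return d >= 0.0 && d <= 1.0;
}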
void SNN::partitionSNN() {
	// ...
	numAvailableGPUs = configGPUDevice();

	for (std::map<int, GroupConfigMD>::iterator grpIt = groupConfigMDMap.begin(); grpIt != groupConfigMDMap.end(); grpIt++) {
		// ...
		int gGrpId = grpIt->second.gGrpId;
		int netId = groupConfigMap[gGrpId].preferredNetId;
		// ...
		grpIt->second.netId = netId;
		numAssignedNeurons[netId] += groupConfigMap[gGrpId].numN;
		groupPartitionLists[netId].push_back(grpIt->second);
		// ...
		if (preferredSimMode_ == CPU_MODE) {
			// ...
		} else if (preferredSimMode_ == GPU_MODE) {
			// ...
		}
		// ...
		if (grpIt->second.netId == -1) {
			KERNEL_ERROR("Can't assign the group [%d] to any partition", grpIt->second.gGrpId);
			// ...
		}
	}

	// ...
	if (!groupPartitionLists[netId].empty()) {
		for (std::map<int, ConnectConfig>::iterator connIt = connectConfigMap.begin(); connIt != connectConfigMap.end(); connIt++) {
			if (groupConfigMDMap[connIt->second.grpSrc].netId == netId && groupConfigMDMap[connIt->second.grpDest].netId == netId) {
				localConnectLists[netId].push_back(connectConfigMap[connIt->second.connId]);
				// ...
			}
		}
		for (std::map<int, compConnectConfig>::iterator connIt = compConnectConfigMap.begin(); connIt != compConnectConfigMap.end(); connIt++) {
			if (groupConfigMDMap[connIt->second.grpSrc].netId == netId && groupConfigMDMap[connIt->second.grpDest].netId == netId) {
				localCompConnectLists[netId].push_back(compConnectConfigMap[connIt->second.connId]);
				// ...
			}
		}
	}

	// ...
	spikeRoutingTable.clear();
	// ...
	if (!groupPartitionLists[netId].empty()) {
		for (std::map<int, ConnectConfig>::iterator connIt = connectConfigMap.begin(); connIt != connectConfigMap.end(); connIt++) {
			int srcNetId = groupConfigMDMap[connIt->second.grpSrc].netId;
			int destNetId = groupConfigMDMap[connIt->second.grpDest].netId;
			if (srcNetId == netId && destNetId != netId) {
				// ...
				std::list<GroupConfigMD>::iterator srcGrpIt, destGrpIt;
				// ...
				targetGroup.gGrpId = connIt->second.grpSrc;
				srcGrpIt = find(groupPartitionLists[srcNetId].begin(), groupPartitionLists[srcNetId].end(), targetGroup);
				assert(srcGrpIt != groupPartitionLists[srcNetId].end());
				srcGrpIt->hasExternalConnect = true;
				// ...
				targetGroup.gGrpId = connIt->second.grpDest;
				destGrpIt = find(groupPartitionLists[srcNetId].begin(), groupPartitionLists[srcNetId].end(), targetGroup);
				if (destGrpIt == groupPartitionLists[srcNetId].end()) {
					numAssignedNeurons[srcNetId] += groupConfigMap[connIt->second.grpDest].numN;
					groupPartitionLists[srcNetId].push_back(groupConfigMDMap[connIt->second.grpDest]);
					// ...
				}
				// ...
				targetGroup.gGrpId = connIt->second.grpSrc;
				srcGrpIt = find(groupPartitionLists[destNetId].begin(), groupPartitionLists[destNetId].end(), targetGroup);
				if (srcGrpIt == groupPartitionLists[destNetId].end()) {
					numAssignedNeurons[destNetId] += groupConfigMap[connIt->second.grpSrc].numN;
					groupPartitionLists[destNetId].push_back(groupConfigMDMap[connIt->second.grpSrc]);
					// ...
				}
				// ...
				externalConnectLists[srcNetId].push_back(connectConfigMap[connIt->second.connId]);
				// ...
				spikeRoutingTable.push_back(rte);
			}
		}
	}
	// ...
	spikeRoutingTable.unique();

	// ...
	if (!groupPartitionLists[netId].empty()) {
		int availableNeuronId = 0;
		int localGroupId = 0;
		for (int order = 0; order < 5; order++) {
			for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
				unsigned int type = groupConfigMap[grpIt->gGrpId].type;
				// ...
				availableNeuronId = assignGroup(grpIt, localGroupId, availableNeuronId);
				// ...
				availableNeuronId = assignGroup(grpIt, localGroupId, availableNeuronId);
				// ...
				availableNeuronId = assignGroup(grpIt, localGroupId, availableNeuronId);
				// ...
				availableNeuronId = assignGroup(grpIt, localGroupId, availableNeuronId);
				// ...
				} else if (order == 4 && grpIt->netId != netId) {
					availableNeuronId = assignGroup(grpIt, localGroupId, availableNeuronId);
				}
			}
		}
		assert(availableNeuronId == numAssignedNeurons[netId]);
		assert(localGroupId == groupPartitionLists[netId].size());
	}

	// ...
	if (loadSimFID == NULL) {
		// ...
	}
	// ...
	loadSimulation_internal(false);
	// ...
	collectGlobalNetworkConfigP();

	// ...
	if (!groupPartitionLists[netId].empty()) {
		// ...
		for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++)
			printGroupInfo(netId, grpIt);
		// ...
		if (!localConnectLists[netId].empty() || !externalConnectLists[netId].empty()) {
			// ...
			for (std::list<ConnectConfig>::iterator connIt = localConnectLists[netId].begin(); connIt != localConnectLists[netId].end(); connIt++)
				printConnectionInfo(netId, connIt);
			// ...
			for (std::list<ConnectConfig>::iterator connIt = externalConnectLists[netId].begin(); connIt != externalConnectLists[netId].end(); connIt++)
				printConnectionInfo(netId, connIt);
		}
	}
	// ...
	printSikeRoutingInfo();
	// ...
}
int SNN::loadSimulation_internal(bool onlyPlastic) {
	// ...
	long file_position = ftell(loadSimFID);
	// ...
	bool readErr = false;
	// ...
	fseek(loadSimFID, 0, SEEK_SET);

	// ...
	result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
	readErr |= (result != 1);
	if (tmpInt != 294338571) {
		KERNEL_ERROR("loadSimulation: Unknown file signature. This does not seem to be a "
			"simulation file created with CARLsim::saveSimulation.");
		// ...
	}

	// ...
	result = fread(&tmpFloat, sizeof(float), 1, loadSimFID);
	readErr |= (result != 1);
	if (tmpFloat > 0.3f) {
		KERNEL_ERROR("loadSimulation: Unsupported version number (%f)", tmpFloat);
		// ...
	}

	// ...
	result = fread(&tmpFloat, sizeof(float), 1, loadSimFID);
	readErr |= (result != 1);
	// ...
	result = fread(&tmpFloat, sizeof(float), 1, loadSimFID);
	readErr |= (result != 1);

	// ...
	result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
	readErr |= (result != 1);
	if (tmpInt != glbNetworkConfig.numN) {
		KERNEL_ERROR("loadSimulation: Number of neurons in file (%d) and simulation (%d) don't match.",
			tmpInt, glbNetworkConfig.numN);
		// ...
	}

	// ...
	result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
	readErr |= (result != 1);
	if (tmpInt != numGroups) {
		KERNEL_ERROR("loadSimulation: Number of groups in file (%d) and simulation (%d) don't match.",
			/* ... */);
		// ...
	}

	// ...
	fprintf(stderr, "loadSimulation: Error while reading file header");

	// ...
	for (int g = 0; g < numGroups; g++) {
		// ...
		result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
		readErr |= (result != 1);
		if (tmpInt != groupConfigMDMap[g].gStartN) {
			KERNEL_ERROR("loadSimulation: StartN in file (%d) and grpInfo (%d) for group %d don't match.",
				tmpInt, groupConfigMDMap[g].gStartN, g);
			// ...
		}
		// ...
		result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
		readErr |= (result != 1);
		if (tmpInt != groupConfigMDMap[g].gEndN) {
			KERNEL_ERROR("loadSimulation: EndN in file (%d) and grpInfo (%d) for group %d don't match.",
				tmpInt, groupConfigMDMap[g].gEndN, g);
			// ...
		}
		// ...
		result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
		readErr |= (result != 1);
		if (tmpInt != groupConfigMap[g].grid.numX) {
			KERNEL_ERROR("loadSimulation: numX in file (%d) and grpInfo (%d) for group %d don't match.",
				tmpInt, groupConfigMap[g].grid.numX, g);
			// ...
		}
		// ...
		result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
		readErr |= (result != 1);
		if (tmpInt != groupConfigMap[g].grid.numY) {
			KERNEL_ERROR("loadSimulation: numY in file (%d) and grpInfo (%d) for group %d don't match.",
				tmpInt, groupConfigMap[g].grid.numY, g);
			// ...
		}
		// ...
		result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
		readErr |= (result != 1);
		if (tmpInt != groupConfigMap[g].grid.numZ) {
			KERNEL_ERROR("loadSimulation: numZ in file (%d) and grpInfo (%d) for group %d don't match.",
				tmpInt, groupConfigMap[g].grid.numZ, g);
			// ...
		}
		// ...
		result = fread(name, sizeof(char), 100, loadSimFID);
		readErr |= (result != 100);
		if (strcmp(name, groupConfigMap[g].grpName.c_str()) != 0) {
			KERNEL_ERROR("loadSimulation: Group names in file (%s) and grpInfo (%s) don't match.", name,
				groupConfigMap[g].grpName.c_str());
			// ...
		}
	}
	// ...
	KERNEL_ERROR("loadSimulation: Error while reading group info");

	// ...
	result = fread(&net_count, sizeof(int), 1, loadSimFID);
	readErr |= (result != 1);

	for (int i = 0; i < net_count; i++) {
		int synapse_count = 0;
		result = fread(&synapse_count, sizeof(int), 1, loadSimFID);
		for (int j = 0; j < synapse_count; j++) {
			// ...
			result = fread(&gGrpIdPre, sizeof(int), 1, loadSimFID);
			readErr |= (result != 1);
			// ...
			result = fread(&gGrpIdPost, sizeof(int), 1, loadSimFID);
			readErr |= (result != 1);
			// ...
			result = fread(&grpNIdPre, sizeof(int), 1, loadSimFID);
			readErr |= (result != 1);
			// ...
			result = fread(&grpNIdPost, sizeof(int), 1, loadSimFID);
			readErr |= (result != 1);
			// ...
			result = fread(&connId, sizeof(int), 1, loadSimFID);
			readErr |= (result != 1);
			// ...
			result = fread(&weight, sizeof(float), 1, loadSimFID);
			readErr |= (result != 1);
			// ...
			result = fread(&maxWeight, sizeof(float), 1, loadSimFID);
			readErr |= (result != 1);
			// ...
			result = fread(&delay, sizeof(int), 1, loadSimFID);
			readErr |= (result != 1);

			// ...
			if (connectConfigMap[connId].grpSrc != gGrpIdPre) {
				KERNEL_ERROR("loadSimulation: source group in file (%d) and in simulation (%d) for connection %d don't match.",
					gGrpIdPre, connectConfigMap[connId].grpSrc, connId);
				// ...
			}
			if (connectConfigMap[connId].grpDest != gGrpIdPost) {
				KERNEL_ERROR("loadSimulation: dest group in file (%d) and in simulation (%d) for connection %d don't match.",
					gGrpIdPost, connectConfigMap[connId].grpDest, connId);
				// ...
			}

			// ...
			int netIdPre = groupConfigMDMap[gGrpIdPre].netId;
			int netIdPost = groupConfigMDMap[gGrpIdPost].netId;
			bool isExternal = (netIdPre != netIdPost);

			// ...
			int globalNIdPre = groupConfigMDMap[gGrpIdPre].gStartN + grpNIdPre;
			int globalNIdPost = groupConfigMDMap[gGrpIdPost].gStartN + grpNIdPost;

			bool connected = false;
			// ...
			for (std::list<ConnectConfig>::iterator connIt = localConnectLists[netIdPre].begin(); connIt != localConnectLists[netIdPre].end() && (!connected); connIt++) {
				if (connIt->connId == connId) {
					// ...
					connectNeurons(netIdPre, gGrpIdPre, gGrpIdPost, globalNIdPre, globalNIdPost, connId, weight, maxWeight, delay, -1);
					// ...
					connIt->numberOfConnections++;
					std::list<GroupConfigMD>::iterator grpIt;
					// ...
					targetGrp.gGrpId = gGrpIdPre;
					grpIt = std::find(groupPartitionLists[netIdPre].begin(), groupPartitionLists[netIdPre].end(), targetGrp);
					assert(grpIt != groupPartitionLists[netIdPre].end());
					grpIt->numPostSynapses += 1;
					// ...
					targetGrp.gGrpId = gGrpIdPost;
					grpIt = std::find(groupPartitionLists[netIdPre].begin(), groupPartitionLists[netIdPre].end(), targetGrp);
					assert(grpIt != groupPartitionLists[netIdPost].end());
					grpIt->numPreSynapses += 1;
				}
			}
			// ...
			for (std::list<ConnectConfig>::iterator connIt = externalConnectLists[netIdPre].begin(); connIt != externalConnectLists[netIdPre].end() && (!connected); connIt++) {
				if (connIt->connId == connId) {
					// ...
					connectNeurons(netIdPre, gGrpIdPre, gGrpIdPost, globalNIdPre, globalNIdPost, connId, weight, maxWeight, delay, netIdPost);
					// ...
					connIt->numberOfConnections++;
					// ...
					std::list<GroupConfigMD>::iterator grpIt;
					// ...
					targetGrp.gGrpId = gGrpIdPre;
					grpIt = std::find(groupPartitionLists[netIdPre].begin(), groupPartitionLists[netIdPre].end(), targetGrp);
					assert(grpIt != groupPartitionLists[netIdPre].end());
					grpIt->numPostSynapses += 1;
					// ...
					targetGrp.gGrpId = gGrpIdPost;
					grpIt = std::find(groupPartitionLists[netIdPre].begin(), groupPartitionLists[netIdPre].end(), targetGrp);
					assert(grpIt != groupPartitionLists[netIdPost].end());
					grpIt->numPreSynapses += 1;
					// ...
					targetGrp.gGrpId = gGrpIdPre;
					grpIt = std::find(groupPartitionLists[netIdPost].begin(), groupPartitionLists[netIdPost].end(), targetGrp);
					assert(grpIt != groupPartitionLists[netIdPost].end());
					grpIt->numPostSynapses += 1;
					// ...
					targetGrp.gGrpId = gGrpIdPost;
					grpIt = std::find(groupPartitionLists[netIdPost].begin(), groupPartitionLists[netIdPost].end(), targetGrp);
					assert(grpIt != groupPartitionLists[netIdPost].end());
					grpIt->numPreSynapses += 1;
				}
			}
		}
	}

	fseek(loadSimFID, file_position, SEEK_SET);
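The header checks above read a fixed signature (294338571), a float version number, two further floats, and the neuron and group counts before the per-group metadata. A minimal reader for just that prefix is sketched below, assuming the layout is exactly the fread sequence visible above; fields that fall in the elided listing lines are not handled, and the two unchecked floats are treated as opaque.

// Sketch: reading the header prefix of a saved-simulation file, following the
// fread sequence in loadSimulation_internal. Anything beyond these six fields
// is out of scope for this sketch.
#include <cstdio>

bool readSaveFileHeaderPrefix(FILE* f) {
	int   sig = 0, numNeurons = 0, numGroups = 0;
	float version = 0.0f, unusedA = 0.0f, unusedB = 0.0f;

	bool ok = true;
	ok &= fread(&sig, sizeof(int), 1, f) == 1 && sig == 294338571;       // file signature
	ok &= fread(&version, sizeof(float), 1, f) == 1 && version <= 0.3f;  // supported version range
	ok &= fread(&unusedA, sizeof(float), 1, f) == 1;                     // read but not validated here
	ok &= fread(&unusedB, sizeof(float), 1, f) == 1;
	ok &= fread(&numNeurons, sizeof(int), 1, f) == 1;                    // must match the configured network
	ok &= fread(&numGroups, sizeof(int), 1, f) == 1;
	return ok;
}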
void SNN::generateRuntimeSNN() {
	// ...
	generateRuntimeGroupConfigs();
	// ...
	generateRuntimeConnectConfigs();
	// ...
	generateRuntimeNetworkConfigs();

	// ...
	allocateManagerSpikeTables();
	// ...
	allocateManagerRuntimeData();

	// ...
	if (!groupPartitionLists[netId].empty()) {
		// ...
		KERNEL_INFO("***************** Initializing GPU %d Runtime *************************", netId);
		// ...
		for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) {
			if (groupConfigs[netId][lGrpId].netId == netId && (groupConfigs[netId][lGrpId].Type & POISSON_NEURON)) {
				// ...
				generatePoissonGroupRuntime(netId, lGrpId);
			}
			if (groupConfigs[netId][lGrpId].netId == netId && !(groupConfigs[netId][lGrpId].Type & POISSON_NEURON)) {
				// ...
				generateGroupRuntime(netId, lGrpId);
			}
		}

		// ...
		for (int lNId = 0; lNId < networkConfigs[netId].numNAssigned; lNId++) {
			managerRuntimeData.grpIds[lNId] = -1;
			for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroupsAssigned; lGrpId++) {
				if (lNId >= groupConfigs[netId][lGrpId].lStartN && lNId <= groupConfigs[netId][lGrpId].lEndN) {
					managerRuntimeData.grpIds[lNId] = (short int)lGrpId;
					// ...
				}
			}
			assert(managerRuntimeData.grpIds[lNId] != -1);
		}

		// ...
		generateConnectionRuntime(netId);
		// ...
		generateCompConnectionRuntime(netId);

		// ...
		resetCurrent(netId);
		// ...
		resetConductances(netId);
		// ...
		resetSynapse(netId, false);
	}
	// ...
	numGPUs = 0; numCores = 0;
void SNN::resetConductances(int netId) {
	if (networkConfigs[netId].sim_with_conductances) {
		memset(managerRuntimeData.gAMPA, 0, sizeof(float) * networkConfigs[netId].numNReg);
		if (networkConfigs[netId].sim_with_NMDA_rise) {
			memset(managerRuntimeData.gNMDA_r, 0, sizeof(float) * networkConfigs[netId].numNReg);
			memset(managerRuntimeData.gNMDA_d, 0, sizeof(float) * networkConfigs[netId].numNReg);
		} else {
			memset(managerRuntimeData.gNMDA, 0, sizeof(float) * networkConfigs[netId].numNReg);
		}
		memset(managerRuntimeData.gGABAa, 0, sizeof(float) * networkConfigs[netId].numNReg);
		if (networkConfigs[netId].sim_with_GABAb_rise) {
			memset(managerRuntimeData.gGABAb_r, 0, sizeof(float) * networkConfigs[netId].numNReg);
			memset(managerRuntimeData.gGABAb_d, 0, sizeof(float) * networkConfigs[netId].numNReg);
		} else {
			memset(managerRuntimeData.gGABAb, 0, sizeof(float) * networkConfigs[netId].numNReg);
		}
	}
}

void SNN::resetCurrent(int netId) {
	assert(managerRuntimeData.current != NULL);
	memset(managerRuntimeData.current, 0, sizeof(float) * networkConfigs[netId].numNReg);
}
void SNN::resetFiringInformation() {
	// ...
	resetPropogationBuffer();
	// ...
}

void SNN::resetTiming() {
	prevExecutionTime = cumExecutionTime;
	executionTime = 0.0f;
}

void SNN::resetNeuromodulator(int netId, int lGrpId) {
	managerRuntimeData.grpDA[lGrpId] = groupConfigs[netId][lGrpId].baseDP;
	managerRuntimeData.grp5HT[lGrpId] = groupConfigs[netId][lGrpId].base5HT;
	managerRuntimeData.grpACh[lGrpId] = groupConfigs[netId][lGrpId].baseACh;
	managerRuntimeData.grpNE[lGrpId] = groupConfigs[netId][lGrpId].baseNE;
}
void SNN::resetNeuron(int netId, int lGrpId, int lNId) {
	int gGrpId = groupConfigs[netId][lGrpId].gGrpId;
	assert(lNId < networkConfigs[netId].numNReg);

	if (groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_a == -1 && groupConfigMap[gGrpId].isLIF == 0) {
		KERNEL_ERROR("setNeuronParameters must be called for group %s (G:%d,L:%d)", groupConfigMap[gGrpId].grpName.c_str(), gGrpId, lGrpId);
		// ...
	}

	if (groupConfigMap[gGrpId].neuralDynamicsConfig.lif_tau_m == -1 && groupConfigMap[gGrpId].isLIF == 1) {
		KERNEL_ERROR("setNeuronParametersLIF must be called for group %s (G:%d,L:%d)", groupConfigMap[gGrpId].grpName.c_str(), gGrpId, lGrpId);
		// ...
	}

	managerRuntimeData.Izh_a[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_a + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_a_sd * (float)drand48();
	managerRuntimeData.Izh_b[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_b + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_b_sd * (float)drand48();
	managerRuntimeData.Izh_c[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_c + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_c_sd * (float)drand48();
	managerRuntimeData.Izh_d[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_d + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_d_sd * (float)drand48();
	managerRuntimeData.Izh_C[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_C + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_C_sd * (float)drand48();
	managerRuntimeData.Izh_k[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_k + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_k_sd * (float)drand48();
	managerRuntimeData.Izh_vr[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vr + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vr_sd * (float)drand48();
	managerRuntimeData.Izh_vt[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vt + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vt_sd * (float)drand48();
	managerRuntimeData.Izh_vpeak[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vpeak + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vpeak_sd * (float)drand48();
	managerRuntimeData.lif_tau_m[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.lif_tau_m;
	managerRuntimeData.lif_tau_ref[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.lif_tau_ref;
	// ...
	managerRuntimeData.lif_vTh[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.lif_vTh;
	managerRuntimeData.lif_vReset[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.lif_vReset;

	// ...
	if (groupConfigs[netId][lGrpId].isLIF) {
		// ...
		float rmRange = (float)(groupConfigMap[gGrpId].neuralDynamicsConfig.lif_maxRmem - groupConfigMap[gGrpId].neuralDynamicsConfig.lif_minRmem);
		float minRmem = (float)groupConfigMap[gGrpId].neuralDynamicsConfig.lif_minRmem;
		managerRuntimeData.lif_bias[lNId] = 0.0f;
		managerRuntimeData.lif_gain[lNId] = minRmem + rmRange * (float)drand48();
		// ...
	}

	// ...
	if (groupConfigs[netId][lGrpId].WithHomeostasis) {
		// ...
		if (drand48() > 0.5) {
			managerRuntimeData.baseFiring[lNId] = groupConfigMap[gGrpId].homeoConfig.baseFiring + groupConfigMap[gGrpId].homeoConfig.baseFiringSD * -log(drand48());
		} else {
			managerRuntimeData.baseFiring[lNId] = groupConfigMap[gGrpId].homeoConfig.baseFiring - groupConfigMap[gGrpId].homeoConfig.baseFiringSD * -log(drand48());
		}
		// ...
		if (groupConfigMap[gGrpId].homeoConfig.baseFiring != 0.0f) {
			// ...
		}
		// ...
		managerRuntimeData.avgFiring[lNId] = 0.0f;
	}

	// ...
	if (groupConfigs[netId][lGrpId].WithSTP) {
		for (int j = 0; j < networkConfigs[netId].maxDelay + 1; j++) {
			int index = STP_BUF_POS(lNId, j, networkConfigs[netId].maxDelay);
			managerRuntimeData.stpu[index] = 0.0f;
			managerRuntimeData.stpx[index] = 1.0f;
		}
	}
}
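resetNeuron draws each Izhikevich parameter per neuron as base + SD * drand48(), i.e. a one-sided uniform jitter in [base, base + SD). The sketch below reproduces that sampling for a single parameter, using std::mt19937 in place of drand48(); the group struct is a stand-in for illustration, not CARLsim's NeuralDynamicsConfig.

// Sketch: per-neuron parameter heterogeneity as used in resetNeuron.
#include <random>
#include <vector>

struct IzhGroupParams { float a, a_sd; };

std::vector<float> sampleIzhA(const IzhGroupParams& p, int numNeurons, unsigned seed) {
	std::mt19937 rng(seed);
	std::uniform_real_distribution<float> u(0.0f, 1.0f);
	std::vector<float> a(numNeurons);
	for (int i = 0; i < numNeurons; i++)
		a[i] = p.a + p.a_sd * u(rng);  // one-sided uniform jitter, matching base + sd * drand48()
	return a;
}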
void SNN::resetMonitors(bool deallocate) {
	// ...
	for (int i = 0; i < numSpikeMonitor; i++) {
		if (spikeMonList[i] != NULL && deallocate) delete spikeMonList[i];
		spikeMonList[i] = NULL;
	}
	// ...
	for (int i = 0; i < numNeuronMonitor; i++) {
		if (neuronMonList[i] != NULL && deallocate) delete neuronMonList[i];
		neuronMonList[i] = NULL;
	}
	// ...
	for (int i = 0; i < numGroupMonitor; i++) {
		if (groupMonList[i] != NULL && deallocate) delete groupMonList[i];
		groupMonList[i] = NULL;
	}
	// ...
	for (int i = 0; i < numConnectionMonitor; i++) {
		if (connMonList[i] != NULL && deallocate) delete connMonList[i];
		connMonList[i] = NULL;
	}
}
void SNN::resetGroupConfigs(bool deallocate) {
	// ...
	if (deallocate) groupConfigMap.clear();
}

void SNN::resetConnectionConfigs(bool deallocate) {
	// ...
	if (deallocate) connectConfigMap.clear();
}
void SNN::deleteManagerRuntimeData() {
	if (spikeBuf != NULL) delete spikeBuf;
	// ...
	if (managerRuntimeData.grpDA != NULL) delete[] managerRuntimeData.grpDA;
	if (managerRuntimeData.grp5HT != NULL) delete[] managerRuntimeData.grp5HT;
	if (managerRuntimeData.grpACh != NULL) delete[] managerRuntimeData.grpACh;
	if (managerRuntimeData.grpNE != NULL) delete[] managerRuntimeData.grpNE;
	managerRuntimeData.grpDA = NULL;
	managerRuntimeData.grp5HT = NULL;
	managerRuntimeData.grpACh = NULL;
	managerRuntimeData.grpNE = NULL;

	// ...
	if (managerRuntimeData.voltage != NULL) delete[] managerRuntimeData.voltage;
	// ...
	if (managerRuntimeData.recovery != NULL) delete[] managerRuntimeData.recovery;
	if (managerRuntimeData.current != NULL) delete[] managerRuntimeData.current;
	// ...
	if (managerRuntimeData.curSpike != NULL) delete[] managerRuntimeData.curSpike;
	if (managerRuntimeData.nVBuffer != NULL) delete[] managerRuntimeData.nVBuffer;
	if (managerRuntimeData.nUBuffer != NULL) delete[] managerRuntimeData.nUBuffer;
	if (managerRuntimeData.nIBuffer != NULL) delete[] managerRuntimeData.nIBuffer;

	// ...
	if (managerRuntimeData.Izh_a != NULL) delete[] managerRuntimeData.Izh_a;
	if (managerRuntimeData.Izh_b != NULL) delete[] managerRuntimeData.Izh_b;
	if (managerRuntimeData.Izh_c != NULL) delete[] managerRuntimeData.Izh_c;
	if (managerRuntimeData.Izh_d != NULL) delete[] managerRuntimeData.Izh_d;
	if (managerRuntimeData.Izh_C != NULL) delete[] managerRuntimeData.Izh_C;
	if (managerRuntimeData.Izh_k != NULL) delete[] managerRuntimeData.Izh_k;
	if (managerRuntimeData.Izh_vr != NULL) delete[] managerRuntimeData.Izh_vr;
	if (managerRuntimeData.Izh_vt != NULL) delete[] managerRuntimeData.Izh_vt;
	// ...
	managerRuntimeData.Izh_a = NULL; managerRuntimeData.Izh_b = NULL; managerRuntimeData.Izh_c = NULL; managerRuntimeData.Izh_d = NULL;
	managerRuntimeData.Izh_C = NULL; managerRuntimeData.Izh_k = NULL; managerRuntimeData.Izh_vr = NULL; managerRuntimeData.Izh_vt = NULL; managerRuntimeData.Izh_vpeak = NULL;

	// ...
	if (managerRuntimeData.lif_vTh != NULL) delete[] managerRuntimeData.lif_vTh;
	// ...
	if (managerRuntimeData.lif_gain != NULL) delete[] managerRuntimeData.lif_gain;
	if (managerRuntimeData.lif_bias != NULL) delete[] managerRuntimeData.lif_bias;

	// ...
	if (managerRuntimeData.Npre != NULL) delete[] managerRuntimeData.Npre;
	// ...
	if (managerRuntimeData.Npost != NULL) delete[] managerRuntimeData.Npost;

	// ...
	if (managerRuntimeData.gAMPA != NULL) delete[] managerRuntimeData.gAMPA;
	if (managerRuntimeData.gNMDA != NULL) delete[] managerRuntimeData.gNMDA;
	if (managerRuntimeData.gNMDA_r != NULL) delete[] managerRuntimeData.gNMDA_r;
	if (managerRuntimeData.gNMDA_d != NULL) delete[] managerRuntimeData.gNMDA_d;
	if (managerRuntimeData.gGABAa != NULL) delete[] managerRuntimeData.gGABAa;
	if (managerRuntimeData.gGABAb != NULL) delete[] managerRuntimeData.gGABAb;
	if (managerRuntimeData.gGABAb_r != NULL) delete[] managerRuntimeData.gGABAb_r;
	if (managerRuntimeData.gGABAb_d != NULL) delete[] managerRuntimeData.gGABAb_d;
	managerRuntimeData.gAMPA = NULL; managerRuntimeData.gNMDA = NULL; managerRuntimeData.gNMDA_r = NULL; managerRuntimeData.gNMDA_d = NULL;

	// ...
	if (managerRuntimeData.stpu != NULL) delete[] managerRuntimeData.stpu;
	if (managerRuntimeData.stpx != NULL) delete[] managerRuntimeData.stpx;
	managerRuntimeData.stpu = NULL; managerRuntimeData.stpx = NULL;

	// ...
	if (managerRuntimeData.wt != NULL) delete[] managerRuntimeData.wt;
	if (managerRuntimeData.maxSynWt != NULL) delete[] managerRuntimeData.maxSynWt;
	if (managerRuntimeData.wtChange != NULL) delete[] managerRuntimeData.wtChange;
	managerRuntimeData.wt = NULL; managerRuntimeData.maxSynWt = NULL; managerRuntimeData.wtChange = NULL;

	if (mulSynFast != NULL) delete[] mulSynFast;
	if (mulSynSlow != NULL) delete[] mulSynSlow;
	// ...
	mulSynFast = NULL; mulSynSlow = NULL; managerRuntimeData.connIdsPreIdx = NULL;

	if (managerRuntimeData.grpIds != NULL) delete[] managerRuntimeData.grpIds;
	managerRuntimeData.grpIds = NULL;
	// ...
}
void SNN::resetPoissonNeuron(int netId, int lGrpId, int lNId) {
	assert(lNId < networkConfigs[netId].numN);
	// ...
	if (groupConfigs[netId][lGrpId].WithHomeostasis)
		managerRuntimeData.avgFiring[lNId] = 0.0f;
	// ...
	if (groupConfigs[netId][lGrpId].WithSTP) {
		for (int j = 0; j < networkConfigs[netId].maxDelay + 1; j++) {
			int index = STP_BUF_POS(lNId, j, networkConfigs[netId].maxDelay);
			managerRuntimeData.stpu[index] = 0.0f;
			managerRuntimeData.stpx[index] = 1.0f;
		}
	}
}
void SNN::resetPropogationBuffer() {
	// ...
	spikeBuf->reset(0, 1023);
}
void SNN::resetSynapse(int netId, bool changeWeights) {
	memset(managerRuntimeData.wtChange, 0, sizeof(float) * networkConfigs[netId].numPreSynNet);
	// ...
	for (int syn = 0; syn < networkConfigs[netId].numPreSynNet; syn++) {
		// ...
	}
}
void SNN::resetTimeTable() {
	memset(managerRuntimeData.timeTableD2, 0, sizeof(int) * (1000 + glbNetworkConfig.maxDelay + 1));
	memset(managerRuntimeData.timeTableD1, 0, sizeof(int) * (1000 + glbNetworkConfig.maxDelay + 1));
}

void SNN::resetFiringTable() {
	memset(managerRuntimeData.firingTableD2, 0, sizeof(int) * managerRTDSize.maxMaxSpikeD2);
	memset(managerRuntimeData.firingTableD1, 0, sizeof(int) * managerRTDSize.maxMaxSpikeD1);
	// ...
	memset(managerRuntimeData.extFiringTableD2, 0, sizeof(int*) * managerRTDSize.maxNumGroups);
	memset(managerRuntimeData.extFiringTableD1, 0, sizeof(int*) * managerRTDSize.maxNumGroups);
}
void SNN::resetSpikeCnt(int gGrpId) {
	assert(gGrpId >= ALL);

	if (gGrpId == ALL) {
#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
		pthread_t threads[numCores + 1];
		// ...
		int threadCount = 0;
#endif
		// ...
		if (!groupPartitionLists[netId].empty()) {
			// ...
			resetSpikeCnt_GPU(netId, ALL);
			// ...
#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
			resetSpikeCnt_CPU(netId, ALL);
#else // Linux or MAC
			pthread_attr_t attr;
			pthread_attr_init(&attr);
			// ...
			pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);
			// ...
			argsThreadRoutine[threadCount].snn_pointer = this;
			argsThreadRoutine[threadCount].netId = netId;
			argsThreadRoutine[threadCount].lGrpId = ALL;
			argsThreadRoutine[threadCount].startIdx = 0;
			argsThreadRoutine[threadCount].endIdx = 0;
			argsThreadRoutine[threadCount].GtoLOffset = 0;
			// ...
			pthread_create(&threads[threadCount], &attr, &SNN::helperResetSpikeCnt_CPU, (void*)&argsThreadRoutine[threadCount]);
			pthread_attr_destroy(&attr);
#endif
			// ...
		}

#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
		// ...
		for (int i = 0; i < threadCount; i++) {
			pthread_join(threads[i], NULL);
		}
#endif
	} else {
		int netId = groupConfigMDMap[gGrpId].netId;
		int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
		// ...
		resetSpikeCnt_GPU(netId, lGrpId);
		// ...
		resetSpikeCnt_CPU(netId, lGrpId);
	}
}
inline SynInfo SNN::SET_CONN_ID(int nId, int sId, int grpId) {
	// ...
}
void SNN::setGrpTimeSlice(int gGrpId, int timeSlice) {
	if (gGrpId == ALL) {
		for (int grpId = 0; grpId < numGroups; grpId++) {
			if (groupConfigMap[grpId].isSpikeGenerator)
				setGrpTimeSlice(grpId, timeSlice);
		}
	} else {
		// ...
		groupConfigMDMap[gGrpId].currTimeSlice = timeSlice;
	}
}
int SNN::setRandSeed(int seed) {
	// ...
}
void SNN::fillSpikeGenBits(int netId) {
	// ...
	for (spikeBufIter = spikeBuf->front(); spikeBufIter != spikeBufIterEnd; ++spikeBufIter) {
		// ...
		int gGrpId = spikeBufIter->grpId;
		// ...
		if (groupConfigMDMap[gGrpId].netId == netId) {
			int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
			int lNId = spikeBufIter->neurId + groupConfigMDMap[gGrpId].GtoLOffset;
			// ...
			assert(groupConfigMap[gGrpId].isSpikeGenerator == true);
			// ...
			int nIdPos = (lNId - groupConfigs[netId][lGrpId].lStartN + groupConfigs[netId][lGrpId].Noffset);
			int nIdBitPos = nIdPos % 32;
			int nIdIndex = nIdPos / 32;
			// ...
			assert(nIdIndex < (networkConfigs[netId].numNSpikeGen / 32 + 1));
			// ...
			managerRuntimeData.spikeGenBits[nIdIndex] |= (1 << nIdBitPos);
		}
	}
}
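fillSpikeGenBits packs one bit per spike-generator neuron: position nIdPos maps to word nIdPos/32 and bit nIdPos%32, and the scheduled spike is OR-ed into the bit buffer. The sketch below shows the same packing and the matching lookup on a plain vector (not managerRuntimeData.spikeGenBits); a buffer for N generator neurons needs N/32 + 1 words, which is what the assert above bounds.

// Sketch: one-bit-per-neuron packing used for scheduled generator spikes.
#include <vector>
#include <cstdint>

void setSpikeBit(std::vector<uint32_t>& bits, int nIdPos) {
	bits[nIdPos / 32] |= (1u << (nIdPos % 32));   // word index, bit index
}

bool getSpikeBit(const std::vector<uint32_t>& bits, int nIdPos) {
	return (bits[nIdPos / 32] >> (nIdPos % 32)) & 1u;
}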
void SNN::startTiming() { prevExecutionTime = cumExecutionTime; }
void SNN::stopTiming() {
	executionTime += (cumExecutionTime - prevExecutionTime);
	prevExecutionTime = cumExecutionTime;
}

// ...
	if (shallUpdateWeights && !sim_in_testing) {
		// ...
		if (wtANDwtChangeUpdateIntervalCnt_) {
			float storeScaleSTDP = stdpScaleFactor_;
			stdpScaleFactor_ = 1.0f / wtANDwtChangeUpdateIntervalCnt_;
			// ...
			stdpScaleFactor_ = storeScaleSTDP;
		}
	}

// ...
	sim_in_testing = true;
	// ...
	if (!groupPartitionLists[netId].empty()) {
		// ...
		updateNetworkConfig(netId);
	}

// ...
	sim_in_testing = false;
	// ...
	if (!groupPartitionLists[netId].empty()) {
		// ...
		updateNetworkConfig(netId);
	}

// ...
	for (int monId = 0; monId < numConnectionMonitor; monId++) {
		// ...
		if (timeInterval == 1 || timeInterval > 1 && (getSimTime() % timeInterval) == 0) {
			// ...
		}
	}
// ...
	assert(connId > ALL);
	std::vector< std::vector<float> > wtConnId;
	// ...
	int grpIdPre = connectConfigMap[connId].grpSrc;
	int grpIdPost = connectConfigMap[connId].grpDest;
	// ...
	int netIdPost = groupConfigMDMap[grpIdPost].netId;
	int lGrpIdPost = groupConfigMDMap[grpIdPost].lGrpId;

	// ...
	for (int i = 0; i < groupConfigMap[grpIdPre].numN; i++) {
		std::vector<float> wtSlice;
		for (int j = 0; j < groupConfigMap[grpIdPost].numN; j++) {
			wtSlice.push_back(NAN);
		}
		wtConnId.push_back(wtSlice);
	}

	// ...
	assert(grpIdPost > ALL);
	// ...
	fetchWeightState(netIdPost, lGrpIdPost);
	fetchConnIdsLookupArray(netIdPost);
	// ...
	for (int lNIdPost = groupConfigs[netIdPost][lGrpIdPost].lStartN; lNIdPost <= groupConfigs[netIdPost][lGrpIdPost].lEndN; lNIdPost++) {
		unsigned int pos_ij = managerRuntimeData.cumulativePre[lNIdPost];
		for (int i = 0; i < managerRuntimeData.Npre[lNIdPost]; i++, pos_ij++) {
			// ...
			wtConnId[lNIdPre - groupConfigs[netIdPost][lGrpIdPre].lStartN][lNIdPost - groupConfigs[netIdPost][lGrpIdPost].lStartN] =
				fabs(managerRuntimeData.wt[pos_ij]);
		}
	}
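The connection-monitor snapshot above is a numPre x numPost matrix initialized to NAN, with the absolute weight filled in for every existing synapse, so absent connections stay NAN. A compact sketch of the same layout with an illustrative synapse list (not CARLsim's runtime arrays):

// Sketch: a pre x post weight snapshot where missing synapses remain NAN.
#include <cmath>
#include <vector>

struct Syn { int pre, post; float wt; };

std::vector<std::vector<float>> weightMatrix(int numPre, int numPost,
                                             const std::vector<Syn>& syns) {
	std::vector<std::vector<float>> wt(numPre, std::vector<float>(numPost, NAN));
	for (const Syn& s : syns)
		wt[s.pre][s.post] = std::fabs(s.wt);  // the monitor stores absolute weights
	return wt;
}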
// ...
	if (!numGroupMonitor)
		return;
	// ...
	if (gGrpId == ALL) {
		for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
			// ...
		}
	} else {
		int netId = groupConfigMDMap[gGrpId].netId;
		int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
		// ...
		int monitorId = groupConfigMDMap[gGrpId].groupMonitorId;
		// ...
		if (monitorId < 0) return;
		// ...
		KERNEL_ERROR("updateGroupMonitor(grpId=%d) must be called at least once every second", gGrpId);
		// ...
		fetchGroupState(netId, lGrpId);
		// ...
		int numMsMin = lastUpdate % 1000;
		// ...
		assert(numMsMin < numMsMax);
		// ...
		bool writeGroupToFile = grpFileId != NULL;
		bool writeGroupToArray = grpMonObj->isRecording();
		// ...
		for (int t = numMsMin; t < numMsMax; t++) {
			// ...
			data = managerRuntimeData.grpDABuffer[lGrpId * 1000 + t];
			// ...
			int time = currentTimeSec * 1000 + t;
			// ...
			if (writeGroupToFile) {
				// ...
			}
			if (writeGroupToArray) {
				// ...
			}
		}
		// ...
		if (grpFileId != NULL) {
			// ...
		}
	}
void SNN::userDefinedSpikeGenerator(int gGrpId) {
	// ...
	int netId = groupConfigMDMap[gGrpId].netId;
	int timeSlice = groupConfigMDMap[gGrpId].currTimeSlice;
	int currTime = simTime;
	// ...
	fetchLastSpikeTime(netId);

	for (int gNId = groupConfigMDMap[gGrpId].gStartN; gNId <= groupConfigMDMap[gGrpId].gEndN; gNId++) {
		// ...
		int lNId = gNId + groupConfigMDMap[gGrpId].GtoLOffset;
		// ...
		int endOfTimeWindow = std::min(currTime + timeSlice, simTimeRunStop);
		// ...
		int nextSchedTime = spikeGenFunc->nextSpikeTime(this, gGrpId, gNId - groupConfigMDMap[gGrpId].gStartN, currTime, nextTime, endOfTimeWindow);
		// ...
		if ((nextSchedTime == 0 || nextSchedTime > nextTime) && nextSchedTime < endOfTimeWindow && nextSchedTime >= currTime) {
			// ...
			nextTime = nextSchedTime;
			spikeBuf->schedule(gNId, gGrpId, nextTime - currTime);
		}
	}
}
void SNN::generateUserDefinedSpikes() {
	for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
		if (groupConfigMap[gGrpId].isSpikeGenerator) {
			// ...
			if (((simTime - groupConfigMDMap[gGrpId].sliceUpdateTime) >= groupConfigMDMap[gGrpId].currTimeSlice || simTime == simTimeRunStart)) {
				int timeSlice = groupConfigMDMap[gGrpId].currTimeSlice;
				groupConfigMDMap[gGrpId].sliceUpdateTime = simTime;
				// ...
				if (groupConfigMap[gGrpId].spikeGenFunc != NULL) {
					userDefinedSpikeGenerator(gGrpId);
				}
			}
		}
	}
}
void SNN::allocateManagerSpikeTables() {
	managerRuntimeData.firingTableD2 = new int[managerRTDSize.maxMaxSpikeD2];
	managerRuntimeData.firingTableD1 = new int[managerRTDSize.maxMaxSpikeD1];
	// ...
	managerRuntimeData.extFiringTableD2 = new int*[managerRTDSize.maxNumGroups];
	managerRuntimeData.extFiringTableD1 = new int*[managerRTDSize.maxNumGroups];
	// ...
}
bool SNN::updateTime() {
	bool finishedOneSec = false;
	// ...
	if (++simTimeMs == 1000) {
		// ...
		finishedOneSec = true;
		// ...
	}
	// ...
	KERNEL_WARN("Maximum Simulation Time Reached...Resetting simulation time");
	// ...
	return finishedOneSec;
}
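updateTime advances the millisecond counter and reports when a whole second has completed, at which point the per-second buffers are flushed elsewhere. A minimal equivalent of that rollover, with an illustrative struct rather than SNN's actual members:

// Sketch: millisecond/second bookkeeping equivalent to the rollover in updateTime().
struct SimClock {
	int simTimeMs = 0;        // ms within the current second
	int simTimeSec = 0;       // whole seconds simulated
	long long simTime = 0;    // total ms simulated

	// Advance by 1 ms; return true exactly when a full second completes.
	bool tick() {
		simTime++;
		if (++simTimeMs == 1000) {
			simTimeMs = 0;
			simTimeSec++;
			return true;
		}
		return false;
	}
};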
// ...
	if (!numSpikeMonitor)
		return;
	// ...
	if (gGrpId == ALL) {
		for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
			// ...
		}
	} else {
		int netId = groupConfigMDMap[gGrpId].netId;
		int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
		// ...
		int monitorId = groupConfigMDMap[gGrpId].spikeMonitorId;
		// ...
		if (monitorId < 0) return;
		// ...
		if (((long int)getSimTime()) - lastUpdate <= 0)
			return;
		// ...
		if (((long int)getSimTime()) - lastUpdate > 1000)
			KERNEL_ERROR("updateSpikeMonitor(grpId=%d) must be called at least once every second", gGrpId);
		// ...
		KERNEL_WARN("Reduce the cumulative recording time (currently %lu minutes) or the group size (currently %d) to avoid this.", spkMonObj->getAccumTime() / (1000 * 60), this->getGroupNumNeurons(gGrpId));
		// ...
		fetchSpikeTables(netId);
		fetchGrpIdsLookupArray(netId);
		// ...
		int numMsMin = lastUpdate % 1000;
		// ...
		assert(numMsMin < numMsMax);
		// ...
		bool writeSpikesToFile = spkFileId != NULL;
		// ...
		for (int k = 0; k < 2; k++) {
			// ...
			for (int t = numMsMin; t < numMsMax; t++) {
				for (int i = timeTablePtr[t + glbNetworkConfig.maxDelay]; i < timeTablePtr[t + glbNetworkConfig.maxDelay + 1]; i++) {
					// ...
					int lNId = fireTablePtr[i];
					// ...
					int this_grpId = managerRuntimeData.grpIds[lNId];
					if (this_grpId != lGrpId)
						continue;
					// ...
					int nId = lNId - groupConfigs[netId][lGrpId].lStartN;
					// ...
					int time = currentTimeSec * 1000 + t;
					// ...
					if (writeSpikesToFile) {
						// ...
						cnt = fwrite(&time, sizeof(int), 1, spkFileId); assert(cnt == 1);
						cnt = fwrite(&nId, sizeof(int), 1, spkFileId); assert(cnt == 1);
					}
					// ...
					if (writeSpikesToArray) {
						spkMonObj->pushAER(time, nId);
					}
				}
			}
		}
		// ...
		if (spkFileId != NULL) {
			// ...
		}
	}
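Each recorded spike above is written as an AER pair: an int time (ms) followed by an int neuron ID. The sketch below writes and reads exactly that two-int record; any header that the spike-monitor file carries before the records is outside this sketch.

// Sketch: the (time, neuronId) AER record format used by the fwrite calls above.
#include <cstdio>
#include <vector>
#include <utility>

void writeAER(FILE* f, int timeMs, int neurId) {
	fwrite(&timeMs, sizeof(int), 1, f);
	fwrite(&neurId, sizeof(int), 1, f);
}

std::vector<std::pair<int, int>> readAER(FILE* f) {
	std::vector<std::pair<int, int>> spikes;
	int t, id;
	while (fread(&t, sizeof(int), 1, f) == 1 && fread(&id, sizeof(int), 1, f) == 1)
		spikes.push_back(std::make_pair(t, id));
	return spikes;
}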
// ...
	if (!numNeuronMonitor)
		return;
	// ...
	if (gGrpId == ALL) {
		for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
			// ...
		}
	} else {
		int netId = groupConfigMDMap[gGrpId].netId;
		int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
		// ...
		int monitorId = groupConfigMDMap[gGrpId].neuronMonitorId;
		// ...
		if (monitorId < 0) return;
		// ...
		if (((long int)getSimTime()) - lastUpdate <= 0)
			return;
		// ...
		if (((long int)getSimTime()) - lastUpdate > 1000)
			KERNEL_ERROR("updateNeuronMonitor(grpId=%d) must be called at least once every second", gGrpId);
		// ...
		fetchNeuronStateBuffer(netId, lGrpId);
		// ...
		int numMsMin = lastUpdate % 1000;
		// ...
		assert(numMsMin < numMsMax);
		// ...
		bool writeNeuronStateToFile = nrnFileId != NULL;
		bool writeNeuronStateToArray = nrnMonObj->isRecording();
		// ...
		for (int t = numMsMin; t < numMsMax; t++) {
			// ...
			for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++) {
				// ...
				int this_grpId = managerRuntimeData.grpIds[lNId];
				if (this_grpId != lGrpId)
					continue;
				// ...
				int nId = lNId - groupConfigs[netId][lGrpId].lStartN;
				// ...
				v = managerRuntimeData.nVBuffer[idxBase + nId];
				u = managerRuntimeData.nUBuffer[idxBase + nId];
				I = managerRuntimeData.nIBuffer[idxBase + nId];
				// ...
				int time = currentTimeSec * 1000 + t;
				// ...
				if (writeNeuronStateToFile) {
					// ...
					cnt = fwrite(&time, sizeof(int), 1, nrnFileId); assert(cnt == 1);
					cnt = fwrite(&nId, sizeof(int), 1, nrnFileId); assert(cnt == 1);
					cnt = fwrite(&v, sizeof(float), 1, nrnFileId); assert(cnt == 1);
					cnt = fwrite(&u, sizeof(float), 1, nrnFileId); assert(cnt == 1);
					cnt = fwrite(&I, sizeof(float), 1, nrnFileId); assert(cnt == 1);
				}
				// ...
				if (writeNeuronStateToArray) {
					// ...
				}
			}
		}
		// ...
		if (nrnFileId != NULL) {
			// ...
		}
	}
void SNN::printSimSummary() {
	// ...
	etime = executionTime;
	// ...
	fetchNetworkSpikeCount();
	// ...
	KERNEL_INFO("******************** Simulation Summary ***************************");
	// ...
	KERNEL_INFO("Network Parameters: \tnumNeurons = %d (numNExcReg:numNInhReg = %2.1f:%2.1f)",
		/* ... */);
	// ...
	KERNEL_INFO("Simulation Mode:\t%s", sim_with_conductances ? "COBA" : "CUBA");
	// ...
	KERNEL_INFO("Timing:\t\t\tModel Simulation Time = %lld sec", (unsigned long long)simTimeSec);
	KERNEL_INFO("\t\t\tActual Execution Time = %4.2f sec", etime / 1000.0f);
	KERNEL_INFO("Average Firing Rate:\t2+ms delay = %3.3f Hz",
		/* ... */);
	// ...
	KERNEL_INFO("*********************************************************************************\n");
}