#define COMPACTION_ALIGNMENT_PRE  16
#define COMPACTION_ALIGNMENT_POST 0

    : networkName_(name), preferredSimMode_(preferredSimMode), loggerMode_(loggerMode),
      randSeed_(SNN::setRandSeed(randSeed))

    if (!simulatorDeleted)
short int SNN::connect(int grpId1, int grpId2, const std::string& _type, float initWt, float maxWt, float prob,
        uint8_t minDelay, uint8_t maxDelay, RadiusRF radius,
        float _mulSynFast, float _mulSynSlow, bool synWtType) {

    assert(grpId1 < numGroups);
    assert(grpId2 < numGroups);
    assert(minDelay <= maxDelay);

    connConfig.grpSrc = grpId1;
    connConfig.initWt = initWt;
    connConfig.maxWt = maxWt;
    connConfig.conn = NULL;

    if (_type.find("random") != std::string::npos) {
    else if (_type.find("full-no-direct") != std::string::npos) {
    else if (_type.find("full") != std::string::npos) {
    else if (_type.find("one-to-one") != std::string::npos) {
    } else if (_type.find("gaussian") != std::string::npos) {
        KERNEL_ERROR("Invalid connection type (should be 'random', 'full', 'one-to-one', 'full-no-direct', or 'gaussian')");

    assert(connConfig.connId == -1);
    connConfig.connId = numConnections;

    connectConfigMap[numConnections] = connConfig;

    return (numConnections - 1);
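    // Usage sketch (hypothetical group IDs, mirroring the signature above; not taken from this file):
    //   short int cId = snn.connect(gPre, gPost, "random", 0.05f, 0.10f, 0.1f,
    //                               1, 20, RadiusRF(-1), 1.0f, 1.0f, true /* plastic */);
    // The returned value is the ID of the newly registered connection.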
    assert(grpId1 < numGroups);
    assert(grpId2 < numGroups);

    connConfig.grpSrc = grpId1;
    connConfig.maxWt = 0.0f;
    connConfig.conn = conn;

    assert(connConfig.connId == -1);
    connConfig.connId = numConnections;

    connectConfigMap[numConnections] = connConfig;

    return (numConnections - 1);
    assert(grpIdLower >= 0 && grpIdLower < numGroups);
    assert(grpIdUpper >= 0 && grpIdUpper < numGroups);
    assert(grpIdLower != grpIdUpper);

    assert(groupConfigMap[grpIdLower].preferredNetId == groupConfigMap[grpIdUpper].preferredNetId);

    sim_with_compartments = true;

    compConnConfig.grpSrc = grpIdLower;
    compConnConfig.grpDest = grpIdUpper;
    compConnConfig.connId = -1;

    assert(compConnConfig.connId == -1);
    compConnConfig.connId = numCompartmentConnections;

    compConnectConfigMap[numCompartmentConnections] = compConnConfig;

    numCompartmentConnections++;

    return (numCompartmentConnections - 1);
    assert(neurType >= 0);

        KERNEL_ERROR("Invalid type using createGroup... Cannot create poisson generators here.");

    grpConfig.type = neurType;
    grpConfig.numN = grid.N;
    grpConfig.grid = grid;
    grpConfig.isLIF = false;

    if (preferredPartition == ANY) {
    } else if (preferredBackend == CPU_CORES) {

    grpConfigMD.gGrpId = numGroups;

    groupConfigMap[numGroups] = grpConfig;
    groupConfigMDMap[numGroups] = grpConfigMD;

    return grpConfigMD.gGrpId;
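    // Note: grid.N is the total neuron count of the Grid3D (numX * numY * numZ), so e.g. a
    // Grid3D(10, 10, 1) group ends up with grpConfig.numN == 100; the returned group ID is the
    // running numGroups counter captured in grpConfigMD.gGrpId above.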
    assert(neurType >= 0);

        KERNEL_ERROR("Invalid type using createGroup... Cannot create poisson generators here.");

    grpConfig.type = neurType;
    grpConfig.numN = grid.N;
    grpConfig.isLIF = true;
    grpConfig.grid = grid;

    if (preferredPartition == ANY) {
    } else if (preferredBackend == CPU_CORES) {

    grpConfigMD.gGrpId = numGroups;

    groupConfigMap[numGroups] = grpConfig;
    groupConfigMDMap[numGroups] = grpConfigMD;

    return grpConfigMD.gGrpId;
    assert(neurType >= 0);

    grpConfig.numN = grid.N;
    grpConfig.grid = grid;
    grpConfig.isLIF = false;

    if (preferredPartition == ANY) {

    else if (preferredBackend == CPU_CORES) {

    grpConfigMD.gGrpId = numGroups;

    groupConfigMap[numGroups] = grpConfig;
    groupConfigMDMap[numGroups] = grpConfigMD;

    return grpConfigMD.gGrpId;
    for (int grpId = 0; grpId < numGroups; grpId++) {

    groupConfigMap[gGrpId].withCompartments = true;
    groupConfigMap[gGrpId].compCouplingUp = couplingUp;
    groupConfigMap[gGrpId].compCouplingDown = couplingDown;
    glbNetworkConfig.numComp += groupConfigMap[gGrpId].numN;
void SNN::setConductances(bool isSet, int tdAMPA, int trNMDA, int tdNMDA, int tdGABAa, int trGABAb, int tdGABAb) {
    assert(tdAMPA > 0); assert(tdNMDA > 0); assert(tdGABAa > 0); assert(tdGABAb > 0);
    assert(trNMDA >= 0); assert(trGABAb >= 0);
    assert(trNMDA != tdNMDA); assert(trGABAb != tdGABAb);

    sim_with_conductances |= isSet;
    dAMPA  = 1.0 - 1.0 / tdAMPA;
    dNMDA  = 1.0 - 1.0 / tdNMDA;
    dGABAa = 1.0 - 1.0 / tdGABAa;
    dGABAb = 1.0 - 1.0 / tdGABAb;

        sim_with_NMDA_rise = true;
        rNMDA = 1.0 - 1.0 / trNMDA;

        double tmax = (-tdNMDA * trNMDA * log(1.0 * trNMDA / tdNMDA)) / (tdNMDA - trNMDA);
        sNMDA = 1.0 / (exp(-tmax / tdNMDA) - exp(-tmax / trNMDA));
        assert(!isinf(tmax) && !isnan(tmax) && tmax >= 0);
        assert(!isinf(sNMDA) && !isnan(sNMDA) && sNMDA > 0);
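        // The rise/decay factors above implement a per-ms update g <- r*g or g <- d*g with
        // r,d = 1 - 1/tau (approximately exp(-1/tau) for large tau). With a finite rise time the
        // conductance is a difference of exponentials, exp(-t/td) - exp(-t/tr); that difference
        // peaks at tmax = td*tr*ln(td/tr)/(td - tr), and sNMDA = 1/(exp(-tmax/td) - exp(-tmax/tr))
        // rescales it so the peak amplitude equals 1. The same normalization is applied to GABAb below.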
        sim_with_GABAb_rise = true;
        rGABAb = 1.0 - 1.0 / trGABAb;

        double tmax = (-tdGABAb * trGABAb * log(1.0 * trGABAb / tdGABAb)) / (tdGABAb - trGABAb);
        sGABAb = 1.0 / (exp(-tmax / tdGABAb) - exp(-tmax / trGABAb));
        assert(!isinf(tmax) && !isnan(tmax)); assert(!isinf(sGABAb) && !isnan(sGABAb) && sGABAb > 0);

    if (sim_with_conductances) {
        KERNEL_INFO(" - AMPA decay time = %5d ms", tdAMPA);
        KERNEL_INFO(" - NMDA rise time %s = %5d ms", sim_with_NMDA_rise ? " " : "(disabled)", trNMDA);
        KERNEL_INFO(" - GABAa decay time = %5d ms", tdGABAa);
        KERNEL_INFO(" - GABAb rise time %s = %5d ms", sim_with_GABAb_rise ? " " : "(disabled)", trGABAb);
        KERNEL_INFO(" - GABAb decay time = %5d ms", tdGABAb);

        KERNEL_INFO("Running CUBA mode (all synaptic conductances disabled)");
    for (int grpId = 0; grpId < numGroups; grpId++) {

    sim_with_homeostasis |= isSet;
    groupConfigMap[gGrpId].homeoConfig.WithHomeostasis = isSet;
    groupConfigMap[gGrpId].homeoConfig.homeostasisScale = homeoScale;
    groupConfigMap[gGrpId].homeoConfig.avgTimeScale = avgTimeScale;
    groupConfigMap[gGrpId].homeoConfig.avgTimeScaleInv = 1.0f / avgTimeScale;
    groupConfigMap[gGrpId].homeoConfig.avgTimeScaleDecay = (avgTimeScale * 1000.0f - 1.0f) / (avgTimeScale * 1000.0f);

    KERNEL_INFO("Homeostasis parameters %s for %d (%s):\thomeoScale: %f, avgTimeScale: %f",
        isSet ? "enabled" : "disabled", gGrpId, groupConfigMap[gGrpId].grpName.c_str(), homeoScale, avgTimeScale);
    for (int grpId = 0; grpId < numGroups; grpId++) {

    groupConfigMap[gGrpId].homeoConfig.baseFiring = baseFiring;
    groupConfigMap[gGrpId].homeoConfig.baseFiringSD = baseFiringSD;

    KERNEL_INFO("Homeostatic base firing rate set for %d (%s):\tbaseFiring: %3.3f, baseFiringStd: %3.3f",
        gGrpId, groupConfigMap[gGrpId].grpName.c_str(), baseFiring, baseFiringSD);
    assert(numStepsPerMs >= 1 && numStepsPerMs <= 100);

    glbNetworkConfig.timeStep = 1.0f / numStepsPerMs;
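    // With numStepsPerMs integration steps per millisecond, the step size is
    // timeStep = 1/numStepsPerMs ms (e.g. numStepsPerMs = 2 gives 0.5 ms steps);
    // the assert above therefore bounds the step to the range [0.01 ms, 1 ms].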
        float izh_c, float izh_c_sd, float izh_d, float izh_d_sd)
    assert(gGrpId >= -1);
    assert(izh_a_sd >= 0); assert(izh_b_sd >= 0); assert(izh_c_sd >= 0); assert(izh_d_sd >= 0);

        for (int grpId = 0; grpId < numGroups; grpId++) {
            setNeuronParameters(grpId, izh_a, izh_a_sd, izh_b, izh_b_sd, izh_c, izh_c_sd, izh_d, izh_d_sd);

        groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_a = izh_a;
        groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_a_sd = izh_a_sd;
        groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_b = izh_b;
        groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_b_sd = izh_b_sd;
        groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_c = izh_c;
        groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_c_sd = izh_c_sd;
        groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_d = izh_d;
        groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_d_sd = izh_d_sd;
        groupConfigMap[gGrpId].withParamModel_9 = 0;
        groupConfigMap[gGrpId].isLIF = 0;
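        // For reference (standard Izhikevich values, not taken from this file): a regular-spiking
        // neuron is commonly configured with izh_a = 0.02, izh_b = 0.2, izh_c = -65, izh_d = 8 and
        // all standard deviations set to 0, e.g.
        //   snn.setNeuronParameters(gExc, 0.02f, 0.0f, 0.2f, 0.0f, -65.0f, 0.0f, 8.0f, 0.0f);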
        float izh_vr, float izh_vr_sd, float izh_vt, float izh_vt_sd,
        float izh_a, float izh_a_sd, float izh_b, float izh_b_sd,
        float izh_vpeak, float izh_vpeak_sd, float izh_c, float izh_c_sd,
        float izh_d, float izh_d_sd)
    assert(gGrpId >= -1);
    assert(izh_C_sd >= 0); assert(izh_k_sd >= 0); assert(izh_vr_sd >= 0);
    assert(izh_vt_sd >= 0); assert(izh_a_sd >= 0); assert(izh_b_sd >= 0); assert(izh_vpeak_sd >= 0);
    assert(izh_c_sd >= 0); assert(izh_d_sd >= 0);

        for (int grpId = 0; grpId < numGroups; grpId++) {
            setNeuronParameters(grpId, izh_C, izh_C_sd, izh_k, izh_k_sd, izh_vr, izh_vr_sd, izh_vt, izh_vt_sd,
                izh_a, izh_a_sd, izh_b, izh_b_sd, izh_vpeak, izh_vpeak_sd, izh_c, izh_c_sd,

        groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_a = izh_a;
        groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_a_sd = izh_a_sd;
        groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_b = izh_b;
        groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_b_sd = izh_b_sd;
        groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_c = izh_c;
        groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_c_sd = izh_c_sd;
        groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_d = izh_d;
        groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_d_sd = izh_d_sd;
        groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_C = izh_C;
        groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_C_sd = izh_C_sd;
        groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_k = izh_k;
        groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_k_sd = izh_k_sd;
        groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vr = izh_vr;
        groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vr_sd = izh_vr_sd;
        groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vt = izh_vt;
        groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vt_sd = izh_vt_sd;
        groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vpeak = izh_vpeak;
        groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vpeak_sd = izh_vpeak_sd;
        groupConfigMap[gGrpId].withParamModel_9 = 1;
        groupConfigMap[gGrpId].isLIF = 0;
    assert(gGrpId >= -1);
    assert(tau_m >= 0); assert(tau_ref >= 0); assert(vReset < vTh);
    assert(minRmem >= 0.0f); assert(minRmem <= maxRmem);

        for (int grpId = 0; grpId < numGroups; grpId++) {

        groupConfigMap[gGrpId].neuralDynamicsConfig.lif_tau_m = tau_m;
        groupConfigMap[gGrpId].neuralDynamicsConfig.lif_tau_ref = tau_ref;
        groupConfigMap[gGrpId].neuralDynamicsConfig.lif_vTh = vTh;
        groupConfigMap[gGrpId].neuralDynamicsConfig.lif_vReset = vReset;
        groupConfigMap[gGrpId].neuralDynamicsConfig.lif_minRmem = minRmem;
        groupConfigMap[gGrpId].neuralDynamicsConfig.lif_maxRmem = maxRmem;
        groupConfigMap[gGrpId].withParamModel_9 = 0;
        groupConfigMap[gGrpId].isLIF = 1;
        float tauACh, float baseNE, float tauNE) {
    assert(gGrpId >= -1);
    assert(baseDP > 0.0f); assert(base5HT > 0.0f); assert(baseACh > 0.0f); assert(baseNE > 0.0f);
    assert(tauDP > 0); assert(tau5HT > 0); assert(tauACh > 0); assert(tauNE > 0);

        for (int grpId = 0; grpId < numGroups; grpId++) {
            setNeuromodulator(grpId, baseDP, tauDP, base5HT, tau5HT, baseACh, tauACh, baseNE, tauNE);

        groupConfigMap[gGrpId].neuromodulatorConfig.baseDP = baseDP;
        groupConfigMap[gGrpId].neuromodulatorConfig.decayDP = 1.0f - (1.0f / tauDP);
        groupConfigMap[gGrpId].neuromodulatorConfig.base5HT = base5HT;
        groupConfigMap[gGrpId].neuromodulatorConfig.decay5HT = 1.0f - (1.0f / tau5HT);
        groupConfigMap[gGrpId].neuromodulatorConfig.baseACh = baseACh;
        groupConfigMap[gGrpId].neuromodulatorConfig.decayACh = 1.0f - (1.0f / tauACh);
        groupConfigMap[gGrpId].neuromodulatorConfig.baseNE = baseNE;
        groupConfigMap[gGrpId].neuromodulatorConfig.decayNE = 1.0f - (1.0f / tauNE);
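        // As with the synaptic conductances, decayXX = 1 - 1/tauXX is a per-millisecond decay
        // factor; the runtime presumably uses it to relax each modulator concentration back
        // toward its baseXX level with a time constant of roughly tauXX ms.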
    assert(gGrpId >= -1);

    assert(tauPlus > 0.0f); assert(tauMinus > 0.0f); assert(gamma >= 0.0f);

        for (int grpId = 0; grpId < numGroups; grpId++) {
            setESTDP(grpId, isSet, type, curve, alphaPlus, tauPlus, alphaMinus, tauMinus, gamma);

        groupConfigMap[gGrpId].stdpConfig.ALPHA_PLUS_EXC = alphaPlus;
        groupConfigMap[gGrpId].stdpConfig.ALPHA_MINUS_EXC = alphaMinus;
        groupConfigMap[gGrpId].stdpConfig.TAU_PLUS_INV_EXC = 1.0f / tauPlus;
        groupConfigMap[gGrpId].stdpConfig.TAU_MINUS_INV_EXC = 1.0f / tauMinus;
        groupConfigMap[gGrpId].stdpConfig.GAMMA = gamma;
        groupConfigMap[gGrpId].stdpConfig.KAPPA = (1 + exp(-gamma / tauPlus)) / (1 - exp(-gamma / tauPlus));
        groupConfigMap[gGrpId].stdpConfig.OMEGA = alphaPlus * (1 - groupConfigMap[gGrpId].stdpConfig.KAPPA);

        groupConfigMap[gGrpId].stdpConfig.WithESTDPtype = type;
        groupConfigMap[gGrpId].stdpConfig.WithESTDPcurve = curve;
        groupConfigMap[gGrpId].stdpConfig.WithESTDP = isSet;
        groupConfigMap[gGrpId].stdpConfig.WithSTDP |= groupConfigMap[gGrpId].stdpConfig.WithESTDP;
        sim_with_stdp |= groupConfigMap[gGrpId].stdpConfig.WithSTDP;
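        // KAPPA and OMEGA are derived constants (see the two assignments above); together with
        // GAMMA they parameterize the timing-based E-STDP curve, while the plain exponential
        // curve only needs the alpha/tau pairs (the taus are stored as inverses for the runtime).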
        KERNEL_INFO("E-STDP %s for %s(%d)", isSet ? "enabled" : "disabled", groupConfigMap[gGrpId].grpName.c_str(), gGrpId);

    assert(gGrpId >= -1);

    assert(tau1 > 0); assert(tau2 > 0);

        for (int grpId = 0; grpId < numGroups; grpId++) {
            setISTDP(grpId, isSet, type, curve, ab1, ab2, tau1, tau2);

            groupConfigMap[gGrpId].stdpConfig.ALPHA_PLUS_INB = ab1;
            groupConfigMap[gGrpId].stdpConfig.ALPHA_MINUS_INB = ab2;
            groupConfigMap[gGrpId].stdpConfig.TAU_PLUS_INV_INB = 1.0f / tau1;
            groupConfigMap[gGrpId].stdpConfig.TAU_MINUS_INV_INB = 1.0f / tau2;
            groupConfigMap[gGrpId].stdpConfig.BETA_LTP = 0.0f;
            groupConfigMap[gGrpId].stdpConfig.BETA_LTD = 0.0f;
            groupConfigMap[gGrpId].stdpConfig.LAMBDA = 1.0f;
            groupConfigMap[gGrpId].stdpConfig.DELTA = 1.0f;

            groupConfigMap[gGrpId].stdpConfig.ALPHA_PLUS_INB = 0.0f;
            groupConfigMap[gGrpId].stdpConfig.ALPHA_MINUS_INB = 0.0f;
            groupConfigMap[gGrpId].stdpConfig.TAU_PLUS_INV_INB = 1.0f;
            groupConfigMap[gGrpId].stdpConfig.TAU_MINUS_INV_INB = 1.0f;
            groupConfigMap[gGrpId].stdpConfig.BETA_LTP = ab1;
            groupConfigMap[gGrpId].stdpConfig.BETA_LTD = ab2;
            groupConfigMap[gGrpId].stdpConfig.LAMBDA = tau1;
            groupConfigMap[gGrpId].stdpConfig.DELTA = tau2;

        groupConfigMap[gGrpId].stdpConfig.WithISTDPtype = type;
        groupConfigMap[gGrpId].stdpConfig.WithISTDPcurve = curve;
        groupConfigMap[gGrpId].stdpConfig.WithISTDP = isSet;
        groupConfigMap[gGrpId].stdpConfig.WithSTDP |= groupConfigMap[gGrpId].stdpConfig.WithISTDP;
        sim_with_stdp |= groupConfigMap[gGrpId].stdpConfig.WithSTDP;

        KERNEL_INFO("I-STDP %s for %s(%d)", isSet ? "enabled" : "disabled", groupConfigMap[gGrpId].grpName.c_str(), gGrpId);
void SNN::setSTP(int gGrpId, bool isSet, float STP_U, float STP_tau_u, float STP_tau_x) {
    assert(gGrpId >= -1);

    assert(STP_U > 0 && STP_U <= 1); assert(STP_tau_u > 0); assert(STP_tau_x > 0);

        for (int grpId = 0; grpId < numGroups; grpId++) {
            setSTP(grpId, isSet, STP_U, STP_tau_u, STP_tau_x);

        sim_with_stp |= isSet;
        groupConfigMap[gGrpId].stpConfig.WithSTP = isSet;
        groupConfigMap[gGrpId].stpConfig.STP_A = (STP_U > 0.0f) ? 1.0 / STP_U : 1.0f;
        groupConfigMap[gGrpId].stpConfig.STP_U = STP_U;
        groupConfigMap[gGrpId].stpConfig.STP_tau_u_inv = 1.0f / STP_tau_u;
        groupConfigMap[gGrpId].stpConfig.STP_tau_x_inv = 1.0f / STP_tau_x;

        KERNEL_INFO("STP %s for %d (%s):\tA: %1.4f, U: %1.4f, tau_u: %4.0f, tau_x: %4.0f", isSet ? "enabled" : "disabled",
            gGrpId, groupConfigMap[gGrpId].grpName.c_str(), groupConfigMap[gGrpId].stpConfig.STP_A, STP_U, STP_tau_u, STP_tau_x);
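        // Short-term plasticity follows a Tsodyks-Markram-style model: STP_U is the baseline
        // utilization (release probability), STP_tau_u and STP_tau_x the facilitation and
        // depression recovery time constants (stored here as inverses for the runtime), and
        // STP_A = 1/U acts as a normalization factor on the effective synaptic weight.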
    assert(wtChangeDecay > 0.0f && wtChangeDecay < 1.0f);

    switch (wtANDwtChangeUpdateInterval) {
            wtANDwtChangeUpdateInterval_ = 10;
            wtANDwtChangeUpdateInterval_ = 100;
            wtANDwtChangeUpdateInterval_ = 1000;

    if (enableWtChangeDecay) {
        // in the branches shown here, stdpScaleFactor_ works out to wtANDwtChangeUpdateInterval_ / 2000.0f
        switch (wtANDwtChangeUpdateInterval) {
                stdpScaleFactor_ = 0.005f;
                stdpScaleFactor_ = 0.05f;
                stdpScaleFactor_ = 0.5f;
        wtChangeDecay_ = wtChangeDecay;
        stdpScaleFactor_ = 1.0f;
        wtChangeDecay_ = 0.0f;

    KERNEL_INFO("Update weight and weight change every %d ms", wtANDwtChangeUpdateInterval_);
    KERNEL_INFO("Weight Change Decay is %s", enableWtChangeDecay ? "enabled" : "disabled");
    KERNEL_INFO("STDP scale factor = %1.3f, wtChangeDecay = %1.3f", stdpScaleFactor_, wtChangeDecay_);
    generateRuntimeSNN();

    assert(_nmsec >= 0 && _nmsec < 1000);

    int runDurationMs = _nsec * 1000 + _nmsec;
    KERNEL_DEBUG("runNetwork: runDur=%dms, printRunSummary=%s", runDurationMs, printRunSummary ? "y" : "n");

    printRunSummary = (loggerMode_ == SILENT) ? false : printRunSummary;

    if (simTime == 0 && printRunSummary) {
        KERNEL_INFO("******************** Running the simulation on %d GPU(s) and %d CPU(s) ***************************", numGPUs, numCores);

    simTimeRunStart = simTime;
    simTimeRunStop = simTime + runDurationMs;
    assert(simTimeRunStop >= simTimeRunStart);

    if (simTime == 0 && numConnectionMonitor) {

    CUDA_RESET_TIMER(timer);
    CUDA_START_TIMER(timer);

    for (int i = 0; i < runDurationMs; i++) {

        if (!sim_with_fixedwts && wtANDwtChangeUpdateInterval_ == ++wtANDwtChangeUpdateIntervalCnt_) {
            wtANDwtChangeUpdateIntervalCnt_ = 0;

            if (!sim_in_testing) {

        if (numSpikeMonitor) {
        if (numGroupMonitor) {
        if (numConnectionMonitor) {
        if (numNeuronMonitor) {

    fetchNeuronSpikeCount(ALL);

    if (printRunSummary) {

        if (numSpikeMonitor) {
            printStatusSpikeMonitor(ALL);
        if (numConnectionMonitor) {
            printStatusConnectionMonitor(ALL);
        if (numGroupMonitor) {
            printStatusGroupMonitor(ALL);

        simTimeLastRunSummary = simTime;

    CUDA_STOP_TIMER(timer);
    lastExecutionTime = CUDA_GET_TIMER_VALUE(timer);
    cumExecutionTime += lastExecutionTime;
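    // Typical call pattern (hypothetical values, mirroring the parameters used above):
    // runNetwork(1, 0, true) advances the simulation by 1*1000 + 0 = 1000 ms, one millisecond
    // per iteration of the for-loop above; in SILENT logger mode the run summary is forced off.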
    assert(connId >= 0 && connId < numConnections);

    int netId = groupConfigMDMap[connectConfigMap[connId].grpDest].netId;
    int lGrpId = groupConfigMDMap[connectConfigMap[connId].grpDest].lGrpId;

    fetchPreConnectionInfo(netId);
    fetchConnIdsLookupArray(netId);
    fetchSynapseState(netId);

    for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++) {
        unsigned int cumIdx = managerRuntimeData.cumulativePre[lNId];

        unsigned int pos_ij = cumIdx;
        for (int j = 0; j < managerRuntimeData.Npre[lNId]; pos_ij++, j++) {

            float weight = managerRuntimeData.wt[pos_ij] + bias;

            bool needToPrintDebug = (weight > connectConfigMap[connId].maxWt || weight < 0.0f);

            if (updateWeightRange) {

                connectConfigMap[connId].maxWt = std::max(connectConfigMap[connId].maxWt, weight);
                if (needToPrintDebug) {
                    KERNEL_DEBUG("biasWeights(%d,%f,%s): updated weight ranges to [%f,%f]", connId, bias,
                        (updateWeightRange ? "true" : "false"), 0.0f, connectConfigMap[connId].maxWt);

                weight = std::min(weight, connectConfigMap[connId].maxWt);
                weight = std::max(weight, 0.0f);
                if (needToPrintDebug) {
                    KERNEL_DEBUG("biasWeights(%d,%f,%s): constrained weight %f to [%f,%f]", connId, bias,
                        (updateWeightRange ? "true" : "false"), weight, 0.0f, connectConfigMap[connId].maxWt);

            managerRuntimeData.wt[pos_ij] = weight;
            managerRuntimeData.maxSynWt[pos_ij] = connectConfigMap[connId].maxWt;

        CUDA_CHECK_ERRORS( cudaMemcpy(&(runtimeData[netId].wt[cumIdx]), &(managerRuntimeData.wt[cumIdx]),
            sizeof(float) * managerRuntimeData.Npre[lNId], cudaMemcpyHostToDevice) );

        if (runtimeData[netId].maxSynWt != NULL) {

            CUDA_CHECK_ERRORS( cudaMemcpy(&(runtimeData[netId].maxSynWt[cumIdx]), &(managerRuntimeData.maxSynWt[cumIdx]),
                sizeof(float) * managerRuntimeData.Npre[lNId], cudaMemcpyHostToDevice) );

        memcpy(&runtimeData[netId].wt[cumIdx], &managerRuntimeData.wt[cumIdx], sizeof(float) * managerRuntimeData.Npre[lNId]);

        if (runtimeData[netId].maxSynWt != NULL) {

            memcpy(&runtimeData[netId].maxSynWt[cumIdx], &managerRuntimeData.maxSynWt[cumIdx], sizeof(float) * managerRuntimeData.Npre[lNId]);
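        // Synapses are stored per postsynaptic neuron in flat arrays: cumulativePre[lNId] is the
        // offset of neuron lNId's presynaptic row in wt/maxSynWt and Npre[lNId] its length, so the
        // inner loop walks pos_ij = cumIdx .. cumIdx + Npre[lNId] - 1 and the whole row is then
        // copied back to the device (cudaMemcpy) or into CPU runtime data (memcpy) in one block.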
    assert(connId >= 0 && connId < numConnections);
    assert(scale >= 0.0f);

    int netId = groupConfigMDMap[connectConfigMap[connId].grpDest].netId;
    int lGrpId = groupConfigMDMap[connectConfigMap[connId].grpDest].lGrpId;

    fetchPreConnectionInfo(netId);
    fetchConnIdsLookupArray(netId);
    fetchSynapseState(netId);

    for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++) {
        unsigned int cumIdx = managerRuntimeData.cumulativePre[lNId];

        unsigned int pos_ij = cumIdx;
        for (int j = 0; j < managerRuntimeData.Npre[lNId]; pos_ij++, j++) {

            float weight = managerRuntimeData.wt[pos_ij] * scale;

            bool needToPrintDebug = (weight > connectConfigMap[connId].maxWt || weight < 0.0f);

            if (updateWeightRange) {

                connectConfigMap[connId].maxWt = std::max(connectConfigMap[connId].maxWt, weight);
                if (needToPrintDebug) {
                    KERNEL_DEBUG("scaleWeights(%d,%f,%s): updated weight ranges to [%f,%f]", connId, scale,
                        (updateWeightRange ? "true" : "false"), 0.0f, connectConfigMap[connId].maxWt);

                weight = std::min(weight, connectConfigMap[connId].maxWt);
                weight = std::max(weight, 0.0f);
                if (needToPrintDebug) {
                    KERNEL_DEBUG("scaleWeights(%d,%f,%s): constrained weight %f to [%f,%f]", connId, scale,
                        (updateWeightRange ? "true" : "false"), weight, 0.0f, connectConfigMap[connId].maxWt);

            managerRuntimeData.wt[pos_ij] = weight;
            managerRuntimeData.maxSynWt[pos_ij] = connectConfigMap[connId].maxWt;

        CUDA_CHECK_ERRORS(cudaMemcpy(&runtimeData[netId].wt[cumIdx], &managerRuntimeData.wt[cumIdx],
            sizeof(float) * managerRuntimeData.Npre[lNId], cudaMemcpyHostToDevice));

        if (runtimeData[netId].maxSynWt != NULL) {

            CUDA_CHECK_ERRORS(cudaMemcpy(&runtimeData[netId].maxSynWt[cumIdx], &managerRuntimeData.maxSynWt[cumIdx],
                sizeof(float) * managerRuntimeData.Npre[lNId], cudaMemcpyHostToDevice));

        memcpy(&runtimeData[netId].wt[cumIdx], &managerRuntimeData.wt[cumIdx], sizeof(float) * managerRuntimeData.Npre[lNId]);

        if (runtimeData[netId].maxSynWt != NULL) {

            memcpy(&runtimeData[netId].maxSynWt[cumIdx], &managerRuntimeData.maxSynWt[cumIdx], sizeof(float) * managerRuntimeData.Npre[lNId]);
    int netId = groupConfigMDMap[gGrpId].netId;
    int lGrpId = groupConfigMDMap[gGrpId].lGrpId;

    if (groupConfigMDMap[gGrpId].groupMonitorId >= 0) {
        KERNEL_ERROR("setGroupMonitor has already been called on Group %d (%s).", gGrpId, groupConfigMap[gGrpId].grpName.c_str());

    groupMonCoreList[numGroupMonitor] = grpMonCoreObj;

    groupMonList[numGroupMonitor] = grpMonObj;

    groupConfigMDMap[gGrpId].groupMonitorId = numGroupMonitor;

    KERNEL_INFO("GroupMonitor set for group %d (%s)", gGrpId, groupConfigMap[gGrpId].grpName.c_str());

    if (connectConfigMap[connId].connectionMonitorId >= 0) {
        KERNEL_ERROR("setConnectionMonitor has already been called on Connection %d (MonitorId=%d)", connId, connectConfigMap[connId].connectionMonitorId);

    connectConfigMap[connId].connectionMonitorId = numConnectionMonitor;

        grpIdPre, grpIdPost);
    connMonCoreList[numConnectionMonitor] = connMonCoreObj;

    connMonList[numConnectionMonitor] = connMonObj;

    connMonCoreObj->init();

    numConnectionMonitor++;
    KERNEL_INFO("ConnectionMonitor %d set for Connection %d: %d(%s) => %d(%s)", connectConfigMap[connId].connectionMonitorId, connId, grpIdPre, getGroupName(grpIdPre).c_str(),
    assert(spikeGenFunc);
    assert(groupConfigMap[gGrpId].isSpikeGenerator);
    groupConfigMap[gGrpId].spikeGenFunc = spikeGenFunc;

    if (groupConfigMDMap[gGrpId].spikeMonitorId >= 0) {

        KERNEL_INFO("SpikeMonitor updated for group %d (%s)", gGrpId, groupConfigMap[gGrpId].grpName.c_str());

    spikeMonCoreList[numSpikeMonitor] = spkMonCoreObj;

    spikeMonList[numSpikeMonitor] = spkMonObj;

    groupConfigMDMap[gGrpId].spikeMonitorId = numSpikeMonitor;

    KERNEL_INFO("SpikeMonitor set for group %d (%s)", gGrpId, groupConfigMap[gGrpId].grpName.c_str());

    int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
    int netId = groupConfigMDMap[gGrpId].netId;

        KERNEL_WARN("Due to limited memory space, only the first 128 neurons can be monitored by NeuronMonitor");

    if (groupConfigMDMap[gGrpId].neuronMonitorId >= 0) {

        KERNEL_INFO("NeuronMonitor updated for group %d (%s)", gGrpId, groupConfigMap[gGrpId].grpName.c_str());

    neuronMonCoreList[numNeuronMonitor] = nrnMonCoreObj;

    neuronMonList[numNeuronMonitor] = nrnMonObj;

    groupConfigMDMap[gGrpId].neuronMonitorId = numNeuronMonitor;

    KERNEL_INFO("NeuronMonitor set for group %d (%s)", gGrpId, groupConfigMap[gGrpId].grpName.c_str());
    int netId = groupConfigMDMap[gGrpId].netId;
    int lGrpId = groupConfigMDMap[gGrpId].lGrpId;

    assert(gGrpId >= 0 && lGrpId < networkConfigs[netId].numGroups);
    assert(groupConfigMap[gGrpId].isSpikeGenerator);
    assert(ratePtr->getNumNeurons() == groupConfigMap[gGrpId].numN);
    assert(refPeriod >= 1);

    groupConfigMDMap[gGrpId].ratePtr = ratePtr;
    groupConfigMDMap[gGrpId].refractPeriod = refPeriod;
    spikeRateUpdated = true;
void SNN::setWeight(short int connId, int neurIdPre, int neurIdPost, float weight, bool updateWeightRange) {
    assert(weight >= 0.0f);

    assert(neurIdPre >= 0 && neurIdPre < getGroupNumNeurons(connectConfigMap[connId].grpSrc));
    assert(neurIdPost >= 0 && neurIdPost < getGroupNumNeurons(connectConfigMap[connId].grpDest));

    float maxWt = fabs(connectConfigMap[connId].maxWt);

    bool needToPrintDebug = (weight > maxWt || weight < minWt);

    int netId = groupConfigMDMap[connectConfigMap[connId].grpDest].netId;
    int postlGrpId = groupConfigMDMap[connectConfigMap[connId].grpDest].lGrpId;
    int prelGrpId = groupConfigMDMap[connectConfigMap[connId].grpSrc].lGrpId;

    fetchPreConnectionInfo(netId);
    fetchConnIdsLookupArray(netId);
    fetchSynapseState(netId);

    if (updateWeightRange) {

        maxWt = fmax(maxWt, weight);
        if (needToPrintDebug) {
            KERNEL_DEBUG("setWeight(%d,%d,%d,%f,%s): updated weight ranges to [%f,%f]", connId, neurIdPre, neurIdPost,
                weight, (updateWeightRange ? "true" : "false"), minWt, maxWt);

        weight = fmin(weight, maxWt);
        weight = fmax(weight, minWt);
        if (needToPrintDebug) {
            KERNEL_DEBUG("setWeight(%d,%d,%d,%f,%s): constrained weight %f to [%f,%f]", connId, neurIdPre, neurIdPost,
                weight, (updateWeightRange ? "true" : "false"), weight, minWt, maxWt);

    int neurIdPreReal = groupConfigs[netId][prelGrpId].lStartN + neurIdPre;
    int neurIdPostReal = groupConfigs[netId][postlGrpId].lStartN + neurIdPost;

    bool synapseFound = false;
    int pos_ij = managerRuntimeData.cumulativePre[neurIdPostReal];
    for (int j = 0; j < managerRuntimeData.Npre[neurIdPostReal]; pos_ij++, j++) {

            // weights of synapses from inhibitory source groups are stored with a negative sign internally
            managerRuntimeData.wt[pos_ij] = isExcitatoryGroup(connectConfigMap[connId].grpSrc) ? weight : -1.0 * weight;

                CUDA_CHECK_ERRORS(cudaMemcpy(&runtimeData[netId].wt[pos_ij], &managerRuntimeData.wt[pos_ij], sizeof(float), cudaMemcpyHostToDevice));
                if (runtimeData[netId].maxSynWt != NULL) {

                    CUDA_CHECK_ERRORS(cudaMemcpy(&runtimeData[netId].maxSynWt[pos_ij], &managerRuntimeData.maxSynWt[pos_ij], sizeof(float), cudaMemcpyHostToDevice));

                memcpy(&runtimeData[netId].wt[pos_ij], &managerRuntimeData.wt[pos_ij], sizeof(float));
                if (runtimeData[netId].maxSynWt != NULL) {

                    memcpy(&runtimeData[netId].maxSynWt[pos_ij], &managerRuntimeData.maxSynWt[pos_ij], sizeof(float));

            synapseFound = true;

    if (!synapseFound) {
        KERNEL_WARN("setWeight(%d,%d,%d,%f,%s): Synapse does not exist, not updated.", connId, neurIdPre, neurIdPost,
            weight, (updateWeightRange ? "true" : "false"));
    assert(grpId >= 0); assert(grpId < numGroups);

    int netId = groupConfigMDMap[grpId].netId;
    int lGrpId = groupConfigMDMap[grpId].lGrpId;

    for (int lNId = groupConfigs[netId][lGrpId].lStartN, j = 0; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++, j++) {
        managerRuntimeData.extCurrent[lNId] = current[j];

        copyExternalCurrent(netId, lGrpId, &runtimeData[netId], cudaMemcpyHostToDevice, false);

        copyExternalCurrent(netId, lGrpId, &runtimeData[netId], false);
    if (!fwrite(&tmpInt, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");

    if (!fwrite(&tmpFloat, sizeof(float), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");

    tmpFloat = ((float)simTimeSec) + ((float)simTimeMs) / 1000.0f;
    if (!fwrite(&tmpFloat, sizeof(float), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");

    tmpFloat = executionTime / 1000.0f;
    if (!fwrite(&tmpFloat, sizeof(float), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");

    if (!fwrite(&glbNetworkConfig.numN, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");

    if (!fwrite(&numGroups, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");

    for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
        if (!fwrite(&groupConfigMDMap[gGrpId].gStartN, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
        if (!fwrite(&groupConfigMDMap[gGrpId].gEndN, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");

        if (!fwrite(&groupConfigMap[gGrpId].grid.numX, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
        if (!fwrite(&groupConfigMap[gGrpId].grid.numY, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
        if (!fwrite(&groupConfigMap[gGrpId].grid.numZ, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");

        strncpy(name, groupConfigMap[gGrpId].grpName.c_str(), 100);
        if (!fwrite(name, 1, 100, fid)) KERNEL_ERROR("saveSimulation fwrite error");

    if (!saveSynapseInfo) return;

        if (!groupPartitionLists[netId].empty()) {

    if (!fwrite(&net_count, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");

        if (!groupPartitionLists[netId].empty()) {

            fetchPreConnectionInfo(netId);
            fetchPostConnectionInfo(netId);
            fetchConnIdsLookupArray(netId);
            fetchSynapseState(netId);

            int numSynToSave = 0;
            for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
                if (grpIt->netId == netId) {
                    numSynToSave += grpIt->numPostSynapses;

            if (!fwrite(&numSynToSave, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");

            int numSynSaved = 0;
            for (int lNId = 0; lNId < networkConfigs[netId].numNAssigned; lNId++) {

                for (int t = 0; t < glbNetworkConfig.maxDelay; t++) {

                        int pre_pos = managerRuntimeData.cumulativePre[lNIdPost] + preSynId;

                        float weight = managerRuntimeData.wt[pre_pos];
                        float maxWeight = managerRuntimeData.maxSynWt[pre_pos];

                        int gGrpIdPre = groupConfigs[netId][lGrpIdPre].gGrpId;
                        int gGrpIdPost = groupConfigs[netId][lGrpIdPost].gGrpId;
                        int grpNIdPre = lNId - groupConfigs[netId][lGrpIdPre].lStartN;
                        int grpNIdPost = lNIdPost - groupConfigs[netId][lGrpIdPost].lStartN;

                        if (groupConfigMDMap[gGrpIdPre].netId == netId) {

                            if (!fwrite(&gGrpIdPre, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
                            if (!fwrite(&gGrpIdPost, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
                            if (!fwrite(&grpNIdPre, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
                            if (!fwrite(&grpNIdPost, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
                            if (!fwrite(&connId, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
                            if (!fwrite(&weight, sizeof(float), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
                            if (!fwrite(&maxWeight, sizeof(float), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
                            if (!fwrite(&delay, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");

            assert(numSynSaved == numSynToSave);
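            // As written above, each saved synapse record is: gGrpIdPre, gGrpIdPost, grpNIdPre,
            // grpNIdPost and connId as 4-byte ints, then weight and maxWeight as floats, then the
            // delay as an int; each network partition is preceded by its numSynToSave count.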
    if (fpInf_ != NULL && fpInf_ != stdout && fpInf_ != stderr)

    if (fpErr_ != NULL && fpErr_ != stdout && fpErr_ != stderr)

    if (fpDeb_ != NULL && fpDeb_ != stdout && fpDeb_ != stderr)

    if (fpLog_ != NULL && fpLog_ != stdout && fpLog_ != stderr)

    short int connId = -1;

    for (std::map<int, ConnectConfig>::iterator it = connectConfigMap.begin(); it != connectConfigMap.end(); it++) {
        if (it->second.grpSrc == grpIdPre && it->second.grpDest == grpIdPost) {
            connId = it->second.connId;

    if (connectConfigMap.find(connId) == connectConfigMap.end()) {
        KERNEL_ERROR("Total Connections = %d", numConnections);
        KERNEL_ERROR("ConnectId (%d) cannot be recognized", connId);

    return connectConfigMap[connId];
    fetchConductanceAMPA(gGrpId);

    std::vector<float> gAMPAvec;
    for (int gNId = groupConfigMDMap[gGrpId].gStartN; gNId <= groupConfigMDMap[gGrpId].gEndN; gNId++) {
        gAMPAvec.push_back(managerRuntimeData.gAMPA[gNId]);

    fetchConductanceNMDA(gGrpId);

    std::vector<float> gNMDAvec;

        for (int gNId = groupConfigMDMap[gGrpId].gStartN; gNId <= groupConfigMDMap[gGrpId].gEndN; gNId++) {
            gNMDAvec.push_back(managerRuntimeData.gNMDA_d[gNId] - managerRuntimeData.gNMDA_r[gNId]);

        for (int gNId = groupConfigMDMap[gGrpId].gStartN; gNId <= groupConfigMDMap[gGrpId].gEndN; gNId++) {
            gNMDAvec.push_back(managerRuntimeData.gNMDA[gNId]);

    fetchConductanceGABAa(gGrpId);

    std::vector<float> gGABAaVec;
    for (int gNId = groupConfigMDMap[gGrpId].gStartN; gNId <= groupConfigMDMap[gGrpId].gEndN; gNId++) {
        gGABAaVec.push_back(managerRuntimeData.gGABAa[gNId]);

    fetchConductanceGABAb(gGrpId);

    std::vector<float> gGABAbVec;

        for (int gNId = groupConfigMDMap[gGrpId].gStartN; gNId <= groupConfigMDMap[gGrpId].gEndN; gNId++) {
            gGABAbVec.push_back(managerRuntimeData.gGABAb_d[gNId] - managerRuntimeData.gGABAb_r[gNId]);

        for (int gNId = groupConfigMDMap[gGrpId].gStartN; gNId <= groupConfigMDMap[gGrpId].gEndN; gNId++) {
            gGABAbVec.push_back(managerRuntimeData.gGABAb[gNId]);

    assert(connId >= 0 && connId < numConnections);

    return RangeDelay(connectConfigMap[connId].minDelay, connectConfigMap[connId].maxDelay);
uint8_t* SNN::getDelays(int gGrpIdPre, int gGrpIdPost, int& numPreN, int& numPostN) {
    int netIdPost = groupConfigMDMap[gGrpIdPost].netId;
    int lGrpIdPost = groupConfigMDMap[gGrpIdPost].lGrpId;

    for (int lGrpId = 0; lGrpId < networkConfigs[netIdPost].numGroupsAssigned; lGrpId++)
        if (groupConfigs[netIdPost][lGrpId].gGrpId == gGrpIdPre) {

    assert(lGrpIdPre != -1);

    numPreN = groupConfigMap[gGrpIdPre].numN;
    numPostN = groupConfigMap[gGrpIdPost].numN;

    delays = new uint8_t[numPreN * numPostN];
    memset(delays, 0, numPreN * numPostN);

    fetchPostConnectionInfo(netIdPost);

    for (int lNIdPre = groupConfigs[netIdPost][lGrpIdPre].lStartN; lNIdPre < groupConfigs[netIdPost][lGrpIdPre].lEndN; lNIdPre++) {
        unsigned int offset = managerRuntimeData.cumulativePost[lNIdPre];

        for (int t = 0; t < glbNetworkConfig.maxDelay; t++) {

                assert(lNIdPost < glbNetworkConfig.numN);

                if (lNIdPost >= groupConfigs[netIdPost][lGrpIdPost].lStartN && lNIdPost <= groupConfigs[netIdPost][lGrpIdPost].lEndN) {
                    delays[(lNIdPre - groupConfigs[netIdPost][lGrpIdPre].lStartN) + numPreN * (lNIdPost - groupConfigs[netIdPost][lGrpIdPost].lStartN)] = t + 1;
    assert(gGrpId >= 0 && gGrpId < numGroups);

    return groupConfigMap[gGrpId].grid;

    for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
        if (groupConfigMap[gGrpId].grpName.compare(grpName) == 0) {

    assert(gGrpId >= -1 && gGrpId < numGroups);

    return groupConfigMap[gGrpId].grpName;
    gInfo.WithSTDP = groupConfigMap[gGrpId].stdpConfig.WithSTDP;
    gInfo.WithESTDP = groupConfigMap[gGrpId].stdpConfig.WithESTDP;
    gInfo.WithISTDP = groupConfigMap[gGrpId].stdpConfig.WithISTDP;
    gInfo.WithESTDPtype = groupConfigMap[gGrpId].stdpConfig.WithESTDPtype;
    gInfo.WithISTDPtype = groupConfigMap[gGrpId].stdpConfig.WithISTDPtype;
    gInfo.WithESTDPcurve = groupConfigMap[gGrpId].stdpConfig.WithESTDPcurve;
    gInfo.WithISTDPcurve = groupConfigMap[gGrpId].stdpConfig.WithISTDPcurve;
    gInfo.ALPHA_MINUS_EXC = groupConfigMap[gGrpId].stdpConfig.ALPHA_MINUS_EXC;
    gInfo.ALPHA_PLUS_EXC = groupConfigMap[gGrpId].stdpConfig.ALPHA_PLUS_EXC;

    gInfo.TAU_PLUS_INV_EXC = groupConfigMap[gGrpId].stdpConfig.TAU_PLUS_INV_EXC;
    gInfo.ALPHA_MINUS_INB = groupConfigMap[gGrpId].stdpConfig.ALPHA_MINUS_INB;
    gInfo.ALPHA_PLUS_INB = groupConfigMap[gGrpId].stdpConfig.ALPHA_PLUS_INB;

    gInfo.TAU_PLUS_INV_INB = groupConfigMap[gGrpId].stdpConfig.TAU_PLUS_INV_INB;
    gInfo.GAMMA = groupConfigMap[gGrpId].stdpConfig.GAMMA;
    gInfo.BETA_LTP = groupConfigMap[gGrpId].stdpConfig.BETA_LTP;
    gInfo.BETA_LTD = groupConfigMap[gGrpId].stdpConfig.BETA_LTD;
    gInfo.LAMBDA = groupConfigMap[gGrpId].stdpConfig.LAMBDA;
    gInfo.DELTA = groupConfigMap[gGrpId].stdpConfig.DELTA;

    gInfo.baseDP = groupConfigMap[gGrpId].neuromodulatorConfig.baseDP;
    gInfo.base5HT = groupConfigMap[gGrpId].neuromodulatorConfig.base5HT;
    gInfo.baseACh = groupConfigMap[gGrpId].neuromodulatorConfig.baseACh;
    gInfo.baseNE = groupConfigMap[gGrpId].neuromodulatorConfig.baseNE;
    gInfo.decayDP = groupConfigMap[gGrpId].neuromodulatorConfig.decayDP;
    gInfo.decay5HT = groupConfigMap[gGrpId].neuromodulatorConfig.decay5HT;
    gInfo.decayACh = groupConfigMap[gGrpId].neuromodulatorConfig.decayACh;
    gInfo.decayNE = groupConfigMap[gGrpId].neuromodulatorConfig.decayNE;
    assert(gNId >= 0 && gNId < glbNetworkConfig.numN);

    for (std::map<int, GroupConfigMD>::iterator grpIt = groupConfigMDMap.begin(); grpIt != groupConfigMDMap.end(); grpIt++) {
        if (gNId >= grpIt->second.gStartN && gNId <= grpIt->second.gEndN)
            gGrpId = grpIt->second.gGrpId;

    int neurId = gNId - groupConfigMDMap[gGrpId].gStartN;

    Grid3D grid = groupConfigMap[gGrpId].grid;
    assert(gGrpId >= 0 && gGrpId < numGroups);

    int intX = relNeurId % grid.numX;
    int intY = (relNeurId / grid.numX) % grid.numY;
    int intZ = relNeurId / (grid.numX * grid.numY);

    return Point3D(coordX, coordY, coordZ);
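    // The relative neuron index is unfolded in row-major order (x varies fastest, then y, then z):
    // intX = id % numX, intY = (id / numX) % numY, intZ = id / (numX * numY); the coordX/coordY/coordZ
    // passed to Point3D are then derived from these integer grid coordinates.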
    if (connectConfigMap.find(connId) == connectConfigMap.end()) {
        KERNEL_ERROR("Connection ID was not found. Quitting.");

    return connectConfigMap[connId].numberOfConnections;

    if (groupConfigMDMap[gGrpId].spikeMonitorId >= 0) {
        return spikeMonList[(groupConfigMDMap[gGrpId].spikeMonitorId)];

    if (groupConfigMDMap[gGrpId].spikeMonitorId >= 0) {
        return spikeMonCoreList[(groupConfigMDMap[gGrpId].spikeMonitorId)];

    if (groupConfigMDMap[gGrpId].neuronMonitorId >= 0) {
        return neuronMonList[(groupConfigMDMap[gGrpId].neuronMonitorId)];

    if (groupConfigMDMap[gGrpId].neuronMonitorId >= 0) {
        return neuronMonCoreList[(groupConfigMDMap[gGrpId].neuronMonitorId)];

    assert(connId >= 0 && connId < numConnections);

    return RangeWeight(0.0f, connectConfigMap[connId].initWt, connectConfigMap[connId].maxWt);
void SNN::SNNinit() {

    switch (loggerMode_) {
#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
            fpDeb_ = fopen("nul", "w");
            fpDeb_ = fopen("/dev/null", "w");

#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
            fpInf_ = fopen("nul", "w");
            fpInf_ = fopen("/dev/null", "w");
#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
            fpDeb_ = fopen("nul", "w");
            fpDeb_ = fopen("/dev/null", "w");

#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
            fpInf_ = fopen("nul", "w");
            fpErr_ = fopen("nul", "w");
            fpDeb_ = fopen("nul", "w");
            fpInf_ = fopen("/dev/null", "w");
            fpErr_ = fopen("/dev/null", "w");
            fpDeb_ = fopen("/dev/null", "w");

#if defined(WIN32) || defined(WIN64)
    CreateDirectory("results", NULL);
    fpLog_ = fopen("results/carlsim.log", "w");

    if (stat("results", &sb) == -1 || !S_ISDIR(sb.st_mode)) {
        createDir = mkdir("results", 0777);

    if (createDir == -1) {
        fprintf(stderr, "Could not create directory \"results/\", which is required to "
            "store simulation results. Aborting simulation...\n");

    fpLog_ = fopen("results/carlsim.log", "w");

    if (createDir == 0) {
        KERNEL_INFO("Created results directory \"results/\".");

    if (fpLog_ == NULL) {
        fprintf(stderr, "Could not create the directory \"results/\" or the log file \"results/carlsim.log\""
            ", which is required to store simulation results. Aborting simulation...\n");

    KERNEL_INFO("*********************************************************************************");
    KERNEL_INFO("******************** Welcome to CARLsim %d.%d ***************************",
    KERNEL_INFO("*********************************************************************************\n");

    KERNEL_INFO("***************************** Configuring Network ********************************");
    KERNEL_INFO("Starting CARLsim simulation \"%s\" in %s mode", networkName_.c_str(),

    struct tm* timeinfo;
    timeinfo = localtime(&rawtime);
    KERNEL_DEBUG("Current local time and date: %s", asctime(timeinfo));
    simTimeRunStart = 0; simTimeRunStop = 0;
    simTimeLastRunSummary = 0;
    simTimeMs = 0; simTimeSec = 0; simTime = 0;

    numCompartmentConnections = 0;
    numSpikeGenGrps = 0;
    simulatorDeleted = false;

    cumExecutionTime = 0.0;
    executionTime = 0.0;

    spikeRateUpdated = false;
    numSpikeMonitor = 0;
    numNeuronMonitor = 0;
    numGroupMonitor = 0;
    numConnectionMonitor = 0;

    sim_with_compartments = false;
    sim_with_fixedwts = true;
    sim_with_conductances = false;
    sim_with_stdp = false;
    sim_with_modulated_stdp = false;
    sim_with_homeostasis = false;
    sim_with_stp = false;
    sim_in_testing = false;

    sim_with_NMDA_rise = false;
    sim_with_GABAb_rise = false;
    dAMPA = 1.0 - 1.0 / 5.0;
    rNMDA = 1.0 - 1.0 / 10.0;
    dNMDA = 1.0 - 1.0 / 150.0;

    dGABAa = 1.0 - 1.0 / 6.0;
    rGABAb = 1.0 - 1.0 / 100.0;
    dGABAb = 1.0 - 1.0 / 150.0;

    resetMonitors(false);

    resetGroupConfigs(false);

    resetConnectionConfigs(false);

        runtimeData[netId].allocated = false;

    memset(&managerRuntimeData, 0, sizeof(RuntimeData));

    wtANDwtChangeUpdateInterval_ = 1000;
    wtANDwtChangeUpdateIntervalCnt_ = 0;
    stdpScaleFactor_ = 1.0f;
    wtChangeDecay_ = 0.0f;

    CUDA_CREATE_TIMER(timer);
    CUDA_RESET_TIMER(timer);
2198 CUDA_RESET_TIMER(timer);
2202 void SNN::advSimStep() {
2203 doSTPUpdateAndDecayCond();
2207 spikeGeneratorUpdate();
2215 updateTimingTable();
2223 globalStateUpdate();
2227 clearExtFiringTable();
void SNN::doSTPUpdateAndDecayCond() {
#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
    pthread_t threads[numCores + 1];

    int threadCount = 0;

    if (!groupPartitionLists[netId].empty()) {
        assert(runtimeData[netId].allocated);

            doSTPUpdateAndDecayCond_GPU(netId);

#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
            doSTPUpdateAndDecayCond_CPU(netId);
#else // Linux or MAC
            pthread_attr_t attr;
            pthread_attr_init(&attr);

            pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

            argsThreadRoutine[threadCount].snn_pointer = this;
            argsThreadRoutine[threadCount].netId = netId;
            argsThreadRoutine[threadCount].lGrpId = 0;
            argsThreadRoutine[threadCount].startIdx = 0;
            argsThreadRoutine[threadCount].endIdx = 0;
            argsThreadRoutine[threadCount].GtoLOffset = 0;

            pthread_create(&threads[threadCount], &attr, &SNN::helperDoSTPUpdateAndDecayCond_CPU, (void*)&argsThreadRoutine[threadCount]);
            pthread_attr_destroy(&attr);

#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC

    for (int i = 0; i < threadCount; i++) {
        pthread_join(threads[i], NULL);
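// This GPU-or-CPU dispatch is the template shared by the per-step routines that follow
// (spikeGeneratorUpdate, findFiring, doCurrentUpdate, updateTimingTable, globalStateUpdate,
// clearExtFiringTable, updateWeights, shiftSpikeTables): for every network partition with
// groups assigned, either the *_GPU kernel is launched or the *_CPU routine is run, on
// Linux/Mac inside a pthread carrying a CPU-affinity attribute, and the caller joins all
// worker threads before moving on; on Windows and Apple the CPU path simply runs inline.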
void SNN::spikeGeneratorUpdate() {

    if (spikeRateUpdated) {
#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
        pthread_t threads[numCores + 1];

        int threadCount = 0;

        if (!groupPartitionLists[netId].empty()) {

                assignPoissonFiringRate_GPU(netId);

#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
                assignPoissonFiringRate_CPU(netId);
#else // Linux or MAC
                pthread_attr_t attr;
                pthread_attr_init(&attr);

                pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

                argsThreadRoutine[threadCount].snn_pointer = this;
                argsThreadRoutine[threadCount].netId = netId;
                argsThreadRoutine[threadCount].lGrpId = 0;
                argsThreadRoutine[threadCount].startIdx = 0;
                argsThreadRoutine[threadCount].endIdx = 0;
                argsThreadRoutine[threadCount].GtoLOffset = 0;

                pthread_create(&threads[threadCount], &attr, &SNN::helperAssignPoissonFiringRate_CPU, (void*)&argsThreadRoutine[threadCount]);
                pthread_attr_destroy(&attr);

#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC

        for (int i = 0; i < threadCount; i++) {
            pthread_join(threads[i], NULL);

        spikeRateUpdated = false;

    generateUserDefinedSpikes();

#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
    pthread_t threads[numCores + 1];

    int threadCount = 0;

    if (!groupPartitionLists[netId].empty()) {

            spikeGeneratorUpdate_GPU(netId);

#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
            spikeGeneratorUpdate_CPU(netId);
#else // Linux or MAC
            pthread_attr_t attr;
            pthread_attr_init(&attr);

            pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

            argsThreadRoutine[threadCount].snn_pointer = this;
            argsThreadRoutine[threadCount].netId = netId;
            argsThreadRoutine[threadCount].lGrpId = 0;
            argsThreadRoutine[threadCount].startIdx = 0;
            argsThreadRoutine[threadCount].endIdx = 0;
            argsThreadRoutine[threadCount].GtoLOffset = 0;

            pthread_create(&threads[threadCount], &attr, &SNN::helperSpikeGeneratorUpdate_CPU, (void*)&argsThreadRoutine[threadCount]);
            pthread_attr_destroy(&attr);

#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC

    for (int i = 0; i < threadCount; i++) {
        pthread_join(threads[i], NULL);
void SNN::findFiring() {
#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
    pthread_t threads[numCores + 1];

    int threadCount = 0;

    if (!groupPartitionLists[netId].empty()) {

            findFiring_GPU(netId);

#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
            findFiring_CPU(netId);
#else // Linux or MAC
            pthread_attr_t attr;
            pthread_attr_init(&attr);

            pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

            argsThreadRoutine[threadCount].snn_pointer = this;
            argsThreadRoutine[threadCount].netId = netId;
            argsThreadRoutine[threadCount].lGrpId = 0;
            argsThreadRoutine[threadCount].startIdx = 0;
            argsThreadRoutine[threadCount].endIdx = 0;
            argsThreadRoutine[threadCount].GtoLOffset = 0;

            pthread_create(&threads[threadCount], &attr, &SNN::helperFindFiring_CPU, (void*)&argsThreadRoutine[threadCount]);
            pthread_attr_destroy(&attr);

#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC

    for (int i = 0; i < threadCount; i++) {
        pthread_join(threads[i], NULL);
void SNN::doCurrentUpdate() {
#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
    pthread_t threads[numCores + 1];

    int threadCount = 0;

    if (!groupPartitionLists[netId].empty()) {

            doCurrentUpdateD2_GPU(netId);

#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
            doCurrentUpdateD2_CPU(netId);
#else // Linux or MAC
            pthread_attr_t attr;
            pthread_attr_init(&attr);

            pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

            argsThreadRoutine[threadCount].snn_pointer = this;
            argsThreadRoutine[threadCount].netId = netId;
            argsThreadRoutine[threadCount].lGrpId = 0;
            argsThreadRoutine[threadCount].startIdx = 0;
            argsThreadRoutine[threadCount].endIdx = 0;
            argsThreadRoutine[threadCount].GtoLOffset = 0;

            pthread_create(&threads[threadCount], &attr, &SNN::helperDoCurrentUpdateD2_CPU, (void*)&argsThreadRoutine[threadCount]);
            pthread_attr_destroy(&attr);

#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC

    for (int i = 0; i < threadCount; i++) {
        pthread_join(threads[i], NULL);

    if (!groupPartitionLists[netId].empty()) {

            doCurrentUpdateD1_GPU(netId);

#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
            doCurrentUpdateD1_CPU(netId);
#else // Linux or MAC
            pthread_attr_t attr;
            pthread_attr_init(&attr);

            pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

            argsThreadRoutine[threadCount].snn_pointer = this;
            argsThreadRoutine[threadCount].netId = netId;
            argsThreadRoutine[threadCount].lGrpId = 0;
            argsThreadRoutine[threadCount].startIdx = 0;
            argsThreadRoutine[threadCount].endIdx = 0;
            argsThreadRoutine[threadCount].GtoLOffset = 0;

            pthread_create(&threads[threadCount], &attr, &SNN::helperDoCurrentUpdateD1_CPU, (void*)&argsThreadRoutine[threadCount]);
            pthread_attr_destroy(&attr);

#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC

    for (int i = 0; i < threadCount; i++) {
        pthread_join(threads[i], NULL);
void SNN::updateTimingTable() {
#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
    pthread_t threads[numCores + 1];

    int threadCount = 0;

    if (!groupPartitionLists[netId].empty()) {

            updateTimingTable_GPU(netId);

#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
            updateTimingTable_CPU(netId);
#else // Linux or MAC
            pthread_attr_t attr;
            pthread_attr_init(&attr);

            pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

            argsThreadRoutine[threadCount].snn_pointer = this;
            argsThreadRoutine[threadCount].netId = netId;
            argsThreadRoutine[threadCount].lGrpId = 0;
            argsThreadRoutine[threadCount].startIdx = 0;
            argsThreadRoutine[threadCount].endIdx = 0;
            argsThreadRoutine[threadCount].GtoLOffset = 0;

            pthread_create(&threads[threadCount], &attr, &SNN::helperUpdateTimingTable_CPU, (void*)&argsThreadRoutine[threadCount]);
            pthread_attr_destroy(&attr);

#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC

    for (int i = 0; i < threadCount; i++) {
        pthread_join(threads[i], NULL);
void SNN::globalStateUpdate() {
#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
    pthread_t threads[numCores + 1];

    int threadCount = 0;

    if (!groupPartitionLists[netId].empty()) {

            globalStateUpdate_C_GPU(netId);

#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
            globalStateUpdate_CPU(netId);
#else // Linux or MAC
            pthread_attr_t attr;
            pthread_attr_init(&attr);

            pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

            argsThreadRoutine[threadCount].snn_pointer = this;
            argsThreadRoutine[threadCount].netId = netId;
            argsThreadRoutine[threadCount].lGrpId = 0;
            argsThreadRoutine[threadCount].startIdx = 0;
            argsThreadRoutine[threadCount].endIdx = 0;
            argsThreadRoutine[threadCount].GtoLOffset = 0;

            pthread_create(&threads[threadCount], &attr, &SNN::helperGlobalStateUpdate_CPU, (void*)&argsThreadRoutine[threadCount]);
            pthread_attr_destroy(&attr);

#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC

    for (int i = 0; i < threadCount; i++) {
        pthread_join(threads[i], NULL);

    if (!groupPartitionLists[netId].empty()) {

            globalStateUpdate_N_GPU(netId);

    if (!groupPartitionLists[netId].empty()) {

            globalStateUpdate_G_GPU(netId);
void SNN::clearExtFiringTable() {
#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
    pthread_t threads[numCores + 1];

    int threadCount = 0;

    if (!groupPartitionLists[netId].empty()) {

            clearExtFiringTable_GPU(netId);

#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
            clearExtFiringTable_CPU(netId);
#else // Linux or MAC
            pthread_attr_t attr;
            pthread_attr_init(&attr);

            pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

            argsThreadRoutine[threadCount].snn_pointer = this;
            argsThreadRoutine[threadCount].netId = netId;
            argsThreadRoutine[threadCount].lGrpId = 0;
            argsThreadRoutine[threadCount].startIdx = 0;
            argsThreadRoutine[threadCount].endIdx = 0;
            argsThreadRoutine[threadCount].GtoLOffset = 0;

            pthread_create(&threads[threadCount], &attr, &SNN::helperClearExtFiringTable_CPU, (void*)&argsThreadRoutine[threadCount]);
            pthread_attr_destroy(&attr);

#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC

    for (int i = 0; i < threadCount; i++) {
        pthread_join(threads[i], NULL);
void SNN::updateWeights() {
#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
    pthread_t threads[numCores + 1];

    int threadCount = 0;

    if (!groupPartitionLists[netId].empty()) {

            updateWeights_GPU(netId);

#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
            updateWeights_CPU(netId);
#else // Linux or MAC
            pthread_attr_t attr;
            pthread_attr_init(&attr);

            pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

            argsThreadRoutine[threadCount].snn_pointer = this;
            argsThreadRoutine[threadCount].netId = netId;
            argsThreadRoutine[threadCount].lGrpId = 0;
            argsThreadRoutine[threadCount].startIdx = 0;
            argsThreadRoutine[threadCount].endIdx = 0;
            argsThreadRoutine[threadCount].GtoLOffset = 0;

            pthread_create(&threads[threadCount], &attr, &SNN::helperUpdateWeights_CPU, (void*)&argsThreadRoutine[threadCount]);
            pthread_attr_destroy(&attr);

#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC

    for (int i = 0; i < threadCount; i++) {
        pthread_join(threads[i], NULL);

void SNN::updateNetworkConfig(int netId) {

        copyNetworkConfig(netId, cudaMemcpyHostToDevice);

        copyNetworkConfig(netId);
void SNN::shiftSpikeTables() {
#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
    pthread_t threads[numCores + 1];

    int threadCount = 0;

    if (!groupPartitionLists[netId].empty()) {

            shiftSpikeTables_F_GPU(netId);

#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
            shiftSpikeTables_CPU(netId);
#else // Linux or MAC
            pthread_attr_t attr;
            pthread_attr_init(&attr);

            pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

            argsThreadRoutine[threadCount].snn_pointer = this;
            argsThreadRoutine[threadCount].netId = netId;
            argsThreadRoutine[threadCount].lGrpId = 0;
            argsThreadRoutine[threadCount].startIdx = 0;
            argsThreadRoutine[threadCount].endIdx = 0;
            argsThreadRoutine[threadCount].GtoLOffset = 0;

            pthread_create(&threads[threadCount], &attr, &SNN::helperShiftSpikeTables_CPU, (void*)&argsThreadRoutine[threadCount]);
            pthread_attr_destroy(&attr);

#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC

    for (int i = 0; i < threadCount; i++) {
        pthread_join(threads[i], NULL);

    if (!groupPartitionLists[netId].empty()) {

            shiftSpikeTables_T_GPU(netId);

void SNN::allocateSNN(int netId) {

        allocateSNN_GPU(netId);

        allocateSNN_CPU(netId);
void SNN::allocateManagerRuntimeData() {
	// neuron state
	managerRuntimeData.voltage = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.nextVoltage = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.recovery = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.Izh_a = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.Izh_b = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.Izh_c = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.Izh_d = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.Izh_C = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.Izh_k = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.Izh_vr = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.Izh_vt = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.Izh_vpeak = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.lif_tau_m = new int[managerRTDSize.maxNumNReg];
	managerRuntimeData.lif_tau_ref = new int[managerRTDSize.maxNumNReg];
	managerRuntimeData.lif_tau_ref_c = new int[managerRTDSize.maxNumNReg];
	managerRuntimeData.lif_vTh = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.lif_vReset = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.lif_gain = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.lif_bias = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.current = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.extCurrent = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.totalCurrent = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.curSpike = new bool[managerRTDSize.maxNumNReg];
	memset(managerRuntimeData.voltage, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.nextVoltage, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.recovery, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.Izh_a, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.Izh_b, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.Izh_c, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.Izh_d, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.Izh_C, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.Izh_k, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.Izh_vr, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.Izh_vt, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.Izh_vpeak, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.lif_tau_m, 0, sizeof(int) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.lif_tau_ref, 0, sizeof(int) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.lif_tau_ref_c, 0, sizeof(int) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.lif_vTh, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.lif_vReset, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.lif_gain, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.lif_bias, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.current, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.extCurrent, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.totalCurrent, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.curSpike, 0, sizeof(bool) * managerRTDSize.maxNumNReg);

	// excitatory conductances
	managerRuntimeData.gAMPA = new float[managerRTDSize.glbNumNReg];
	managerRuntimeData.gNMDA_r = new float[managerRTDSize.glbNumNReg];
	managerRuntimeData.gNMDA_d = new float[managerRTDSize.glbNumNReg];
	managerRuntimeData.gNMDA = new float[managerRTDSize.glbNumNReg];
	memset(managerRuntimeData.gAMPA, 0, sizeof(float) * managerRTDSize.glbNumNReg);
	memset(managerRuntimeData.gNMDA_r, 0, sizeof(float) * managerRTDSize.glbNumNReg);
	memset(managerRuntimeData.gNMDA_d, 0, sizeof(float) * managerRTDSize.glbNumNReg);
	memset(managerRuntimeData.gNMDA, 0, sizeof(float) * managerRTDSize.glbNumNReg);

	// inhibitory conductances
	managerRuntimeData.gGABAa = new float[managerRTDSize.glbNumNReg];
	managerRuntimeData.gGABAb_r = new float[managerRTDSize.glbNumNReg];
	managerRuntimeData.gGABAb_d = new float[managerRTDSize.glbNumNReg];
	managerRuntimeData.gGABAb = new float[managerRTDSize.glbNumNReg];
	memset(managerRuntimeData.gGABAa, 0, sizeof(float) * managerRTDSize.glbNumNReg);
	memset(managerRuntimeData.gGABAb_r, 0, sizeof(float) * managerRTDSize.glbNumNReg);
	memset(managerRuntimeData.gGABAb_d, 0, sizeof(float) * managerRTDSize.glbNumNReg);
	memset(managerRuntimeData.gGABAb, 0, sizeof(float) * managerRTDSize.glbNumNReg);

	// neuromodulator concentrations, per group
	managerRuntimeData.grpDA = new float[managerRTDSize.maxNumGroups];
	managerRuntimeData.grp5HT = new float[managerRTDSize.maxNumGroups];
	managerRuntimeData.grpACh = new float[managerRTDSize.maxNumGroups];
	managerRuntimeData.grpNE = new float[managerRTDSize.maxNumGroups];
	memset(managerRuntimeData.grpDA, 0, sizeof(float) * managerRTDSize.maxNumGroups);
	memset(managerRuntimeData.grp5HT, 0, sizeof(float) * managerRTDSize.maxNumGroups);
	memset(managerRuntimeData.grpACh, 0, sizeof(float) * managerRTDSize.maxNumGroups);
	memset(managerRuntimeData.grpNE, 0, sizeof(float) * managerRTDSize.maxNumGroups);

	// neuromodulator buffers, 1000 time steps per group
	managerRuntimeData.grpDABuffer = new float[managerRTDSize.maxNumGroups * 1000];
	managerRuntimeData.grp5HTBuffer = new float[managerRTDSize.maxNumGroups * 1000];
	managerRuntimeData.grpAChBuffer = new float[managerRTDSize.maxNumGroups * 1000];
	managerRuntimeData.grpNEBuffer = new float[managerRTDSize.maxNumGroups * 1000];
	memset(managerRuntimeData.grpDABuffer, 0, managerRTDSize.maxNumGroups * sizeof(float) * 1000);
	memset(managerRuntimeData.grp5HTBuffer, 0, managerRTDSize.maxNumGroups * sizeof(float) * 1000);
	memset(managerRuntimeData.grpAChBuffer, 0, managerRTDSize.maxNumGroups * sizeof(float) * 1000);
	memset(managerRuntimeData.grpNEBuffer, 0, managerRTDSize.maxNumGroups * sizeof(float) * 1000);

	// spike-time and spike-count bookkeeping
	managerRuntimeData.lastSpikeTime = new int[managerRTDSize.maxNumNAssigned];
	memset(managerRuntimeData.lastSpikeTime, 0, sizeof(int) * managerRTDSize.maxNumNAssigned);

	managerRuntimeData.nSpikeCnt = new int[managerRTDSize.glbNumN];
	memset(managerRuntimeData.nSpikeCnt, 0, sizeof(int) * managerRTDSize.glbNumN);

	// homeostasis
	managerRuntimeData.avgFiring = new float[managerRTDSize.maxNumN];
	managerRuntimeData.baseFiring = new float[managerRTDSize.maxNumN];
	memset(managerRuntimeData.avgFiring, 0, sizeof(float) * managerRTDSize.maxNumN);
	memset(managerRuntimeData.baseFiring, 0, sizeof(float) * managerRTDSize.maxNumN);

	// short-term plasticity state
	managerRuntimeData.stpu = new float[managerRTDSize.maxNumN * (glbNetworkConfig.maxDelay + 1)];
	managerRuntimeData.stpx = new float[managerRTDSize.maxNumN * (glbNetworkConfig.maxDelay + 1)];
	memset(managerRuntimeData.stpu, 0, sizeof(float) * managerRTDSize.maxNumN * (glbNetworkConfig.maxDelay + 1));
	memset(managerRuntimeData.stpx, 0, sizeof(float) * managerRTDSize.maxNumN * (glbNetworkConfig.maxDelay + 1));

	// synaptic connectivity counters and prefix sums
	managerRuntimeData.Npre = new unsigned short[managerRTDSize.maxNumNAssigned];
	managerRuntimeData.Npre_plastic = new unsigned short[managerRTDSize.maxNumNAssigned];
	managerRuntimeData.Npost = new unsigned short[managerRTDSize.maxNumNAssigned];
	managerRuntimeData.cumulativePost = new unsigned int[managerRTDSize.maxNumNAssigned];
	managerRuntimeData.cumulativePre = new unsigned int[managerRTDSize.maxNumNAssigned];
	memset(managerRuntimeData.Npre, 0, sizeof(short) * managerRTDSize.maxNumNAssigned);
	memset(managerRuntimeData.Npre_plastic, 0, sizeof(short) * managerRTDSize.maxNumNAssigned);
	memset(managerRuntimeData.Npost, 0, sizeof(short) * managerRTDSize.maxNumNAssigned);
	memset(managerRuntimeData.cumulativePost, 0, sizeof(int) * managerRTDSize.maxNumNAssigned);
	memset(managerRuntimeData.cumulativePre, 0, sizeof(int) * managerRTDSize.maxNumNAssigned);

	// synaptic weights and spike times
	managerRuntimeData.wt = new float[managerRTDSize.maxNumPreSynNet];
	managerRuntimeData.wtChange = new float[managerRTDSize.maxNumPreSynNet];
	managerRuntimeData.maxSynWt = new float[managerRTDSize.maxNumPreSynNet];
	managerRuntimeData.synSpikeTime = new int[managerRTDSize.maxNumPreSynNet];
	memset(managerRuntimeData.wt, 0, sizeof(float) * managerRTDSize.maxNumPreSynNet);
	memset(managerRuntimeData.wtChange, 0, sizeof(float) * managerRTDSize.maxNumPreSynNet);
	memset(managerRuntimeData.maxSynWt, 0, sizeof(float) * managerRTDSize.maxNumPreSynNet);
	memset(managerRuntimeData.synSpikeTime, 0, sizeof(int) * managerRTDSize.maxNumPreSynNet);

	// per-connection synaptic scaling factors
	mulSynFast = new float[managerRTDSize.maxNumConnections];
	mulSynSlow = new float[managerRTDSize.maxNumConnections];
	memset(mulSynFast, 0, sizeof(float) * managerRTDSize.maxNumConnections);
	memset(mulSynSlow, 0, sizeof(float) * managerRTDSize.maxNumConnections);

	managerRuntimeData.connIdsPreIdx = new short int[managerRTDSize.maxNumPreSynNet];
	memset(managerRuntimeData.connIdsPreIdx, 0, sizeof(short int) * managerRTDSize.maxNumPreSynNet);

	managerRuntimeData.grpIds = new short int[managerRTDSize.maxNumNAssigned];
	memset(managerRuntimeData.grpIds, 0, sizeof(short int) * managerRTDSize.maxNumNAssigned);

	managerRuntimeData.spikeGenBits = new unsigned int[managerRTDSize.maxNumNSpikeGen / 32 + 1];
}
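// NOTE (illustrative, not CARLsim API): `new float[n]` leaves the array uninitialized, which is
// why every allocation above is followed by an explicit memset. For IEEE-754 floats, the all-zero
// bit pattern is 0.0f, so the two sketches below are equivalent (names are hypothetical):
//
//   float* buf1 = new float[maxNumNReg]();              // "()" value-initializes, i.e., zero-fills
//   float* buf2 = new float[maxNumNReg];
//   memset(buf2, 0, sizeof(float) * maxNumNReg);        // pattern used throughout this file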
int SNN::assignGroup(int gGrpId, int availableNeuronId) {
	int newAvailableNeuronId;
	assert(groupConfigMDMap[gGrpId].gStartN == -1); // the group has not yet been assigned
	groupConfigMDMap[gGrpId].gStartN = availableNeuronId;
	groupConfigMDMap[gGrpId].gEndN = availableNeuronId + groupConfigMap[gGrpId].numN - 1;

	KERNEL_DEBUG("Allocation for group %d (%s): St=%d, End=%d",
		gGrpId, groupConfigMap[gGrpId].grpName.c_str(), groupConfigMDMap[gGrpId].gStartN, groupConfigMDMap[gGrpId].gEndN);

	newAvailableNeuronId = availableNeuronId + groupConfigMap[gGrpId].numN;

	return newAvailableNeuronId;
}

int SNN::assignGroup(std::list<GroupConfigMD>::iterator grpIt, int localGroupId, int availableNeuronId) {
	int newAvailableNeuronId;
	assert(grpIt->lGrpId == -1); // the group has not yet been assigned a local id
	grpIt->lGrpId = localGroupId;
	grpIt->lStartN = availableNeuronId;
	grpIt->lEndN = availableNeuronId + groupConfigMap[grpIt->gGrpId].numN - 1;

	grpIt->LtoGOffset = grpIt->gStartN - grpIt->lStartN;
	grpIt->GtoLOffset = grpIt->lStartN - grpIt->gStartN;

	KERNEL_DEBUG("Allocation for group (%s) [id:%d, local id:%d], St=%d, End=%d", groupConfigMap[grpIt->gGrpId].grpName.c_str(),
		grpIt->gGrpId, grpIt->lGrpId, grpIt->lStartN, grpIt->lEndN);

	newAvailableNeuronId = availableNeuronId + groupConfigMap[grpIt->gGrpId].numN;

	return newAvailableNeuronId;
}
void SNN::generateGroupRuntime(int netId, int lGrpId) {
	resetNeuromodulator(netId, lGrpId);

	for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++)
		resetNeuron(netId, lGrpId, lNId);
}
void SNN::generateRuntimeGroupConfigs() {
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
			// publish the group configs in an array for quick access during runtime
			int gGrpId = grpIt->gGrpId;
			int lGrpId = grpIt->lGrpId;

			groupConfigs[netId][lGrpId].netId = grpIt->netId;
			groupConfigs[netId][lGrpId].gGrpId = grpIt->gGrpId;
			groupConfigs[netId][lGrpId].gStartN = grpIt->gStartN;
			groupConfigs[netId][lGrpId].gEndN = grpIt->gEndN;
			groupConfigs[netId][lGrpId].lGrpId = grpIt->lGrpId;
			groupConfigs[netId][lGrpId].lStartN = grpIt->lStartN;
			groupConfigs[netId][lGrpId].lEndN = grpIt->lEndN;
			groupConfigs[netId][lGrpId].LtoGOffset = grpIt->LtoGOffset;
			groupConfigs[netId][lGrpId].GtoLOffset = grpIt->GtoLOffset;
			groupConfigs[netId][lGrpId].Type = groupConfigMap[gGrpId].type;
			groupConfigs[netId][lGrpId].numN = groupConfigMap[gGrpId].numN;
			groupConfigs[netId][lGrpId].numPostSynapses = grpIt->numPostSynapses;
			groupConfigs[netId][lGrpId].numPreSynapses = grpIt->numPreSynapses;
			groupConfigs[netId][lGrpId].isSpikeGenerator = groupConfigMap[gGrpId].isSpikeGenerator;
			groupConfigs[netId][lGrpId].isSpikeGenFunc = groupConfigMap[gGrpId].spikeGenFunc != NULL ? true : false;
			groupConfigs[netId][lGrpId].WithSTP = groupConfigMap[gGrpId].stpConfig.WithSTP;
			groupConfigs[netId][lGrpId].WithSTDP = groupConfigMap[gGrpId].stdpConfig.WithSTDP;
			groupConfigs[netId][lGrpId].WithESTDP = groupConfigMap[gGrpId].stdpConfig.WithESTDP;
			groupConfigs[netId][lGrpId].WithISTDP = groupConfigMap[gGrpId].stdpConfig.WithISTDP;
			groupConfigs[netId][lGrpId].WithESTDPtype = groupConfigMap[gGrpId].stdpConfig.WithESTDPtype;
			groupConfigs[netId][lGrpId].WithISTDPtype = groupConfigMap[gGrpId].stdpConfig.WithISTDPtype;
			groupConfigs[netId][lGrpId].WithESTDPcurve = groupConfigMap[gGrpId].stdpConfig.WithESTDPcurve;
			groupConfigs[netId][lGrpId].WithISTDPcurve = groupConfigMap[gGrpId].stdpConfig.WithISTDPcurve;
			groupConfigs[netId][lGrpId].WithHomeostasis = groupConfigMap[gGrpId].homeoConfig.WithHomeostasis;
			groupConfigs[netId][lGrpId].FixedInputWts = grpIt->fixedInputWts;
			groupConfigs[netId][lGrpId].hasExternalConnect = grpIt->hasExternalConnect;
			groupConfigs[netId][lGrpId].Noffset = grpIt->Noffset;
			groupConfigs[netId][lGrpId].MaxDelay = grpIt->maxOutgoingDelay;
			groupConfigs[netId][lGrpId].STP_A = groupConfigMap[gGrpId].stpConfig.STP_A;
			groupConfigs[netId][lGrpId].STP_U = groupConfigMap[gGrpId].stpConfig.STP_U;
			groupConfigs[netId][lGrpId].STP_tau_u_inv = groupConfigMap[gGrpId].stpConfig.STP_tau_u_inv;
			groupConfigs[netId][lGrpId].STP_tau_x_inv = groupConfigMap[gGrpId].stpConfig.STP_tau_x_inv;
			groupConfigs[netId][lGrpId].TAU_PLUS_INV_EXC = groupConfigMap[gGrpId].stdpConfig.TAU_PLUS_INV_EXC;
			groupConfigs[netId][lGrpId].TAU_MINUS_INV_EXC = groupConfigMap[gGrpId].stdpConfig.TAU_MINUS_INV_EXC;
			groupConfigs[netId][lGrpId].ALPHA_PLUS_EXC = groupConfigMap[gGrpId].stdpConfig.ALPHA_PLUS_EXC;
			groupConfigs[netId][lGrpId].ALPHA_MINUS_EXC = groupConfigMap[gGrpId].stdpConfig.ALPHA_MINUS_EXC;
			groupConfigs[netId][lGrpId].GAMMA = groupConfigMap[gGrpId].stdpConfig.GAMMA;
			groupConfigs[netId][lGrpId].KAPPA = groupConfigMap[gGrpId].stdpConfig.KAPPA;
			groupConfigs[netId][lGrpId].OMEGA = groupConfigMap[gGrpId].stdpConfig.OMEGA;
			groupConfigs[netId][lGrpId].TAU_PLUS_INV_INB = groupConfigMap[gGrpId].stdpConfig.TAU_PLUS_INV_INB;
			groupConfigs[netId][lGrpId].TAU_MINUS_INV_INB = groupConfigMap[gGrpId].stdpConfig.TAU_MINUS_INV_INB;
			groupConfigs[netId][lGrpId].ALPHA_PLUS_INB = groupConfigMap[gGrpId].stdpConfig.ALPHA_PLUS_INB;
			groupConfigs[netId][lGrpId].ALPHA_MINUS_INB = groupConfigMap[gGrpId].stdpConfig.ALPHA_MINUS_INB;
			groupConfigs[netId][lGrpId].BETA_LTP = groupConfigMap[gGrpId].stdpConfig.BETA_LTP;
			groupConfigs[netId][lGrpId].BETA_LTD = groupConfigMap[gGrpId].stdpConfig.BETA_LTD;
			groupConfigs[netId][lGrpId].LAMBDA = groupConfigMap[gGrpId].stdpConfig.LAMBDA;
			groupConfigs[netId][lGrpId].DELTA = groupConfigMap[gGrpId].stdpConfig.DELTA;

			// compartmental configs
			groupConfigs[netId][lGrpId].withCompartments = groupConfigMap[gGrpId].withCompartments;
			groupConfigs[netId][lGrpId].compCouplingUp = groupConfigMap[gGrpId].compCouplingUp;
			groupConfigs[netId][lGrpId].compCouplingDown = groupConfigMap[gGrpId].compCouplingDown;
			memset(&groupConfigs[netId][lGrpId].compNeighbors, 0, sizeof(groupConfigs[netId][lGrpId].compNeighbors[0]) * MAX_NUM_COMP_CONN);
			memset(&groupConfigs[netId][lGrpId].compCoupling, 0, sizeof(groupConfigs[netId][lGrpId].compCoupling[0]) * MAX_NUM_COMP_CONN);

			// homeostasis configs
			groupConfigs[netId][lGrpId].avgTimeScale = groupConfigMap[gGrpId].homeoConfig.avgTimeScale;
			groupConfigs[netId][lGrpId].avgTimeScale_decay = groupConfigMap[gGrpId].homeoConfig.avgTimeScaleDecay;
			groupConfigs[netId][lGrpId].avgTimeScaleInv = groupConfigMap[gGrpId].homeoConfig.avgTimeScaleInv;
			groupConfigs[netId][lGrpId].homeostasisScale = groupConfigMap[gGrpId].homeoConfig.homeostasisScale;

			// neuromodulator configs
			groupConfigs[netId][lGrpId].baseDP = groupConfigMap[gGrpId].neuromodulatorConfig.baseDP;
			groupConfigs[netId][lGrpId].base5HT = groupConfigMap[gGrpId].neuromodulatorConfig.base5HT;
			groupConfigs[netId][lGrpId].baseACh = groupConfigMap[gGrpId].neuromodulatorConfig.baseACh;
			groupConfigs[netId][lGrpId].baseNE = groupConfigMap[gGrpId].neuromodulatorConfig.baseNE;
			groupConfigs[netId][lGrpId].decayDP = groupConfigMap[gGrpId].neuromodulatorConfig.decayDP;
			groupConfigs[netId][lGrpId].decay5HT = groupConfigMap[gGrpId].neuromodulatorConfig.decay5HT;
			groupConfigs[netId][lGrpId].decayACh = groupConfigMap[gGrpId].neuromodulatorConfig.decayACh;
			groupConfigs[netId][lGrpId].decayNE = groupConfigMap[gGrpId].neuromodulatorConfig.decayNE;

			// sync the metadata map for groups owned by this local network
			if (netId == grpIt->netId) {
				groupConfigMDMap[gGrpId].netId = grpIt->netId;
				groupConfigMDMap[gGrpId].gGrpId = grpIt->gGrpId;
				groupConfigMDMap[gGrpId].gStartN = grpIt->gStartN;
				groupConfigMDMap[gGrpId].gEndN = grpIt->gEndN;
				groupConfigMDMap[gGrpId].lGrpId = grpIt->lGrpId;
				groupConfigMDMap[gGrpId].lStartN = grpIt->lStartN;
				groupConfigMDMap[gGrpId].lEndN = grpIt->lEndN;
				groupConfigMDMap[gGrpId].numPostSynapses = grpIt->numPostSynapses;
				groupConfigMDMap[gGrpId].numPreSynapses = grpIt->numPreSynapses;
				groupConfigMDMap[gGrpId].LtoGOffset = grpIt->LtoGOffset;
				groupConfigMDMap[gGrpId].GtoLOffset = grpIt->GtoLOffset;
				groupConfigMDMap[gGrpId].fixedInputWts = grpIt->fixedInputWts;
				groupConfigMDMap[gGrpId].hasExternalConnect = grpIt->hasExternalConnect;
				groupConfigMDMap[gGrpId].Noffset = grpIt->Noffset;
				groupConfigMDMap[gGrpId].maxOutgoingDelay = grpIt->maxOutgoingDelay;
			}
			groupConfigs[netId][lGrpId].withParamModel_9 = groupConfigMap[gGrpId].withParamModel_9;
			groupConfigs[netId][lGrpId].isLIF = groupConfigMap[gGrpId].isLIF;
		}
	}
}
void SNN::generateRuntimeConnectConfigs() {
	// sync the local and external connect lists back into connectConfigMap
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		for (std::list<ConnectConfig>::iterator connIt = localConnectLists[netId].begin(); connIt != localConnectLists[netId].end(); connIt++) {
			connectConfigMap[connIt->connId] = *connIt;
		}

		for (std::list<ConnectConfig>::iterator connIt = externalConnectLists[netId].begin(); connIt != externalConnectLists[netId].end(); connIt++) {
			connectConfigMap[connIt->connId] = *connIt;
		}
	}
}
void SNN::generateRuntimeNetworkConfigs() {
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			// scan groups assigned to this local network for active neuron monitors
			for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
				if (grpIt->netId == netId && grpIt->neuronMonitorId >= 0)
					networkConfigs[netId].sim_with_nm = true;
			}

			// copy the global conductance rise/decay time constants into the local network config
			networkConfigs[netId].dAMPA = dAMPA;
			networkConfigs[netId].rNMDA = rNMDA;
			networkConfigs[netId].dNMDA = dNMDA;
			networkConfigs[netId].sNMDA = sNMDA;
			networkConfigs[netId].dGABAa = dGABAa;
			networkConfigs[netId].rGABAb = rGABAb;
			networkConfigs[netId].dGABAb = dGABAb;
			networkConfigs[netId].sGABAb = sGABAb;

			// find the number of neurons of each category assigned to this local network
			findNumN(netId, networkConfigs[netId].numN, networkConfigs[netId].numNExternal, networkConfigs[netId].numNAssigned,
				networkConfigs[netId].numNReg, networkConfigs[netId].numNExcReg, networkConfigs[netId].numNInhReg,
				networkConfigs[netId].numNPois, networkConfigs[netId].numNExcPois, networkConfigs[netId].numNInhPois);

			// find the number of groups assigned to this local network
			for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
				if (grpIt->netId == netId)
					networkConfigs[netId].numGroups++;
			}

			// find the maximum number of pre- and post-synapses among neurons in this local network
			findMaxNumSynapsesNeurons(netId, networkConfigs[netId].maxNumPostSynN, networkConfigs[netId].maxNumPreSynN);

			// find the maximum number of spikes with delay == 1 ms and delay >= 2 ms in this local network
			findMaxSpikesD1D2(netId, networkConfigs[netId].maxSpikesD1, networkConfigs[netId].maxSpikesD2);

			// find the total number of synapses in this local network
			findNumSynapsesNetwork(netId, networkConfigs[netId].numPostSynNet, networkConfigs[netId].numPreSynNet);

			// find the number of spike-generator neurons and their offsets
			findNumNSpikeGenAndOffset(netId);
		}
	}

	// find the size of the manager runtime data; it must be able to hold the data of any local network
	memset(&managerRTDSize, 0, sizeof(ManagerRuntimeDataSize));
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			// neurons
			if (networkConfigs[netId].numNReg > managerRTDSize.maxNumNReg) managerRTDSize.maxNumNReg = networkConfigs[netId].numNReg;
			if (networkConfigs[netId].numN > managerRTDSize.maxNumN) managerRTDSize.maxNumN = networkConfigs[netId].numN;
			if (networkConfigs[netId].numNAssigned > managerRTDSize.maxNumNAssigned) managerRTDSize.maxNumNAssigned = networkConfigs[netId].numNAssigned;

			// spike generators
			if (networkConfigs[netId].numNSpikeGen > managerRTDSize.maxNumNSpikeGen) managerRTDSize.maxNumNSpikeGen = networkConfigs[netId].numNSpikeGen;

			// groups and connections
			if (networkConfigs[netId].numGroups > managerRTDSize.maxNumGroups) managerRTDSize.maxNumGroups = networkConfigs[netId].numGroups;
			if (networkConfigs[netId].numConnections > managerRTDSize.maxNumConnections) managerRTDSize.maxNumConnections = networkConfigs[netId].numConnections;

			// neurons per group
			for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
				if (groupConfigMap[grpIt->gGrpId].numN > managerRTDSize.maxNumNPerGroup) managerRTDSize.maxNumNPerGroup = groupConfigMap[grpIt->gGrpId].numN;
			}

			// spike tables
			if (networkConfigs[netId].maxSpikesD1 > managerRTDSize.maxMaxSpikeD1) managerRTDSize.maxMaxSpikeD1 = networkConfigs[netId].maxSpikesD1;
			if (networkConfigs[netId].maxSpikesD2 > managerRTDSize.maxMaxSpikeD2) managerRTDSize.maxMaxSpikeD2 = networkConfigs[netId].maxSpikesD2;

			// synapses
			if (networkConfigs[netId].numPreSynNet > managerRTDSize.maxNumPreSynNet) managerRTDSize.maxNumPreSynNet = networkConfigs[netId].numPreSynNet;
			if (networkConfigs[netId].numPostSynNet > managerRTDSize.maxNumPostSynNet) managerRTDSize.maxNumPostSynNet = networkConfigs[netId].numPostSynNet;

			// global totals across all local networks
			managerRTDSize.glbNumN += networkConfigs[netId].numN;
			managerRTDSize.glbNumNReg += networkConfigs[netId].numNReg;
		}
	}
}
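// NOTE (illustrative sketch, not CARLsim code): managerRTDSize keeps a running maximum over all
// local networks, so a single set of host-side manager buffers can stage data for any partition.
// The helper below only illustrates that sizing rule; pickManagerBufferSize and netSizes are
// hypothetical names that do not exist in the library.
#if 0
static int pickManagerBufferSize(const int* netSizes, int numNets) {
	int maxSize = 0;
	for (int i = 0; i < numNets; i++)
		if (netSizes[i] > maxSize) maxSize = netSizes[i]; // same running-max pattern as above
	return maxSize;
}
#endif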
void SNN::generateConnectionRuntime(int netId) {
	std::map<int, int> GLoffset; // global-to-local neuron id offset per group
	std::map<int, int> GLgrpId;  // global-to-local group id per group

	// load the offset between global and local neuron ids
	for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
		GLoffset[grpIt->gGrpId] = grpIt->GtoLOffset;
		GLgrpId[grpIt->gGrpId] = grpIt->lGrpId;
	}

	// load the per-connection synaptic scaling factors
	for (std::map<int, ConnectConfig>::iterator connIt = connectConfigMap.begin(); connIt != connectConfigMap.end(); connIt++) {
		mulSynFast[connIt->second.connId] = connIt->second.mulSynFast;
		mulSynSlow[connIt->second.connId] = connIt->second.mulSynSlow;
	}

	// parse ConnectionInfo stored in connectionLists[netId]: generate Npost, Npre, Npre_plastic
	int parsedConnections = 0;
	memset(managerRuntimeData.Npost, 0, sizeof(short) * networkConfigs[netId].numNAssigned);
	memset(managerRuntimeData.Npre, 0, sizeof(short) * networkConfigs[netId].numNAssigned);
	memset(managerRuntimeData.Npre_plastic, 0, sizeof(short) * networkConfigs[netId].numNAssigned);
	for (std::list<ConnectionInfo>::iterator connIt = connectionLists[netId].begin(); connIt != connectionLists[netId].end(); connIt++) {
		connIt->srcGLoffset = GLoffset[connIt->grpSrc];
		if (managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]] == SYNAPSE_ID_MASK) {
			KERNEL_ERROR("Error: the number of synapses exceeds maximum limit (%d) for neuron %d (group %d)", SYNAPSE_ID_MASK, connIt->nSrc, connIt->grpSrc);
			exitSimulation(1);
		}
		if (managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]] == SYNAPSE_ID_MASK) {
			KERNEL_ERROR("Error: the number of synapses exceeds maximum limit (%d) for neuron %d (group %d)", SYNAPSE_ID_MASK, connIt->nDest, connIt->grpDest);
			exitSimulation(1);
		}
		managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]]++;
		managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]]++;

		if (GET_FIXED_PLASTIC(connectConfigMap[connIt->connId].connProp) == SYN_PLASTIC) {
			sim_with_fixedwts = false; // the network has at least one plastic connection
			managerRuntimeData.Npre_plastic[connIt->nDest + GLoffset[connIt->grpDest]]++;

			// remember the first plastic input neuron of a homeostatic group
			if (groupConfigMap[connIt->grpDest].homeoConfig.WithHomeostasis && groupConfigMDMap[connIt->grpDest].homeoId == -1)
				groupConfigMDMap[connIt->grpDest].homeoId = connIt->nDest + GLoffset[connIt->grpDest];
		}

		parsedConnections++;
	}
	assert(parsedConnections == networkConfigs[netId].numPostSynNet && parsedConnections == networkConfigs[netId].numPreSynNet);

	// generate cumulativePost and cumulativePre from Npost and Npre (prefix sums)
	managerRuntimeData.cumulativePost[0] = 0;
	managerRuntimeData.cumulativePre[0] = 0;
	for (int lNId = 1; lNId < networkConfigs[netId].numNAssigned; lNId++) {
		managerRuntimeData.cumulativePost[lNId] = managerRuntimeData.cumulativePost[lNId - 1] + managerRuntimeData.Npost[lNId - 1];
		managerRuntimeData.cumulativePre[lNId] = managerRuntimeData.cumulativePre[lNId - 1] + managerRuntimeData.Npre[lNId - 1];
	}

	// generate preSynapticIds; plastic connections are parsed first so that they occupy the
	// first Npre_plastic slots of each neuron's pre list
	memset(managerRuntimeData.Npre, 0, sizeof(short) * networkConfigs[netId].numNAssigned); // reuse Npre as a write cursor
	parsedConnections = 0;
	for (std::list<ConnectionInfo>::iterator connIt = connectionLists[netId].begin(); connIt != connectionLists[netId].end(); connIt++) {
		if (GET_FIXED_PLASTIC(connectConfigMap[connIt->connId].connProp) == SYN_PLASTIC) {
			int pre_pos = managerRuntimeData.cumulativePre[connIt->nDest + GLoffset[connIt->grpDest]] + managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]];
			assert(pre_pos < networkConfigs[netId].numPreSynNet);

			managerRuntimeData.preSynapticIds[pre_pos] = SET_CONN_ID((connIt->nSrc + GLoffset[connIt->grpSrc]), 0, (GLgrpId[connIt->grpSrc]));
			connIt->preSynId = managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]];

			managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]]++;
			parsedConnections++;
		}
	}
	// then parse fixed connections
	for (std::list<ConnectionInfo>::iterator connIt = connectionLists[netId].begin(); connIt != connectionLists[netId].end(); connIt++) {
		if (GET_FIXED_PLASTIC(connectConfigMap[connIt->connId].connProp) == SYN_FIXED) {
			int pre_pos = managerRuntimeData.cumulativePre[connIt->nDest + GLoffset[connIt->grpDest]] + managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]];
			assert(pre_pos < networkConfigs[netId].numPreSynNet);

			managerRuntimeData.preSynapticIds[pre_pos] = SET_CONN_ID((connIt->nSrc + GLoffset[connIt->grpSrc]), 0, (GLgrpId[connIt->grpSrc]));
			connIt->preSynId = managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]];

			managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]]++;
			parsedConnections++;
		}
	}
	assert(parsedConnections == networkConfigs[netId].numPreSynNet);

	// generate postSynapticIds neuron by neuron, and update the matching preSynapticIds, wt, maxSynWt
	for (int lNId = 0; lNId < networkConfigs[netId].numNAssigned; lNId++) {
		if (managerRuntimeData.Npost[lNId] > 0) {
			std::list<ConnectionInfo> postConnectionList;
			ConnectionInfo targetConn;
			targetConn.nSrc = lNId; // the other fields are irrelevant for the lookup

			std::list<ConnectionInfo>::iterator firstPostConn = std::find(connectionLists[netId].begin(), connectionLists[netId].end(), targetConn);
			std::list<ConnectionInfo>::iterator lastPostConn = firstPostConn;
			std::advance(lastPostConn, managerRuntimeData.Npost[lNId]);
			managerRuntimeData.Npost[lNId] = 0; // reset so it can be reused as a write cursor below

			postConnectionList.splice(postConnectionList.begin(), connectionLists[netId], firstPostConn, lastPostConn);

			int post_pos, pre_pos, lastDelay = 0;
			parsedConnections = 0;

			for (std::list<ConnectionInfo>::iterator connIt = postConnectionList.begin(); connIt != postConnectionList.end(); connIt++) {
				assert(connIt->nSrc + GLoffset[connIt->grpSrc] == lNId);
				post_pos = managerRuntimeData.cumulativePost[connIt->nSrc + GLoffset[connIt->grpSrc]] + managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]];
				pre_pos = managerRuntimeData.cumulativePre[connIt->nDest + GLoffset[connIt->grpDest]] + connIt->preSynId;

				assert(post_pos < networkConfigs[netId].numPostSynNet);

				// generate the post-synaptic id for the current connection
				managerRuntimeData.postSynapticIds[post_pos] = SET_CONN_ID((connIt->nDest + GLoffset[connIt->grpDest]), connIt->preSynId, (GLgrpId[connIt->grpDest]));

				// connections must be visited in non-decreasing delay order
				assert(connIt->delay > 0);
				if (connIt->delay > lastDelay) {
					// (start of a new delay bin in the post-synaptic delay bookkeeping)
				} else if (connIt->delay == lastDelay) {
					// (same delay bin as the previous connection)
				} else {
					KERNEL_ERROR("Post-synaptic delays not sorted correctly... pre_id=%d, delay[%d]=%d, delay[%d]=%d",
						lNId, managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]], connIt->delay, managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]] - 1, lastDelay);
				}
				lastDelay = connIt->delay;

				// update the corresponding pre-synaptic id and weights
				managerRuntimeData.preSynapticIds[pre_pos] = SET_CONN_ID((connIt->nSrc + GLoffset[connIt->grpSrc]), managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]], (GLgrpId[connIt->grpSrc]));
				managerRuntimeData.wt[pre_pos] = connIt->initWt;
				managerRuntimeData.maxSynWt[pre_pos] = connIt->maxWt;

				managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]]++;
				parsedConnections++;
			}
			assert(parsedConnections == managerRuntimeData.Npost[lNId]);
		}
	}
	assert(connectionLists[netId].empty());
}
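// NOTE (illustrative sketch, not CARLsim API): Npre/cumulativePre and Npost/cumulativePost form a
// CSR-like layout. For local neuron n, its k-th pre-synapse lives at flat index
// cumulativePre[n] + k with 0 <= k < Npre[n], and plastic synapses occupy the first
// Npre_plastic[n] slots of that range. preSynIndex below is a hypothetical helper.
#if 0
static int preSynIndex(const unsigned int* cumulativePre, const unsigned short* Npre, int n, int k) {
	// assumes 0 <= k < Npre[n]; returns the index into wt[], maxSynWt[], preSynapticIds[]
	return (int)cumulativePre[n] + k;
}
#endif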
void SNN::generateCompConnectionRuntime(int netId) {
	std::map<int, int> GLgrpId; // global-to-local group id

	for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
		GLgrpId[grpIt->gGrpId] = grpIt->lGrpId;
	}

	for (std::list<compConnectConfig>::iterator connIt = localCompConnectLists[netId].begin(); connIt != localCompConnectLists[netId].end(); connIt++) {
		int grpLower = connIt->grpSrc;
		int grpUpper = connIt->grpDest;

		int i = groupConfigs[netId][GLgrpId[grpLower]].numCompNeighbors;
		if (i >= MAX_NUM_COMP_CONN) {
			KERNEL_ERROR("Group %s(%d) exceeds max number of allowed compartmental connections (%d).",
				groupConfigMap[grpLower].grpName.c_str(), grpLower, (int)MAX_NUM_COMP_CONN);
			exitSimulation(1);
		}
		groupConfigs[netId][GLgrpId[grpLower]].compNeighbors[i] = grpUpper;
		groupConfigs[netId][GLgrpId[grpLower]].compCoupling[i] = groupConfigs[netId][GLgrpId[grpUpper]].compCouplingDown; // coupling from the upper neighbor
		groupConfigs[netId][GLgrpId[grpLower]].numCompNeighbors++;

		int j = groupConfigs[netId][GLgrpId[grpUpper]].numCompNeighbors;
		if (j >= MAX_NUM_COMP_CONN) {
			KERNEL_ERROR("Group %s(%d) exceeds max number of allowed compartmental connections (%d).",
				groupConfigMap[grpUpper].grpName.c_str(), grpUpper, (int)MAX_NUM_COMP_CONN);
			exitSimulation(1);
		}
		groupConfigs[netId][GLgrpId[grpUpper]].compNeighbors[j] = grpLower;
		groupConfigs[netId][GLgrpId[grpUpper]].compCoupling[j] = groupConfigs[netId][GLgrpId[grpLower]].compCouplingUp; // coupling from the lower neighbor
		groupConfigs[netId][GLgrpId[grpUpper]].numCompNeighbors++;
	}
}
void SNN::generatePoissonGroupRuntime(int netId, int lGrpId) {
	for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++)
		resetPoissonNeuron(netId, lGrpId, lNId);
}
void SNN::collectGlobalNetworkConfigC() {
	// scan all connect configs to find the maximum axonal delay in the global network
	for (std::map<int, ConnectConfig>::iterator connIt = connectConfigMap.begin(); connIt != connectConfigMap.end(); connIt++) {
		if (connIt->second.maxDelay > glbNetworkConfig.maxDelay)
			glbNetworkConfig.maxDelay = connIt->second.maxDelay;
	}
	assert(connectConfigMap.size() > 0 || glbNetworkConfig.maxDelay != -1);

	// scan all group configs to count regular/Poisson and excitatory/inhibitory neurons
	for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
		unsigned int type = groupConfigMap[gGrpId].type;
		if (IS_EXCITATORY_TYPE(type) && (type & POISSON_NEURON))
			glbNetworkConfig.numNExcPois += groupConfigMap[gGrpId].numN;
		else if (IS_INHIBITORY_TYPE(type) && (type & POISSON_NEURON))
			glbNetworkConfig.numNInhPois += groupConfigMap[gGrpId].numN;
		else if (IS_EXCITATORY_TYPE(type) && !(type & POISSON_NEURON))
			glbNetworkConfig.numNExcReg += groupConfigMap[gGrpId].numN;
		else if (IS_INHIBITORY_TYPE(type) && !(type & POISSON_NEURON))
			glbNetworkConfig.numNInhReg += groupConfigMap[gGrpId].numN;

		if (groupConfigMDMap[gGrpId].maxOutgoingDelay == 1)
			glbNetworkConfig.numN1msDelay += groupConfigMap[gGrpId].numN;
		else if (groupConfigMDMap[gGrpId].maxOutgoingDelay >= 2)
			glbNetworkConfig.numN2msDelay += groupConfigMap[gGrpId].numN;
	}

	glbNetworkConfig.numNReg = glbNetworkConfig.numNExcReg + glbNetworkConfig.numNInhReg;
	glbNetworkConfig.numNPois = glbNetworkConfig.numNExcPois + glbNetworkConfig.numNInhPois;
	glbNetworkConfig.numN = glbNetworkConfig.numNReg + glbNetworkConfig.numNPois;
}
void SNN::collectGlobalNetworkConfigP() {
	// count the total number of synapses generated in the global network
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!localConnectLists[netId].empty() || !externalConnectLists[netId].empty()) {
			for (std::list<ConnectConfig>::iterator connIt = localConnectLists[netId].begin(); connIt != localConnectLists[netId].end(); connIt++)
				glbNetworkConfig.numSynNet += connIt->numberOfConnections;

			for (std::list<ConnectConfig>::iterator connIt = externalConnectLists[netId].begin(); connIt != externalConnectLists[netId].end(); connIt++)
				glbNetworkConfig.numSynNet += connIt->numberOfConnections;
		}
	}
}
void SNN::compileSNN() {
	KERNEL_DEBUG("Beginning compilation of the network....");

	compileGroupConfig();

	compileConnectConfig();

	collectGlobalNetworkConfigC();

	KERNEL_INFO("************************** Global Network Configuration *******************************");
	KERNEL_INFO("The number of neurons in the network (numN) = %d", glbNetworkConfig.numN);
	KERNEL_INFO("The number of regular neurons in the network (numNReg:numNExcReg:numNInhReg) = %d:%d:%d",
		glbNetworkConfig.numNReg, glbNetworkConfig.numNExcReg, glbNetworkConfig.numNInhReg);
	KERNEL_INFO("The maximum axonal delay in the network (maxDelay) = %d", glbNetworkConfig.maxDelay);
}
void SNN::compileConnectConfig() {
	// for future use
}

void SNN::compileGroupConfig() {
	int grpSrc;

	// find the maximum outgoing delay of each source group, and mark destination groups of
	// plastic connections as not having fixed input weights
	for (std::map<int, ConnectConfig>::iterator connIt = connectConfigMap.begin(); connIt != connectConfigMap.end(); connIt++) {
		grpSrc = connIt->second.grpSrc;
		if (connIt->second.maxDelay > groupConfigMDMap[grpSrc].maxOutgoingDelay)
			groupConfigMDMap[grpSrc].maxOutgoingDelay = connIt->second.maxDelay;

		if (GET_FIXED_PLASTIC(connIt->second.connProp) == SYN_PLASTIC)
			groupConfigMDMap[connIt->second.grpDest].fixedInputWts = false;
	}

	// assign global neuron ids, one neuron-type category per pass
	int assignedGroup = 0;
	int availableNeuronId = 0;
	for (int order = 0; order < 4; order++) {
		for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
			unsigned int type = groupConfigMap[gGrpId].type;
			if (IS_EXCITATORY_TYPE(type) && (type & POISSON_NEURON) && order == 3) {
				availableNeuronId = assignGroup(gGrpId, availableNeuronId);
				assignedGroup++;
			} else if (IS_INHIBITORY_TYPE(type) && (type & POISSON_NEURON) && order == 2) {
				availableNeuronId = assignGroup(gGrpId, availableNeuronId);
				assignedGroup++;
			} else if (IS_EXCITATORY_TYPE(type) && !(type & POISSON_NEURON) && order == 0) {
				availableNeuronId = assignGroup(gGrpId, availableNeuronId);
				assignedGroup++;
			} else if (IS_INHIBITORY_TYPE(type) && !(type & POISSON_NEURON) && order == 1) {
				availableNeuronId = assignGroup(gGrpId, availableNeuronId);
				assignedGroup++;
			}
		}
	}
	assert(assignedGroup == numGroups);
}
void SNN::connectNetwork() {
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		// this pass generates local connections
		for (std::list<ConnectConfig>::iterator connIt = localConnectLists[netId].begin(); connIt != localConnectLists[netId].end(); connIt++) {
			switch (connIt->type) {
			case CONN_RANDOM:
				connectRandom(netId, connIt, false);
				break;
			case CONN_FULL:
				connectFull(netId, connIt, false);
				break;
			case CONN_FULL_NO_DIRECT:
				connectFull(netId, connIt, false);
				break;
			case CONN_ONE_TO_ONE:
				connectOneToOne(netId, connIt, false);
				break;
			case CONN_GAUSSIAN:
				connectGaussian(netId, connIt, false);
				break;
			case CONN_USER_DEFINED:
				connectUserDefined(netId, connIt, false);
				break;
			default:
				KERNEL_ERROR("Invalid connection type (should be 'random', 'full', 'full-no-direct', or 'one-to-one')");
				break;
			}
		}

		// this pass generates external connections
		for (std::list<ConnectConfig>::iterator connIt = externalConnectLists[netId].begin(); connIt != externalConnectLists[netId].end(); connIt++) {
			switch (connIt->type) {
			case CONN_RANDOM:
				connectRandom(netId, connIt, true);
				break;
			case CONN_FULL:
				connectFull(netId, connIt, true);
				break;
			case CONN_FULL_NO_DIRECT:
				connectFull(netId, connIt, true);
				break;
			case CONN_ONE_TO_ONE:
				connectOneToOne(netId, connIt, true);
				break;
			case CONN_GAUSSIAN:
				connectGaussian(netId, connIt, true);
				break;
			case CONN_USER_DEFINED:
				connectUserDefined(netId, connIt, true);
				break;
			default:
				KERNEL_ERROR("Invalid connection type (should be 'random', 'full', 'full-no-direct', or 'one-to-one')");
				break;
			}
		}
	}
}
// make a synapse from neuron _nSrc (group _grpSrc) to neuron _nDest (group _grpDest);
// weight and delay are drawn from the connection config
inline void SNN::connectNeurons(int netId, int _grpSrc, int _grpDest, int _nSrc, int _nDest, short int _connId, int externalNetId) {
	ConnectionInfo connInfo;
	connInfo.grpSrc = _grpSrc;
	connInfo.grpDest = _grpDest;
	connInfo.nSrc = _nSrc;
	connInfo.nDest = _nDest;
	connInfo.connId = _connId;
	connInfo.initWt = 0.0f;
	connInfo.maxWt = 0.0f;

	// generate the delay value
	connInfo.delay = connectConfigMap[_connId].minDelay + rand() % (connectConfigMap[_connId].maxDelay - connectConfigMap[_connId].minDelay + 1);
	assert((connInfo.delay >= connectConfigMap[_connId].minDelay) && (connInfo.delay <= connectConfigMap[_connId].maxDelay));

	// generate the initial and maximum weight; the sign follows the pre-synaptic group type
	// (inhibitory groups carry negative weights)
	float initWt = connectConfigMap[_connId].initWt;
	float maxWt = connectConfigMap[_connId].maxWt;
	connInfo.maxWt = isExcitatoryGroup(_grpSrc) ? fabs(maxWt) : -1.0 * fabs(maxWt);
	connInfo.initWt = isExcitatoryGroup(_grpSrc) ? fabs(initWt) : -1.0 * fabs(initWt);

	connectionLists[netId].push_back(connInfo);

	// if the connection is external, copy the connection info to the external network as well
	if (externalNetId >= 0)
		connectionLists[externalNetId].push_back(connInfo);
}
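// NOTE (illustrative sketch, not CARLsim API): the delay above is drawn as
// minDelay + rand() % (maxDelay - minDelay + 1), i.e., an integer picked (approximately) uniformly
// from [minDelay, maxDelay]; the modulo introduces a slight bias unless RAND_MAX + 1 is a multiple
// of the range size. drawDelay below is a hypothetical helper illustrating the same draw.
#if 0
static uint8_t drawDelay(uint8_t minDelay, uint8_t maxDelay) {
	return (uint8_t)(minDelay + rand() % (maxDelay - minDelay + 1)); // always in [minDelay, maxDelay]
}
#endif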
// make a synapse with an explicitly given weight and delay (used by gaussian and user-defined connections)
inline void SNN::connectNeurons(int netId, int _grpSrc, int _grpDest, int _nSrc, int _nDest, short int _connId,
	float initWt, float maxWt, uint8_t delay, int externalNetId) {
	ConnectionInfo connInfo;
	connInfo.grpSrc = _grpSrc;
	connInfo.grpDest = _grpDest;
	connInfo.nSrc = _nSrc;
	connInfo.nDest = _nDest;
	connInfo.connId = _connId;

	// the sign of the weight follows the pre-synaptic group type (inhibitory groups carry negative weights)
	connInfo.maxWt = isExcitatoryGroup(_grpSrc) ? fabs(maxWt) : -1.0 * fabs(maxWt);
	connInfo.initWt = isExcitatoryGroup(_grpSrc) ? fabs(initWt) : -1.0 * fabs(initWt);
	connInfo.delay = delay;

	connectionLists[netId].push_back(connInfo);

	// if the connection is external, copy the connection info to the external network as well
	if (externalNetId >= 0)
		connectionLists[externalNetId].push_back(connInfo);
}
void SNN::connectFull(int netId, std::list<ConnectConfig>::iterator connIt, bool isExternal) {
	int grpSrc = connIt->grpSrc;
	int grpDest = connIt->grpDest;
	bool noDirect = (connIt->type == CONN_FULL_NO_DIRECT);
	int externalNetId = -1;

	if (isExternal) {
		externalNetId = groupConfigMDMap[grpDest].netId;
		assert(netId != externalNetId);
	}

	int gPreStart = groupConfigMDMap[grpSrc].gStartN;
	for (int gPreN = groupConfigMDMap[grpSrc].gStartN; gPreN <= groupConfigMDMap[grpSrc].gEndN; gPreN++) {
		Point3D locPre = getNeuronLocation3D(grpSrc, gPreN - gPreStart);
		int gPostStart = groupConfigMDMap[grpDest].gStartN;
		for (int gPostN = groupConfigMDMap[grpDest].gStartN; gPostN <= groupConfigMDMap[grpDest].gEndN; gPostN++) {
			// skip direct connections for 'full-no-direct'
			if (noDirect && gPreN == gPostN)
				continue;

			// restrict connections to the receptive field of the pre-synaptic neuron
			Point3D locPost = getNeuronLocation3D(grpDest, gPostN - gPostStart);
			if (!isPoint3DinRF(connIt->connRadius, locPre, locPost))
				continue;

			connectNeurons(netId, grpSrc, grpDest, gPreN, gPostN, connIt->connId, externalNetId);
			connIt->numberOfConnections++;
		}
	}

	std::list<GroupConfigMD>::iterator grpIt;
	GroupConfigMD targetGrp;

	// update the synapse counters of the groups in the local network
	targetGrp.gGrpId = grpSrc; // the other fields do not matter for the lookup
	grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
	assert(grpIt != groupPartitionLists[netId].end());
	grpIt->numPostSynapses += connIt->numberOfConnections;

	targetGrp.gGrpId = grpDest;
	grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
	assert(grpIt != groupPartitionLists[netId].end());
	grpIt->numPreSynapses += connIt->numberOfConnections;

	// for external connections, also update the counters in the external network
	if (isExternal) {
		targetGrp.gGrpId = grpSrc;
		grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
		assert(grpIt != groupPartitionLists[externalNetId].end());
		grpIt->numPostSynapses += connIt->numberOfConnections;

		targetGrp.gGrpId = grpDest;
		grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
		assert(grpIt != groupPartitionLists[externalNetId].end());
		grpIt->numPreSynapses += connIt->numberOfConnections;
	}
}
void SNN::connectGaussian(int netId, std::list<ConnectConfig>::iterator connIt, bool isExternal) {
	int grpSrc = connIt->grpSrc;
	int grpDest = connIt->grpDest;
	int externalNetId = -1;

	if (isExternal) {
		externalNetId = groupConfigMDMap[grpDest].netId;
		assert(netId != externalNetId);
	}

	for (int i = groupConfigMDMap[grpSrc].gStartN; i <= groupConfigMDMap[grpSrc].gEndN; i++) {
		Point3D loc_i = getNeuronLocation3D(grpSrc, i - groupConfigMDMap[grpSrc].gStartN);
		for (int j = groupConfigMDMap[grpDest].gStartN; j <= groupConfigMDMap[grpDest].gEndN; j++) {
			Point3D loc_j = getNeuronLocation3D(grpDest, j - groupConfigMDMap[grpDest].gStartN);

			// skip post-synaptic neurons outside the receptive field (normalized distance not in [0,1])
			double rfDist = getRFDist3D(connIt->connRadius, loc_i, loc_j);
			if (rfDist < 0.0 || rfDist > 1.0)
				continue;

			// gaussian distance-dependent weight scaling
			double gauss = exp(-2.3026 * rfDist);

			if (drand48() < connIt->connProbability) {
				float initWt = gauss * connIt->initWt;
				float maxWt = connIt->maxWt;
				uint8_t delay = connIt->minDelay + rand() % (connIt->maxDelay - connIt->minDelay + 1);
				assert((delay >= connIt->minDelay) && (delay <= connIt->maxDelay));

				connectNeurons(netId, grpSrc, grpDest, i, j, connIt->connId, initWt, maxWt, delay, externalNetId);
				connIt->numberOfConnections++;
			}
		}
	}

	std::list<GroupConfigMD>::iterator grpIt;
	GroupConfigMD targetGrp;

	// update the synapse counters of the groups in the local network
	targetGrp.gGrpId = grpSrc; // the other fields do not matter for the lookup
	grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
	assert(grpIt != groupPartitionLists[netId].end());
	grpIt->numPostSynapses += connIt->numberOfConnections;

	targetGrp.gGrpId = grpDest;
	grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
	assert(grpIt != groupPartitionLists[netId].end());
	grpIt->numPreSynapses += connIt->numberOfConnections;

	// for external connections, also update the counters in the external network
	if (isExternal) {
		targetGrp.gGrpId = grpSrc;
		grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
		assert(grpIt != groupPartitionLists[externalNetId].end());
		grpIt->numPostSynapses += connIt->numberOfConnections;

		targetGrp.gGrpId = grpDest;
		grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
		assert(grpIt != groupPartitionLists[externalNetId].end());
		grpIt->numPreSynapses += connIt->numberOfConnections;
	}
}
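// NOTE (worked example, illustration only): gauss = exp(-2.3026 * rfDist) equals 10^(-rfDist),
// since ln(10) ~= 2.3026. The generated weight therefore falls from initWt at the RF center
// (rfDist = 0) to about 0.1 * initWt at the RF border (rfDist = 1):
//   rfDist = 0.0  ->  gauss ~= 1.00
//   rfDist = 0.5  ->  gauss ~= 0.316
//   rfDist = 1.0  ->  gauss ~= 0.10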
void SNN::connectOneToOne(int netId, std::list<ConnectConfig>::iterator connIt, bool isExternal) {
	int grpSrc = connIt->grpSrc;
	int grpDest = connIt->grpDest;
	int externalNetId = -1;

	if (isExternal) {
		externalNetId = groupConfigMDMap[grpDest].netId;
		assert(netId != externalNetId);
	}

	assert(groupConfigMap[grpDest].numN == groupConfigMap[grpSrc].numN);

	for (int gPreN = groupConfigMDMap[grpSrc].gStartN, gPostN = groupConfigMDMap[grpDest].gStartN; gPreN <= groupConfigMDMap[grpSrc].gEndN; gPreN++, gPostN++) {
		connectNeurons(netId, grpSrc, grpDest, gPreN, gPostN, connIt->connId, externalNetId);
		connIt->numberOfConnections++;
	}

	std::list<GroupConfigMD>::iterator grpIt;
	GroupConfigMD targetGrp;

	// update the synapse counters of the groups in the local network
	targetGrp.gGrpId = grpSrc; // the other fields do not matter for the lookup
	grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
	assert(grpIt != groupPartitionLists[netId].end());
	grpIt->numPostSynapses += connIt->numberOfConnections;

	targetGrp.gGrpId = grpDest;
	grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
	assert(grpIt != groupPartitionLists[netId].end());
	grpIt->numPreSynapses += connIt->numberOfConnections;

	// for external connections, also update the counters in the external network
	if (isExternal) {
		targetGrp.gGrpId = grpSrc;
		grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
		assert(grpIt != groupPartitionLists[externalNetId].end());
		grpIt->numPostSynapses += connIt->numberOfConnections;

		targetGrp.gGrpId = grpDest;
		grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
		assert(grpIt != groupPartitionLists[externalNetId].end());
		grpIt->numPreSynapses += connIt->numberOfConnections;
	}
}
// make 'random' connections: every pre/post pair within the receptive field is connected with
// probability connProbability
void SNN::connectRandom(int netId, std::list<ConnectConfig>::iterator connIt, bool isExternal) {
	int grpSrc = connIt->grpSrc;
	int grpDest = connIt->grpDest;
	int externalNetId = -1;

	if (isExternal) {
		externalNetId = groupConfigMDMap[grpDest].netId;
		assert(netId != externalNetId);
	}

	int gPreStart = groupConfigMDMap[grpSrc].gStartN;
	for (int gPreN = groupConfigMDMap[grpSrc].gStartN; gPreN <= groupConfigMDMap[grpSrc].gEndN; gPreN++) {
		Point3D locPre = getNeuronLocation3D(grpSrc, gPreN - gPreStart);
		int gPostStart = groupConfigMDMap[grpDest].gStartN;
		for (int gPostN = groupConfigMDMap[grpDest].gStartN; gPostN <= groupConfigMDMap[grpDest].gEndN; gPostN++) {
			// restrict connections to the receptive field of the pre-synaptic neuron
			Point3D locPost = getNeuronLocation3D(grpDest, gPostN - gPostStart);
			if (!isPoint3DinRF(connIt->connRadius, locPre, locPost))
				continue;

			if (drand48() < connIt->connProbability) {
				connectNeurons(netId, grpSrc, grpDest, gPreN, gPostN, connIt->connId, externalNetId);
				connIt->numberOfConnections++;
			}
		}
	}

	std::list<GroupConfigMD>::iterator grpIt;
	GroupConfigMD targetGrp;

	// update the synapse counters of the groups in the local network
	targetGrp.gGrpId = grpSrc; // the other fields do not matter for the lookup
	grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
	assert(grpIt != groupPartitionLists[netId].end());
	grpIt->numPostSynapses += connIt->numberOfConnections;

	targetGrp.gGrpId = grpDest;
	grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
	assert(grpIt != groupPartitionLists[netId].end());
	grpIt->numPreSynapses += connIt->numberOfConnections;

	// for external connections, also update the counters in the external network
	if (isExternal) {
		targetGrp.gGrpId = grpSrc;
		grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
		assert(grpIt != groupPartitionLists[externalNetId].end());
		grpIt->numPostSynapses += connIt->numberOfConnections;

		targetGrp.gGrpId = grpDest;
		grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
		assert(grpIt != groupPartitionLists[externalNetId].end());
		grpIt->numPreSynapses += connIt->numberOfConnections;
	}
}
// make connections according to a user-defined callback (ConnectionGenerator)
void SNN::connectUserDefined(int netId, std::list<ConnectConfig>::iterator connIt, bool isExternal) {
	int grpSrc = connIt->grpSrc;
	int grpDest = connIt->grpDest;
	int externalNetId = -1;

	if (isExternal) {
		externalNetId = groupConfigMDMap[grpDest].netId;
		assert(netId != externalNetId);
	}

	connIt->maxDelay = 0;
	int preStartN = groupConfigMDMap[grpSrc].gStartN;
	int postStartN = groupConfigMDMap[grpDest].gStartN;
	for (int pre_nid = groupConfigMDMap[grpSrc].gStartN; pre_nid <= groupConfigMDMap[grpSrc].gEndN; pre_nid++) {
		for (int post_nid = groupConfigMDMap[grpDest].gStartN; post_nid <= groupConfigMDMap[grpDest].gEndN; post_nid++) {
			float weight, maxWt, delay;
			bool connected = false;

			connIt->conn->connect(this, grpSrc, pre_nid - preStartN, grpDest, post_nid - postStartN, weight, maxWt, delay, connected);

			if (connected) {
				assert(abs(weight) <= abs(maxWt));

				// track the largest weight and delay requested by the callback
				if (fabs(maxWt) > connIt->maxWt)
					connIt->maxWt = fabs(maxWt);

				if (delay > connIt->maxDelay)
					connIt->maxDelay = delay;

				connectNeurons(netId, grpSrc, grpDest, pre_nid, post_nid, connIt->connId, weight, maxWt, delay, externalNetId);
				connIt->numberOfConnections++;
			}
		}
	}

	std::list<GroupConfigMD>::iterator grpIt;
	GroupConfigMD targetGrp;

	// update the synapse counters of the groups in the local network
	targetGrp.gGrpId = grpSrc; // the other fields do not matter for the lookup
	grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
	assert(grpIt != groupPartitionLists[netId].end());
	grpIt->numPostSynapses += connIt->numberOfConnections;

	targetGrp.gGrpId = grpDest;
	grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
	assert(grpIt != groupPartitionLists[netId].end());
	grpIt->numPreSynapses += connIt->numberOfConnections;

	// for external connections, also update the counters in the external network
	if (isExternal) {
		targetGrp.gGrpId = grpSrc;
		grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
		assert(grpIt != groupPartitionLists[externalNetId].end());
		grpIt->numPostSynapses += connIt->numberOfConnections;

		targetGrp.gGrpId = grpDest;
		grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
		assert(grpIt != groupPartitionLists[externalNetId].end());
		grpIt->numPreSynapses += connIt->numberOfConnections;
	}
}
void SNN::deleteRuntimeData() {
#ifndef __NO_CUDA__
	CUDA_CHECK_ERRORS(cudaThreadSynchronize());
#endif

#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	pthread_t threads[numCores + 1];
	cpu_set_t cpus;
	ThreadStruct argsThreadRoutine[numCores + 1];
	int threadCount = 0;
#endif

	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			if (netId < CPU_RUNTIME_BASE) // GPU runtime
				deleteRuntimeData_GPU(netId);
			else { // CPU runtime
#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
				deleteRuntimeData_CPU(netId);
#else // Linux or MAC
				pthread_attr_t attr;
				pthread_attr_init(&attr);
				CPU_ZERO(&cpus);
				CPU_SET(threadCount % NUM_CPU_CORES, &cpus);
				pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

				argsThreadRoutine[threadCount].snn_pointer = this;
				argsThreadRoutine[threadCount].netId = netId;
				argsThreadRoutine[threadCount].lGrpId = 0;
				argsThreadRoutine[threadCount].startIdx = 0;
				argsThreadRoutine[threadCount].endIdx = 0;
				argsThreadRoutine[threadCount].GtoLOffset = 0;

				pthread_create(&threads[threadCount], &attr, &SNN::helperDeleteRuntimeData_CPU, (void*)&argsThreadRoutine[threadCount]);
				pthread_attr_destroy(&attr);
				threadCount++;
#endif
			}
		}
	}

#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	// join all the threads
	for (int i = 0; i < threadCount; i++) {
		pthread_join(threads[i], NULL);
	}
#endif

#ifndef __NO_CUDA__
	CUDA_DELETE_TIMER(timer);
#endif
}
void SNN::deleteObjects() {
	if (simulatorDeleted)
		return;

	// deallocate monitors and connection configs
	resetMonitors(true);
	resetConnectionConfigs(true);

	// delete manager runtime data
	deleteManagerRuntimeData();

	deleteRuntimeData();

	// close the log file streams, unless we are in CUSTOM logger mode;
	// never fclose stdout or stderr
	if (loggerMode_ != CUSTOM) {
		if (fpInf_ != NULL && fpInf_ != stdout && fpInf_ != stderr)
			fclose(fpInf_);
		if (fpErr_ != NULL && fpErr_ != stdout && fpErr_ != stderr)
			fclose(fpErr_);
		if (fpDeb_ != NULL && fpDeb_ != stdout && fpDeb_ != stderr)
			fclose(fpDeb_);
		if (fpLog_ != NULL && fpLog_ != stdout && fpLog_ != stderr)
			fclose(fpLog_);
	}

	simulatorDeleted = true;
}
void SNN::findMaxNumSynapsesGroups(int* _maxNumPostSynGrp, int* _maxNumPreSynGrp) {
	*_maxNumPostSynGrp = 0;
	*_maxNumPreSynGrp = 0;

	// scan all the groups and find the maximum post- and pre-synaptic counts
	for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
		if (groupConfigMDMap[gGrpId].numPostSynapses > *_maxNumPostSynGrp)
			*_maxNumPostSynGrp = groupConfigMDMap[gGrpId].numPostSynapses;
		if (groupConfigMDMap[gGrpId].numPreSynapses > *_maxNumPreSynGrp)
			*_maxNumPreSynGrp = groupConfigMDMap[gGrpId].numPreSynapses;
	}
}
void SNN::findMaxNumSynapsesNeurons(int _netId, int& _maxNumPostSynN, int& _maxNumPreSynN) {
	int *tempNpre, *tempNpost;
	int nSrc, nDest, numNeurons;
	std::map<int, int> globalToLocalOffset;

	numNeurons = networkConfigs[_netId].numNAssigned;
	tempNpre = new int[numNeurons];
	tempNpost = new int[numNeurons];
	memset(tempNpre, 0, sizeof(int) * numNeurons);
	memset(tempNpost, 0, sizeof(int) * numNeurons);

	// load the offset between global and local neuron ids
	for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[_netId].begin(); grpIt != groupPartitionLists[_netId].end(); grpIt++) {
		globalToLocalOffset[grpIt->gGrpId] = grpIt->GtoLOffset;
	}

	// count the pre- and post-synapses of each neuron in the local network
	for (std::list<ConnectionInfo>::iterator connIt = connectionLists[_netId].begin(); connIt != connectionLists[_netId].end(); connIt++) {
		nSrc = connIt->nSrc + globalToLocalOffset[connIt->grpSrc];
		nDest = connIt->nDest + globalToLocalOffset[connIt->grpDest];
		assert(nSrc < numNeurons); assert(nDest < numNeurons);
		tempNpost[nSrc]++;
		tempNpre[nDest]++;
	}

	// find the maximum over all neurons
	_maxNumPostSynN = 0;
	_maxNumPreSynN = 0;
	for (int nId = 0; nId < networkConfigs[_netId].numN; nId++) {
		if (tempNpost[nId] > _maxNumPostSynN) _maxNumPostSynN = tempNpost[nId];
		if (tempNpre[nId] > _maxNumPreSynN) _maxNumPreSynN = tempNpre[nId];
	}

	delete[] tempNpre;
	delete[] tempNpost;
}
void SNN::findMaxSpikesD1D2(int _netId, unsigned int& _maxSpikesD1, unsigned int& _maxSpikesD2) {
	_maxSpikesD1 = 0; _maxSpikesD2 = 0;
	for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[_netId].begin(); grpIt != groupPartitionLists[_netId].end(); grpIt++) {
		if (grpIt->maxOutgoingDelay == 1)
			_maxSpikesD1 += groupConfigMap[grpIt->gGrpId].numN * NEURON_MAX_FIRING_RATE;
		else
			_maxSpikesD2 += groupConfigMap[grpIt->gGrpId].numN * NEURON_MAX_FIRING_RATE;
	}
}
void SNN::findNumN(int _netId, int& _numN, int& _numNExternal, int& _numNAssigned,
	int& _numNReg, int& _numNExcReg, int& _numNInhReg,
	int& _numNPois, int& _numNExcPois, int& _numNInhPois) {
	_numN = 0; _numNExternal = 0; _numNAssigned = 0;
	_numNReg = 0; _numNExcReg = 0; _numNInhReg = 0;
	_numNPois = 0; _numNExcPois = 0; _numNInhPois = 0;
	for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[_netId].begin(); grpIt != groupPartitionLists[_netId].end(); grpIt++) {
		int sizeN = groupConfigMap[grpIt->gGrpId].numN;
		unsigned int type = groupConfigMap[grpIt->gGrpId].type;
		if (IS_EXCITATORY_TYPE(type) && (type & POISSON_NEURON) && grpIt->netId == _netId) {
			_numN += sizeN; _numNPois += sizeN; _numNExcPois += sizeN;
		} else if (IS_INHIBITORY_TYPE(type) && (type & POISSON_NEURON) && grpIt->netId == _netId) {
			_numN += sizeN; _numNPois += sizeN; _numNInhPois += sizeN;
		} else if (IS_EXCITATORY_TYPE(type) && !(type & POISSON_NEURON) && grpIt->netId == _netId) {
			_numN += sizeN; _numNReg += sizeN; _numNExcReg += sizeN;
		} else if (IS_INHIBITORY_TYPE(type) && !(type & POISSON_NEURON) && grpIt->netId == _netId) {
			_numN += sizeN; _numNReg += sizeN; _numNInhReg += sizeN;
		} else if (grpIt->netId != _netId) {
			_numNExternal += sizeN;
		} else {
			KERNEL_ERROR("Can't find category for the group [%d] ", grpIt->gGrpId);
			exitSimulation(-1);
		}

		_numNAssigned += sizeN;
	}

	assert(_numNReg == _numNExcReg + _numNInhReg);
	assert(_numNPois == _numNExcPois + _numNInhPois);
	assert(_numN == _numNReg + _numNPois);
	assert(_numNAssigned == _numN + _numNExternal);
}
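// NOTE (worked example, illustration only): the asserts above encode the neuron-count bookkeeping
// used throughout this file: numN = numNReg + numNPois, numNReg = numNExcReg + numNInhReg,
// numNPois = numNExcPois + numNInhPois, and numNAssigned = numN + numNExternal.
// With made-up counts of 80 excitatory + 20 inhibitory regular neurons, 10 excitatory Poisson
// generators on this partition, and 5 neurons owned by another partition:
//   numNReg = 100, numNPois = 10, numN = 110, numNExternal = 5, numNAssigned = 115.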
void SNN::findNumNSpikeGenAndOffset(int _netId) {
	networkConfigs[_netId].numNSpikeGen = 0;

	for (int lGrpId = 0; lGrpId < networkConfigs[_netId].numGroups; lGrpId++) {
		if (_netId == groupConfigs[_netId][lGrpId].netId && groupConfigs[_netId][lGrpId].isSpikeGenerator && groupConfigs[_netId][lGrpId].isSpikeGenFunc) {
			groupConfigs[_netId][lGrpId].Noffset = networkConfigs[_netId].numNSpikeGen;
			networkConfigs[_netId].numNSpikeGen += groupConfigs[_netId][lGrpId].numN;
		}
	}

	assert(networkConfigs[_netId].numNSpikeGen <= networkConfigs[_netId].numNPois);
}
void SNN::findNumSynapsesNetwork(int _netId, int& _numPostSynNet, int& _numPreSynNet) {
	_numPostSynNet = 0;
	_numPreSynNet = 0;

	for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[_netId].begin(); grpIt != groupPartitionLists[_netId].end(); grpIt++) {
		_numPostSynNet += grpIt->numPostSynapses;
		_numPreSynNet += grpIt->numPreSynapses;
		assert(_numPostSynNet < INT_MAX);
		assert(_numPreSynNet < INT_MAX);
	}

	assert(_numPreSynNet == _numPostSynNet);
}
void SNN::fetchGroupState(int netId, int lGrpId) {
	if (netId < CPU_RUNTIME_BASE) // GPU runtime
		copyGroupState(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false);
	else // CPU runtime
		copyGroupState(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false);
}

void SNN::fetchWeightState(int netId, int lGrpId) {
	if (netId < CPU_RUNTIME_BASE) // GPU runtime
		copyWeightState(netId, lGrpId, cudaMemcpyDeviceToHost);
	else // CPU runtime
		copyWeightState(netId, lGrpId);
}
void SNN::fetchNeuronSpikeCount(int gGrpId) {
	if (gGrpId == ALL) {
		for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
			fetchNeuronSpikeCount(gGrpId);
		}
	} else {
		int netId = groupConfigMDMap[gGrpId].netId;
		int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
		int LtoGOffset = groupConfigMDMap[gGrpId].LtoGOffset;

		if (netId < CPU_RUNTIME_BASE) // GPU runtime
			copyNeuronSpikeCount(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false, LtoGOffset);
		else // CPU runtime
			copyNeuronSpikeCount(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false, LtoGOffset);
	}
}
void SNN::fetchSTPState(int gGrpId) {
}
void SNN::fetchConductanceAMPA(int gGrpId) {
	if (gGrpId == ALL) {
		for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
			fetchConductanceAMPA(gGrpId);
		}
	} else {
		int netId = groupConfigMDMap[gGrpId].netId;
		int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
		int LtoGOffset = groupConfigMDMap[gGrpId].LtoGOffset;

		if (netId < CPU_RUNTIME_BASE) // GPU runtime
			copyConductanceAMPA(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false, LtoGOffset);
		else // CPU runtime
			copyConductanceAMPA(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false, LtoGOffset);
	}
}

void SNN::fetchConductanceNMDA(int gGrpId) {
	if (gGrpId == ALL) {
		for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
			fetchConductanceNMDA(gGrpId);
		}
	} else {
		int netId = groupConfigMDMap[gGrpId].netId;
		int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
		int LtoGOffset = groupConfigMDMap[gGrpId].LtoGOffset;

		if (netId < CPU_RUNTIME_BASE) // GPU runtime
			copyConductanceNMDA(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false, LtoGOffset);
		else // CPU runtime
			copyConductanceNMDA(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false, LtoGOffset);
	}
}

void SNN::fetchConductanceGABAa(int gGrpId) {
	if (gGrpId == ALL) {
		for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
			fetchConductanceGABAa(gGrpId);
		}
	} else {
		int netId = groupConfigMDMap[gGrpId].netId;
		int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
		int LtoGOffset = groupConfigMDMap[gGrpId].LtoGOffset;

		if (netId < CPU_RUNTIME_BASE) // GPU runtime
			copyConductanceGABAa(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false, LtoGOffset);
		else // CPU runtime
			copyConductanceGABAa(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false, LtoGOffset);
	}
}

void SNN::fetchConductanceGABAb(int gGrpId) {
	if (gGrpId == ALL) {
		for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
			fetchConductanceGABAb(gGrpId);
		}
	} else {
		int netId = groupConfigMDMap[gGrpId].netId;
		int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
		int LtoGOffset = groupConfigMDMap[gGrpId].LtoGOffset;

		if (netId < CPU_RUNTIME_BASE) // GPU runtime
			copyConductanceGABAb(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false, LtoGOffset);
		else // CPU runtime
			copyConductanceGABAb(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false, LtoGOffset);
	}
}
void SNN::fetchGrpIdsLookupArray(int netId) {
	if (netId < CPU_RUNTIME_BASE) // GPU runtime
		copyGrpIdsLookupArray(netId, cudaMemcpyDeviceToHost);
	else // CPU runtime
		copyGrpIdsLookupArray(netId);
}

void SNN::fetchConnIdsLookupArray(int netId) {
	if (netId < CPU_RUNTIME_BASE) // GPU runtime
		copyConnIdsLookupArray(netId, cudaMemcpyDeviceToHost);
	else // CPU runtime
		copyConnIdsLookupArray(netId);
}

void SNN::fetchLastSpikeTime(int netId) {
	if (netId < CPU_RUNTIME_BASE) // GPU runtime
		copyLastSpikeTime(netId, cudaMemcpyDeviceToHost);
	else // CPU runtime
		copyLastSpikeTime(netId);
}

void SNN::fetchPreConnectionInfo(int netId) {
	if (netId < CPU_RUNTIME_BASE) // GPU runtime
		copyPreConnectionInfo(netId, ALL, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false);
	else // CPU runtime
		copyPreConnectionInfo(netId, ALL, &managerRuntimeData, &runtimeData[netId], false);
}

void SNN::fetchPostConnectionInfo(int netId) {
	if (netId < CPU_RUNTIME_BASE) // GPU runtime
		copyPostConnectionInfo(netId, ALL, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false);
	else // CPU runtime
		copyPostConnectionInfo(netId, ALL, &managerRuntimeData, &runtimeData[netId], false);
}

void SNN::fetchSynapseState(int netId) {
	if (netId < CPU_RUNTIME_BASE) // GPU runtime
		copySynapseState(netId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false);
	else // CPU runtime
		copySynapseState(netId, &managerRuntimeData, &runtimeData[netId], false);
}
void SNN::fetchNetworkSpikeCount() {
	unsigned int spikeCountD1, spikeCountD2, spikeCountExtD1, spikeCountExtD2;

	managerRuntimeData.spikeCountD1 = 0;
	managerRuntimeData.spikeCountD2 = 0;
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			if (netId < CPU_RUNTIME_BASE) { // GPU runtime
				copyNetworkSpikeCount(netId, cudaMemcpyDeviceToHost,
					&spikeCountD1, &spikeCountD2,
					&spikeCountExtD1, &spikeCountExtD2);
			} else { // CPU runtime
				copyNetworkSpikeCount(netId,
					&spikeCountD1, &spikeCountD2,
					&spikeCountExtD1, &spikeCountExtD2);
			}

			// exclude externally routed spikes so they are not counted twice
			managerRuntimeData.spikeCountD2 += spikeCountD2 - spikeCountExtD2;
			managerRuntimeData.spikeCountD1 += spikeCountD1 - spikeCountExtD1;
		}
	}
}
void SNN::fetchSpikeTables(int netId) {
	if (netId < CPU_RUNTIME_BASE) // GPU runtime
		copySpikeTables(netId, cudaMemcpyDeviceToHost);
	else // CPU runtime
		copySpikeTables(netId);
}

void SNN::fetchNeuronStateBuffer(int netId, int lGrpId) {
	if (netId < CPU_RUNTIME_BASE) // GPU runtime
		copyNeuronStateBuffer(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false);
	else // CPU runtime
		copyNeuronStateBuffer(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false);
}

void SNN::fetchExtFiringTable(int netId) {
	if (netId < CPU_RUNTIME_BASE) // GPU runtime
		copyExtFiringTable(netId, cudaMemcpyDeviceToHost);
	else // CPU runtime
		copyExtFiringTable(netId);
}

void SNN::fetchTimeTable(int netId) {
	if (netId < CPU_RUNTIME_BASE) // GPU runtime
		copyTimeTable(netId, cudaMemcpyDeviceToHost);
	else // CPU runtime
		copyTimeTable(netId, true);
}

void SNN::writeBackTimeTable(int netId) {
	if (netId < CPU_RUNTIME_BASE) // GPU runtime
		copyTimeTable(netId, cudaMemcpyHostToDevice);
	else // CPU runtime
		copyTimeTable(netId, false);
}
void SNN::transferSpikes(void* dest, int destNetId, void* src, int srcNetId, int size) {
#ifndef __NO_CUDA__
	if (srcNetId < CPU_RUNTIME_BASE && destNetId < CPU_RUNTIME_BASE) {
		// GPU-to-GPU peer copy
		checkAndSetGPUDevice(destNetId);
		CUDA_CHECK_ERRORS(cudaMemcpyPeer(dest, destNetId, src, srcNetId, size));
	} else if (srcNetId >= CPU_RUNTIME_BASE && destNetId < CPU_RUNTIME_BASE) {
		// CPU-to-GPU copy
		checkAndSetGPUDevice(destNetId);
		CUDA_CHECK_ERRORS(cudaMemcpy(dest, src, size, cudaMemcpyHostToDevice));
	} else if (srcNetId < CPU_RUNTIME_BASE && destNetId >= CPU_RUNTIME_BASE) {
		// GPU-to-CPU copy
		checkAndSetGPUDevice(srcNetId);
		CUDA_CHECK_ERRORS(cudaMemcpy(dest, src, size, cudaMemcpyDeviceToHost));
	} else {
		// CPU-to-CPU copy
		memcpy(dest, src, size);
	}
#else
	memcpy(dest, src, size);
#endif
}
void SNN::convertExtSpikesD2(int netId, int startIdx, int endIdx, int GtoLOffset) {
	if (netId < CPU_RUNTIME_BASE) // GPU runtime
		convertExtSpikesD2_GPU(netId, startIdx, endIdx, GtoLOffset);
	else // CPU runtime
		convertExtSpikesD2_CPU(netId, startIdx, endIdx, GtoLOffset);
}

void SNN::convertExtSpikesD1(int netId, int startIdx, int endIdx, int GtoLOffset) {
	if (netId < CPU_RUNTIME_BASE) // GPU runtime
		convertExtSpikesD1_GPU(netId, startIdx, endIdx, GtoLOffset);
	else // CPU runtime
		convertExtSpikesD1_CPU(netId, startIdx, endIdx, GtoLOffset);
}
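// NOTE (illustration only): GtoLOffset converts a global neuron ID into the destination
// partition's local index, with GtoLOffset = lStartN - gStartN (see assignGroup above), so
// lNId = gNId + GtoLOffset. The convertExtSpikes* routines apply this offset to spikes received
// from other partitions before they enter the local firing tables. Hypothetical one-liner:
//   int lNId = gNId + GtoLOffset;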
void SNN::routeSpikes() {
	int firingTableIdxD2, firingTableIdxD1;
	int GtoLOffset;

	for (std::list<RoutingTableEntry>::iterator rteItr = spikeRoutingTable.begin(); rteItr != spikeRoutingTable.end(); rteItr++) {
		int srcNetId = rteItr->srcNetId;
		int destNetId = rteItr->destNetId;

		fetchExtFiringTable(srcNetId);

		fetchTimeTable(destNetId);
		firingTableIdxD2 = managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1];
		firingTableIdxD1 = managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1];

#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or macOS
		pthread_t threads[(2 * networkConfigs[srcNetId].numGroups) + 1];
		// ...
		int threadCount = 0;
#endif

		for (int lGrpId = 0; lGrpId < networkConfigs[srcNetId].numGroups; lGrpId++) {
			// route spikes of the D2 (delay >= 2 ms) firing table
			if (groupConfigs[srcNetId][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD2[lGrpId] > 0) {
				// look up the destination copy of the group to get its global-to-local offset
				bool isFound = false;
				for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[destNetId].begin(); grpIt != groupPartitionLists[destNetId].end(); grpIt++) {
					if (grpIt->gGrpId == groupConfigs[srcNetId][lGrpId].gGrpId) {
						GtoLOffset = grpIt->GtoLOffset;
						isFound = true;
						// ...
					}
				}

				if (isFound) {
					transferSpikes(runtimeData[destNetId].firingTableD2 + firingTableIdxD2, destNetId,
						/* ... */);

					if (destNetId < CPU_RUNTIME_BASE) { // GPU runtime
						convertExtSpikesD2_GPU(destNetId, firingTableIdxD2,
							/* ... */);
					} else { // CPU runtime
#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
						convertExtSpikesD2_CPU(destNetId, firingTableIdxD2,
							/* ... */);
#else // Linux or macOS: convert in a worker thread pinned to a core
						pthread_attr_t attr;
						pthread_attr_init(&attr);
						// ...
						pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

						argsThreadRoutine[threadCount].snn_pointer = this;
						argsThreadRoutine[threadCount].netId = destNetId;
						argsThreadRoutine[threadCount].lGrpId = 0;
						argsThreadRoutine[threadCount].startIdx = firingTableIdxD2;
						// ...
						argsThreadRoutine[threadCount].GtoLOffset = GtoLOffset;

						pthread_create(&threads[threadCount], &attr, &SNN::helperConvertExtSpikesD2_CPU, (void*)&argsThreadRoutine[threadCount]);
						pthread_attr_destroy(&attr);
						// ...
#endif
					}
					// ...
				}
			}

			// route spikes of the D1 (delay == 1 ms) firing table
			if (groupConfigs[srcNetId][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD1[lGrpId] > 0) {
				bool isFound = false;
				for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[destNetId].begin(); grpIt != groupPartitionLists[destNetId].end(); grpIt++) {
					if (grpIt->gGrpId == groupConfigs[srcNetId][lGrpId].gGrpId) {
						GtoLOffset = grpIt->GtoLOffset;
						isFound = true;
						// ...
					}
				}

				if (isFound) {
					transferSpikes(runtimeData[destNetId].firingTableD1 + firingTableIdxD1, destNetId,
						/* ... */);

					if (destNetId < CPU_RUNTIME_BASE) { // GPU runtime
						convertExtSpikesD1_GPU(destNetId, firingTableIdxD1,
							/* ... */);
					} else { // CPU runtime
#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
						convertExtSpikesD1_CPU(destNetId, firingTableIdxD1,
							/* ... */);
#else // Linux or macOS
						pthread_attr_t attr;
						pthread_attr_init(&attr);
						// ...
						pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

						argsThreadRoutine[threadCount].snn_pointer = this;
						argsThreadRoutine[threadCount].netId = destNetId;
						argsThreadRoutine[threadCount].lGrpId = 0;
						argsThreadRoutine[threadCount].startIdx = firingTableIdxD1;
						// ...
						argsThreadRoutine[threadCount].GtoLOffset = GtoLOffset;

						pthread_create(&threads[threadCount], &attr, &SNN::helperConvertExtSpikesD1_CPU, (void*)&argsThreadRoutine[threadCount]);
						pthread_attr_destroy(&attr);
						// ...
#endif
					}
					// ...
				}
			}
		}

#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or macOS: wait for the worker threads
		for (int i = 0; i < threadCount; i++) {
			pthread_join(threads[i], NULL);
		}
#endif

		managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD2;
		managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD1;
		writeBackTimeTable(destNetId);
	}
}
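// Illustrative sketch (not part of the original source) of how a RoutingTableEntry is consumed:
// for a hypothetical entry {srcNetId=0, destNetId=1}, routeSpikes() (1) pulls runtime 0's
// external firing tables into the manager, (2) reads runtime 1's current D1/D2 firing-table
// write positions from its time tables, (3) copies the external spikes into runtime 1's firing
// tables at those positions via transferSpikes(), (4) converts the copied entries from global to
// local neuron IDs (convertExtSpikesD1/D2), and (5) writes the advanced write positions back
// with writeBackTimeTable(1).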
float SNN::generateWeight(int connProp, float initWt, float maxWt, int nid, int grpId) {
	// ...
}

bool SNN::isConnectionPlastic(short int connId) {
	assert(connId != ALL);
	assert(connId < numConnections);
	// ...
}

bool SNN::isGroupWithHomeostasis(int grpId) {
	// ...
	return (groupConfigMap[grpId].homeoConfig.WithHomeostasis);
}
void SNN::verifyNetwork() {
	// verify compartmental connections
	verifyCompartments();
	// ...
	// verify homeostasis settings
	verifyHomeostasis();
	// ...
	if (sim_with_stp && glbNetworkConfig.maxDelay > 1) {
		KERNEL_ERROR("STP with delays > 1 ms is currently not supported.");
		// ...
	}

	if (glbNetworkConfig.maxDelay > MAX_SYN_DELAY) {
		KERNEL_ERROR("You are using a synaptic delay (%d) greater than MAX_SYN_DELAY defined in config.h", glbNetworkConfig.maxDelay);
		// ...
	}
}
void SNN::verifyCompartments() {
	for (std::map<int, compConnectConfig>::iterator it = compConnectConfigMap.begin(); it != compConnectConfigMap.end(); it++) {
		int grpLower = it->second.grpSrc;
		int grpUpper = it->second.grpDest;

		// a compartmental connection may only link two compartmentally enabled groups
		if (!groupConfigMap[grpLower].withCompartments) {
			KERNEL_ERROR("Group %s(%d) is not compartmentally enabled, cannot be part of a compartmental connection.",
				groupConfigMap[grpLower].grpName.c_str(), grpLower);
			// ...
		}
		if (!groupConfigMap[grpUpper].withCompartments) {
			KERNEL_ERROR("Group %s(%d) is not compartmentally enabled, cannot be part of a compartmental connection.",
				groupConfigMap[grpUpper].grpName.c_str(), grpUpper);
			// ...
		}
	}
}

void SNN::verifySTDP() {
	for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
		if (groupConfigMap[gGrpId].stdpConfig.WithSTDP) {
			// for each group with STDP, check that at least one incoming connection is plastic
			bool isAnyPlastic = false;
			for (std::map<int, ConnectConfig>::iterator it = connectConfigMap.begin(); it != connectConfigMap.end(); it++) {
				if (it->second.grpDest == gGrpId) {
					// ...
				}
			}
			if (!isAnyPlastic) {
				KERNEL_ERROR("If STDP on group %d (%s) is set, group must have some incoming plastic connections.",
					gGrpId, groupConfigMap[gGrpId].grpName.c_str());
				// ...
			}
		}
	}
}

void SNN::verifyHomeostasis() {
	for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
		if (groupConfigMap[gGrpId].homeoConfig.WithHomeostasis) {
			if (!groupConfigMap[gGrpId].stdpConfig.WithSTDP) {
				KERNEL_ERROR("If homeostasis is enabled on group %d (%s), then STDP must be enabled, too.",
					gGrpId, groupConfigMap[gGrpId].grpName.c_str());
				// ...
			}
		}
	}
}
	// a pre/post pair lies within the receptive field iff the normalized distance is in [0, 1]
	return (rfDist >= 0.0 && rfDist <= 1.0);
}

double SNN::getRFDist3D(const RadiusRF& radius, const Point3D& pre, const Point3D& post) {
	double rfDist = -1.0;
	// ...
	// a zero radius requires pre and post to coincide in that dimension
	if ((radius.radX == 0 && pre.x != post.x) || (radius.radY == 0 && pre.y != post.y) || (radius.radZ == 0 && pre.z != post.z)) {
		rfDist = -1.0;
	} else {
		// normalized ellipsoid distance; a negative radius leaves that dimension unconstrained
		double xTerm = (radius.radX <= 0) ? 0.0 : pow(pre.x - post.x, 2) / pow(radius.radX, 2);
		double yTerm = (radius.radY <= 0) ? 0.0 : pow(pre.y - post.y, 2) / pow(radius.radY, 2);
		double zTerm = (radius.radZ <= 0) ? 0.0 : pow(pre.z - post.z, 2) / pow(radius.radZ, 2);
		rfDist = xTerm + yTerm + zTerm;
	}

	return rfDist;
}
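// Illustrative sketch (not part of the original source): the value computed above is the
// normalized ellipsoid distance
//   rfDist = ((x_pre - x_post)/radX)^2 + ((y_pre - y_post)/radY)^2 + ((z_pre - z_post)/radZ)^2,
// where a non-positive radius drops that dimension from the sum and a zero radius additionally
// requires pre and post to coincide in that dimension. For example, with RadiusRF(3, 3, 0),
// pre = (1, 2, 0) and post = (2, 4, 0):
//   rfDist = (1/3)^2 + (2/3)^2 = 0.111 + 0.444 = 0.556  ->  inside the RF (0 <= rfDist <= 1).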
void SNN::partitionSNN() {
	// ...
	numAvailableGPUs = configGPUDevice();

	for (std::map<int, GroupConfigMD>::iterator grpIt = groupConfigMDMap.begin(); grpIt != groupConfigMDMap.end(); grpIt++) {
		// assign each group to the runtime (partition) preferred by the user
		int gGrpId = grpIt->second.gGrpId;
		int netId = groupConfigMap[gGrpId].preferredNetId;
		// ...
			grpIt->second.netId = netId;
			numAssignedNeurons[netId] += groupConfigMap[gGrpId].numN;
			groupPartitionLists[netId].push_back(grpIt->second);
		// ...
		if (preferredSimMode_ == CPU_MODE) {
			// ...
		} else if (preferredSimMode_ == GPU_MODE) {
			// ...
		}
		// ...
		if (grpIt->second.netId == -1) {
			KERNEL_ERROR("Can't assign the group [%d] to any partition", grpIt->second.gGrpId);
			// ...
		}
	}

	// collect local connections (source and destination live on the same partition)
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			for (std::map<int, ConnectConfig>::iterator connIt = connectConfigMap.begin(); connIt != connectConfigMap.end(); connIt++) {
				if (groupConfigMDMap[connIt->second.grpSrc].netId == netId && groupConfigMDMap[connIt->second.grpDest].netId == netId) {
					localConnectLists[netId].push_back(connectConfigMap[connIt->second.connId]);
					// ...
				}
			}
			for (std::map<int, compConnectConfig>::iterator connIt = compConnectConfigMap.begin(); connIt != compConnectConfigMap.end(); connIt++) {
				if (groupConfigMDMap[connIt->second.grpSrc].netId == netId && groupConfigMDMap[connIt->second.grpDest].netId == netId) {
					localCompConnectLists[netId].push_back(compConnectConfigMap[connIt->second.connId]);
					// ...
				}
			}
		}
	}

	// collect external connections (source and destination on different partitions) and build the spike routing table
	spikeRoutingTable.clear();
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			for (std::map<int, ConnectConfig>::iterator connIt = connectConfigMap.begin(); connIt != connectConfigMap.end(); connIt++) {
				int srcNetId = groupConfigMDMap[connIt->second.grpSrc].netId;
				int destNetId = groupConfigMDMap[connIt->second.grpDest].netId;
				if (srcNetId == netId && destNetId != netId) {
					// ...
					GroupConfigMD targetGroup;
					std::list<GroupConfigMD>::iterator srcGrpIt, destGrpIt;

					// mark the source group as having external connections
					targetGroup.gGrpId = connIt->second.grpSrc;
					srcGrpIt = find(groupPartitionLists[srcNetId].begin(), groupPartitionLists[srcNetId].end(), targetGroup);
					assert(srcGrpIt != groupPartitionLists[srcNetId].end());
					srcGrpIt->hasExternalConnect = true;

					// add a copy of the destination group to the source partition if it is not there yet
					targetGroup.gGrpId = connIt->second.grpDest;
					destGrpIt = find(groupPartitionLists[srcNetId].begin(), groupPartitionLists[srcNetId].end(), targetGroup);
					if (destGrpIt == groupPartitionLists[srcNetId].end()) {
						numAssignedNeurons[srcNetId] += groupConfigMap[connIt->second.grpDest].numN;
						groupPartitionLists[srcNetId].push_back(groupConfigMDMap[connIt->second.grpDest]);
					}

					// add a copy of the source group to the destination partition if it is not there yet
					targetGroup.gGrpId = connIt->second.grpSrc;
					srcGrpIt = find(groupPartitionLists[destNetId].begin(), groupPartitionLists[destNetId].end(), targetGroup);
					if (srcGrpIt == groupPartitionLists[destNetId].end()) {
						numAssignedNeurons[destNetId] += groupConfigMap[connIt->second.grpSrc].numN;
						groupPartitionLists[destNetId].push_back(groupConfigMDMap[connIt->second.grpSrc]);
					}

					externalConnectLists[srcNetId].push_back(connectConfigMap[connIt->second.connId]);
					// ...
					spikeRoutingTable.push_back(rte);
				}
			}
		}
	}
	// ...
	spikeRoutingTable.unique();

	// assign local group ids and local neuron ids on each partition
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			int availableNeuronId = 0;
			int localGroupId = 0;
			for (int order = 0; order < 5; order++) {
				for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
					unsigned int type = groupConfigMap[grpIt->gGrpId].type;
					// orders 0-3 select the partition's own groups by type; order 4 appends
					// the copies of groups that live on other partitions
					if (/* ... */) {
						availableNeuronId = assignGroup(grpIt, localGroupId, availableNeuronId);
					} else if (/* ... */) {
						availableNeuronId = assignGroup(grpIt, localGroupId, availableNeuronId);
					} else if (/* ... */) {
						availableNeuronId = assignGroup(grpIt, localGroupId, availableNeuronId);
					} else if (/* ... */) {
						availableNeuronId = assignGroup(grpIt, localGroupId, availableNeuronId);
					} else if (order == 4 && grpIt->netId != netId) {
						availableNeuronId = assignGroup(grpIt, localGroupId, availableNeuronId);
					}
				}
			}
			assert(availableNeuronId == numAssignedNeurons[netId]);
			assert(localGroupId == groupPartitionLists[netId].size());
		}
	}

	// generate connections, either from the connect calls or from a saved simulation file
	if (loadSimFID == NULL) {
		// ...
	} else {
		// ...
		loadSimulation_internal(false);
		// ...
	}

	collectGlobalNetworkConfigP();

	// print the global network configuration
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			// ...
			for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++)
				printGroupInfo(netId, grpIt);
		}
		if (!localConnectLists[netId].empty() || !externalConnectLists[netId].empty()) {
			// ...
			for (std::list<ConnectConfig>::iterator connIt = localConnectLists[netId].begin(); connIt != localConnectLists[netId].end(); connIt++)
				printConnectionInfo(netId, connIt);
			for (std::list<ConnectConfig>::iterator connIt = externalConnectLists[netId].begin(); connIt != externalConnectLists[netId].end(); connIt++)
				printConnectionInfo(netId, connIt);
		}
	}
	printSikeRoutingInfo();
	// ...
}
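// Illustrative sketch (not part of the original source), assuming a hypothetical network with
// group A preferring GPU runtime 0 and group B preferring a CPU runtime: after partitionSNN(),
//   groupPartitionLists[0]  holds A (plus a copy of B if an A->B connection crosses runtimes),
//   localConnectLists[net]  holds connections whose source and destination share a runtime,
//   externalConnectLists[0] holds the cross-runtime A->B connection, and
//   spikeRoutingTable       holds one entry {srcNetId = 0, destNetId = B's runtime}, which
//                           routeSpikes() consumes every simulated millisecond.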
int SNN::loadSimulation_internal(bool onlyPlastic) {
	// remember the file position so that the file can be re-read on subsequent calls
	long file_position = ftell(loadSimFID);
	// ...
	int tmpInt;
	float tmpFloat;
	size_t result;
	bool readErr = false;
	// ...
	fseek(loadSimFID, 0, SEEK_SET);

	// read the file signature
	result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
	readErr |= (result != 1);
	if (tmpInt != 294338571) {
		KERNEL_ERROR("loadSimulation: Unknown file signature. This does not seem to be a "
			"simulation file created with CARLsim::saveSimulation.");
		// ...
	}

	// read the file version number
	result = fread(&tmpFloat, sizeof(float), 1, loadSimFID);
	readErr |= (result != 1);
	if (tmpFloat > 0.3f) {
		KERNEL_ERROR("loadSimulation: Unsupported version number (%f)", tmpFloat);
		// ...
	}

	// read two header floats (not validated here)
	result = fread(&tmpFloat, sizeof(float), 1, loadSimFID);
	readErr |= (result != 1);

	result = fread(&tmpFloat, sizeof(float), 1, loadSimFID);
	readErr |= (result != 1);

	// read the number of neurons
	result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
	readErr |= (result != 1);
	if (tmpInt != glbNetworkConfig.numN) {
		KERNEL_ERROR("loadSimulation: Number of neurons in file (%d) and simulation (%d) don't match.",
			tmpInt, glbNetworkConfig.numN);
		// ...
	}
	// ...

	// read the number of groups
	result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
	readErr |= (result != 1);
	if (tmpInt != numGroups) {
		KERNEL_ERROR("loadSimulation: Number of groups in file (%d) and simulation (%d) don't match.",
			tmpInt, numGroups);
		// ...
	}

	if (readErr) {
		fprintf(stderr, "loadSimulation: Error while reading file header");
		// ...
	}

	// read the per-group records
	for (int g = 0; g < numGroups; g++) {
		// read and check StartN
		result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
		readErr |= (result != 1);
		if (tmpInt != groupConfigMDMap[g].gStartN) {
			KERNEL_ERROR("loadSimulation: StartN in file (%d) and grpInfo (%d) for group %d don't match.",
				tmpInt, groupConfigMDMap[g].gStartN, g);
			// ...
		}

		// read and check EndN
		result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
		readErr |= (result != 1);
		if (tmpInt != groupConfigMDMap[g].gEndN) {
			KERNEL_ERROR("loadSimulation: EndN in file (%d) and grpInfo (%d) for group %d don't match.",
				tmpInt, groupConfigMDMap[g].gEndN, g);
			// ...
		}

		// read and check the grid dimensions
		result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
		readErr |= (result != 1);
		if (tmpInt != groupConfigMap[g].grid.numX) {
			KERNEL_ERROR("loadSimulation: numX in file (%d) and grpInfo (%d) for group %d don't match.",
				tmpInt, groupConfigMap[g].grid.numX, g);
			// ...
		}

		result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
		readErr |= (result != 1);
		if (tmpInt != groupConfigMap[g].grid.numY) {
			KERNEL_ERROR("loadSimulation: numY in file (%d) and grpInfo (%d) for group %d don't match.",
				tmpInt, groupConfigMap[g].grid.numY, g);
			// ...
		}

		result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
		readErr |= (result != 1);
		if (tmpInt != groupConfigMap[g].grid.numZ) {
			KERNEL_ERROR("loadSimulation: numZ in file (%d) and grpInfo (%d) for group %d don't match.",
				tmpInt, groupConfigMap[g].grid.numZ, g);
			// ...
		}

		// read and check the group name
		char name[100];
		result = fread(name, sizeof(char), 100, loadSimFID);
		readErr |= (result != 100);
		if (strcmp(name, groupConfigMap[g].grpName.c_str()) != 0) {
			KERNEL_ERROR("loadSimulation: Group names in file (%s) and grpInfo (%s) don't match.", name,
				groupConfigMap[g].grpName.c_str());
			// ...
		}
	}

	if (readErr) {
		KERNEL_ERROR("loadSimulation: Error while reading group info");
		// ...
	}
	// read the synapses saved for each partition (net)
	int net_count = 0;
	result = fread(&net_count, sizeof(int), 1, loadSimFID);
	readErr |= (result != 1);

	for (int i = 0; i < net_count; i++) {
		int synapse_count = 0;
		result = fread(&synapse_count, sizeof(int), 1, loadSimFID);
		for (int j = 0; j < synapse_count; j++) {
			int gGrpIdPre, gGrpIdPost, grpNIdPre, grpNIdPost, connId, delay;
			float weight, maxWeight;

			// read the pre and post group ids
			result = fread(&gGrpIdPre, sizeof(int), 1, loadSimFID);
			readErr |= (result != 1);

			result = fread(&gGrpIdPost, sizeof(int), 1, loadSimFID);
			readErr |= (result != 1);

			// read the pre and post neuron ids (relative to their groups)
			result = fread(&grpNIdPre, sizeof(int), 1, loadSimFID);
			readErr |= (result != 1);

			result = fread(&grpNIdPost, sizeof(int), 1, loadSimFID);
			readErr |= (result != 1);

			// read the connection id
			result = fread(&connId, sizeof(int), 1, loadSimFID);
			readErr |= (result != 1);

			// read the weight and the maximum weight
			result = fread(&weight, sizeof(float), 1, loadSimFID);
			readErr |= (result != 1);

			result = fread(&maxWeight, sizeof(float), 1, loadSimFID);
			readErr |= (result != 1);

			// read the delay
			result = fread(&delay, sizeof(int), 1, loadSimFID);
			readErr |= (result != 1);

			// check that the connection's groups match the current configuration
			if (connectConfigMap[connId].grpSrc != gGrpIdPre) {
				KERNEL_ERROR("loadSimulation: source group in file (%d) and in simulation (%d) for connection %d don't match.",
					gGrpIdPre, connectConfigMap[connId].grpSrc, connId);
				// ...
			}

			if (connectConfigMap[connId].grpDest != gGrpIdPost) {
				KERNEL_ERROR("loadSimulation: dest group in file (%d) and in simulation (%d) for connection %d don't match.",
					gGrpIdPost, connectConfigMap[connId].grpDest, connId);
				// ...
			}

			// a connection is external if pre and post live on different partitions
			int netIdPre = groupConfigMDMap[gGrpIdPre].netId;
			int netIdPost = groupConfigMDMap[gGrpIdPost].netId;
			bool isExternal = (netIdPre != netIdPost);

			// convert the neuron ids from group-relative to global
			int globalNIdPre = groupConfigMDMap[gGrpIdPre].gStartN + grpNIdPre;
			int globalNIdPost = groupConfigMDMap[gGrpIdPost].gStartN + grpNIdPost;

			bool connected = false;
			if (!isExternal) {
				for (std::list<ConnectConfig>::iterator connIt = localConnectLists[netIdPre].begin(); connIt != localConnectLists[netIdPre].end() && (!connected); connIt++) {
					if (connIt->connId == connId) {
						// ...
						connectNeurons(netIdPre, gGrpIdPre, gGrpIdPost, globalNIdPre, globalNIdPost, connId, weight, maxWeight, delay, -1);
						connected = true;
						// ...
						connIt->numberOfConnections++;

						// update the synapse counters of the pre and post groups
						std::list<GroupConfigMD>::iterator grpIt;
						GroupConfigMD targetGrp;

						targetGrp.gGrpId = gGrpIdPre;
						grpIt = std::find(groupPartitionLists[netIdPre].begin(), groupPartitionLists[netIdPre].end(), targetGrp);
						assert(grpIt != groupPartitionLists[netIdPre].end());
						grpIt->numPostSynapses += 1;

						targetGrp.gGrpId = gGrpIdPost;
						grpIt = std::find(groupPartitionLists[netIdPre].begin(), groupPartitionLists[netIdPre].end(), targetGrp);
						assert(grpIt != groupPartitionLists[netIdPre].end());
						grpIt->numPreSynapses += 1;
					}
				}
			} else {
				for (std::list<ConnectConfig>::iterator connIt = externalConnectLists[netIdPre].begin(); connIt != externalConnectLists[netIdPre].end() && (!connected); connIt++) {
					if (connIt->connId == connId) {
						// ...
						connectNeurons(netIdPre, gGrpIdPre, gGrpIdPost, globalNIdPre, globalNIdPost, connId, weight, maxWeight, delay, netIdPost);
						connected = true;
						// ...
						connIt->numberOfConnections++;

						// update the synapse counters on both the source and the destination partition
						std::list<GroupConfigMD>::iterator grpIt;
						GroupConfigMD targetGrp;

						targetGrp.gGrpId = gGrpIdPre;
						grpIt = std::find(groupPartitionLists[netIdPre].begin(), groupPartitionLists[netIdPre].end(), targetGrp);
						assert(grpIt != groupPartitionLists[netIdPre].end());
						grpIt->numPostSynapses += 1;

						targetGrp.gGrpId = gGrpIdPost;
						grpIt = std::find(groupPartitionLists[netIdPre].begin(), groupPartitionLists[netIdPre].end(), targetGrp);
						assert(grpIt != groupPartitionLists[netIdPre].end());
						grpIt->numPreSynapses += 1;

						targetGrp.gGrpId = gGrpIdPre;
						grpIt = std::find(groupPartitionLists[netIdPost].begin(), groupPartitionLists[netIdPost].end(), targetGrp);
						assert(grpIt != groupPartitionLists[netIdPost].end());
						grpIt->numPostSynapses += 1;

						targetGrp.gGrpId = gGrpIdPost;
						grpIt = std::find(groupPartitionLists[netIdPost].begin(), groupPartitionLists[netIdPost].end(), targetGrp);
						assert(grpIt != groupPartitionLists[netIdPost].end());
						grpIt->numPreSynapses += 1;
					}
				}
			}
		}
	}

	fseek(loadSimFID, file_position, SEEK_SET);

	return 0;
}
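// Illustrative sketch (not part of the original source) of the on-disk layout implied by the
// reads above; only the fields that are actually validated here are named, and further header
// fields not checked in this routine are elided:
//   int32   file signature (294338571)
//   float   version number (<= 0.3f supported here)
//   float x2 (read, not validated here)
//   int32   number of neurons            ... (remaining header fields elided)
//   int32   number of groups
//   per group:  int32 startN, int32 endN, int32 numX, int32 numY, int32 numZ, char name[100]
//   int32   number of partitions (net_count)
//   per partition: int32 synapse_count, then per synapse:
//       int32 gGrpIdPre, int32 gGrpIdPost, int32 grpNIdPre, int32 grpNIdPost,
//       int32 connId, float weight, float maxWeight, int32 delay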
void SNN::generateRuntimeSNN() {
	// generate the run-time group, connection, and network configurations
	generateRuntimeGroupConfigs();
	generateRuntimeConnectConfigs();
	generateRuntimeNetworkConfigs();

	// allocate the manager-side (host) buffers
	allocateManagerSpikeTables();
	// ...
	allocateManagerRuntimeData();

	// initialize one runtime per non-empty partition
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			// ...
			KERNEL_INFO("***************** Initializing GPU %d Runtime *************************", netId);
			// ...
			for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) {
				if (groupConfigs[netId][lGrpId].netId == netId && (groupConfigs[netId][lGrpId].Type & POISSON_NEURON)) {
					// ...
					generatePoissonGroupRuntime(netId, lGrpId);
				}
				if (groupConfigs[netId][lGrpId].netId == netId && !(groupConfigs[netId][lGrpId].Type & POISSON_NEURON)) {
					// ...
					generateGroupRuntime(netId, lGrpId);
				}
			}

			// map every local neuron id to its local group id
			for (int lNId = 0; lNId < networkConfigs[netId].numNAssigned; lNId++) {
				managerRuntimeData.grpIds[lNId] = -1;
				for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroupsAssigned; lGrpId++) {
					if (lNId >= groupConfigs[netId][lGrpId].lStartN && lNId <= groupConfigs[netId][lGrpId].lEndN) {
						managerRuntimeData.grpIds[lNId] = (short int)lGrpId;
						// ...
					}
				}
				assert(managerRuntimeData.grpIds[lNId] != -1);
			}

			// ...
			generateConnectionRuntime(netId);
			// ...
			generateCompConnectionRuntime(netId);

			// reset the manager-side state before it is copied to the runtime
			resetCurrent(netId);
			// ...
			resetConductances(netId);
			// ...
			resetSynapse(netId, false);
			// ...
		}
	}
	// ...
	numGPUs = 0; numCores = 0;
	// ...
}
void SNN::resetConductances(int netId) {
	if (networkConfigs[netId].sim_with_conductances) {
		memset(managerRuntimeData.gAMPA, 0, sizeof(float) * networkConfigs[netId].numNReg);
		if (networkConfigs[netId].sim_with_NMDA_rise) {
			memset(managerRuntimeData.gNMDA_r, 0, sizeof(float) * networkConfigs[netId].numNReg);
			memset(managerRuntimeData.gNMDA_d, 0, sizeof(float) * networkConfigs[netId].numNReg);
		} else {
			memset(managerRuntimeData.gNMDA, 0, sizeof(float) * networkConfigs[netId].numNReg);
		}
		memset(managerRuntimeData.gGABAa, 0, sizeof(float) * networkConfigs[netId].numNReg);
		if (networkConfigs[netId].sim_with_GABAb_rise) {
			memset(managerRuntimeData.gGABAb_r, 0, sizeof(float) * networkConfigs[netId].numNReg);
			memset(managerRuntimeData.gGABAb_d, 0, sizeof(float) * networkConfigs[netId].numNReg);
		} else {
			memset(managerRuntimeData.gGABAb, 0, sizeof(float) * networkConfigs[netId].numNReg);
		}
	}
}

void SNN::resetCurrent(int netId) {
	assert(managerRuntimeData.current != NULL);
	memset(managerRuntimeData.current, 0, sizeof(float) * networkConfigs[netId].numNReg);
}
void SNN::resetFiringInformation() {
	// ...
	resetPropogationBuffer();
	// ...
}

void SNN::resetTiming() {
	prevExecutionTime = cumExecutionTime;
	executionTime = 0.0f;
}

void SNN::resetNeuromodulator(int netId, int lGrpId) {
	managerRuntimeData.grpDA[lGrpId] = groupConfigs[netId][lGrpId].baseDP;
	managerRuntimeData.grp5HT[lGrpId] = groupConfigs[netId][lGrpId].base5HT;
	managerRuntimeData.grpACh[lGrpId] = groupConfigs[netId][lGrpId].baseACh;
	managerRuntimeData.grpNE[lGrpId] = groupConfigs[netId][lGrpId].baseNE;
}
void SNN::resetNeuron(int netId, int lGrpId, int lNId) {
	int gGrpId = groupConfigs[netId][lGrpId].gGrpId;
	assert(lNId < networkConfigs[netId].numNReg);

	if (groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_a == -1 && groupConfigMap[gGrpId].isLIF == 0) {
		KERNEL_ERROR("setNeuronParameters must be called for group %s (G:%d,L:%d)", groupConfigMap[gGrpId].grpName.c_str(), gGrpId, lGrpId);
		// ...
	}

	if (groupConfigMap[gGrpId].neuralDynamicsConfig.lif_tau_m == -1 && groupConfigMap[gGrpId].isLIF == 1) {
		KERNEL_ERROR("setNeuronParametersLIF must be called for group %s (G:%d,L:%d)", groupConfigMap[gGrpId].grpName.c_str(), gGrpId, lGrpId);
		// ...
	}

	// draw the Izhikevich parameters from the configured mean plus SD * rand
	managerRuntimeData.Izh_a[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_a + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_a_sd * (float)drand48();
	managerRuntimeData.Izh_b[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_b + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_b_sd * (float)drand48();
	managerRuntimeData.Izh_c[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_c + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_c_sd * (float)drand48();
	managerRuntimeData.Izh_d[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_d + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_d_sd * (float)drand48();
	managerRuntimeData.Izh_C[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_C + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_C_sd * (float)drand48();
	managerRuntimeData.Izh_k[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_k + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_k_sd * (float)drand48();
	managerRuntimeData.Izh_vr[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vr + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vr_sd * (float)drand48();
	managerRuntimeData.Izh_vt[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vt + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vt_sd * (float)drand48();
	managerRuntimeData.Izh_vpeak[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vpeak + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vpeak_sd * (float)drand48();
	managerRuntimeData.lif_tau_m[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.lif_tau_m;
	managerRuntimeData.lif_tau_ref[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.lif_tau_ref;
	// ...
	managerRuntimeData.lif_vTh[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.lif_vTh;
	managerRuntimeData.lif_vReset[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.lif_vReset;

	// LIF: draw the membrane resistance uniformly from [minRmem, maxRmem], encoded as gain/bias
	if (groupConfigs[netId][lGrpId].isLIF) {
		float rmRange = (float)(groupConfigMap[gGrpId].neuralDynamicsConfig.lif_maxRmem - groupConfigMap[gGrpId].neuralDynamicsConfig.lif_minRmem);
		float minRmem = (float)groupConfigMap[gGrpId].neuralDynamicsConfig.lif_minRmem;
		managerRuntimeData.lif_bias[lNId] = 0.0f;
		managerRuntimeData.lif_gain[lNId] = minRmem + rmRange * (float)drand48();
	}

	if (groupConfigs[netId][lGrpId].WithHomeostasis) {
		// randomize the base firing rate around the configured mean
		if (drand48() > 0.5) {
			managerRuntimeData.baseFiring[lNId] = groupConfigMap[gGrpId].homeoConfig.baseFiring + groupConfigMap[gGrpId].homeoConfig.baseFiringSD * -log(drand48());
		} else {
			managerRuntimeData.baseFiring[lNId] = groupConfigMap[gGrpId].homeoConfig.baseFiring - groupConfigMap[gGrpId].homeoConfig.baseFiringSD * -log(drand48());
		}
		if (groupConfigMap[gGrpId].homeoConfig.baseFiring != 0.0f) {
			// ...
		} else {
			// ...
			managerRuntimeData.avgFiring[lNId] = 0.0f;
		}
	}

	if (groupConfigs[netId][lGrpId].WithSTP) {
		for (int j = 0; j < networkConfigs[netId].maxDelay + 1; j++) {
			int index = STP_BUF_POS(lNId, j, networkConfigs[netId].maxDelay);
			managerRuntimeData.stpu[index] = 0.0f;
			managerRuntimeData.stpx[index] = 1.0f;
		}
	}
}
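// Illustrative sketch (not part of the original source): stpu/stpx are ring buffers with one
// slot per neuron and per delay step, indexed through STP_BUF_POS(nId, t, maxDelay). The loop
// above clears all maxDelay+1 slots of the neuron being reset: the facilitation variable u
// starts at 0 and the fraction of available resources x starts at 1, so the first presynaptic
// spike sees a fully recovered synapse.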
void SNN::resetMonitors(bool deallocate) {
	// ...
	for (int i = 0; i < numSpikeMonitor; i++) {
		if (spikeMonList[i] != NULL && deallocate) delete spikeMonList[i];
		spikeMonList[i] = NULL;
	}

	for (int i = 0; i < numNeuronMonitor; i++) {
		if (neuronMonList[i] != NULL && deallocate) delete neuronMonList[i];
		neuronMonList[i] = NULL;
	}

	for (int i = 0; i < numGroupMonitor; i++) {
		if (groupMonList[i] != NULL && deallocate) delete groupMonList[i];
		groupMonList[i] = NULL;
	}

	for (int i = 0; i < numConnectionMonitor; i++) {
		if (connMonList[i] != NULL && deallocate) delete connMonList[i];
		connMonList[i] = NULL;
	}
}

void SNN::resetGroupConfigs(bool deallocate) {
	if (deallocate) groupConfigMap.clear();
}

void SNN::resetConnectionConfigs(bool deallocate) {
	if (deallocate) connectConfigMap.clear();
}
void SNN::deleteManagerRuntimeData() {
	if (spikeBuf != NULL) delete spikeBuf;
	// ...
	if (managerRuntimeData.grpDA != NULL) delete[] managerRuntimeData.grpDA;
	if (managerRuntimeData.grp5HT != NULL) delete[] managerRuntimeData.grp5HT;
	if (managerRuntimeData.grpACh != NULL) delete[] managerRuntimeData.grpACh;
	if (managerRuntimeData.grpNE != NULL) delete[] managerRuntimeData.grpNE;
	managerRuntimeData.grpDA = NULL;
	managerRuntimeData.grp5HT = NULL;
	managerRuntimeData.grpACh = NULL;
	managerRuntimeData.grpNE = NULL;
	// ...
	if (managerRuntimeData.voltage != NULL) delete[] managerRuntimeData.voltage;
	if (managerRuntimeData.recovery != NULL) delete[] managerRuntimeData.recovery;
	if (managerRuntimeData.current != NULL) delete[] managerRuntimeData.current;
	// ...
	if (managerRuntimeData.curSpike != NULL) delete[] managerRuntimeData.curSpike;
	if (managerRuntimeData.nVBuffer != NULL) delete[] managerRuntimeData.nVBuffer;
	if (managerRuntimeData.nUBuffer != NULL) delete[] managerRuntimeData.nUBuffer;
	if (managerRuntimeData.nIBuffer != NULL) delete[] managerRuntimeData.nIBuffer;
	// ...
	if (managerRuntimeData.Izh_a != NULL) delete[] managerRuntimeData.Izh_a;
	if (managerRuntimeData.Izh_b != NULL) delete[] managerRuntimeData.Izh_b;
	if (managerRuntimeData.Izh_c != NULL) delete[] managerRuntimeData.Izh_c;
	if (managerRuntimeData.Izh_d != NULL) delete[] managerRuntimeData.Izh_d;
	if (managerRuntimeData.Izh_C != NULL) delete[] managerRuntimeData.Izh_C;
	if (managerRuntimeData.Izh_k != NULL) delete[] managerRuntimeData.Izh_k;
	if (managerRuntimeData.Izh_vr != NULL) delete[] managerRuntimeData.Izh_vr;
	if (managerRuntimeData.Izh_vt != NULL) delete[] managerRuntimeData.Izh_vt;
	// ...
	managerRuntimeData.Izh_a = NULL; managerRuntimeData.Izh_b = NULL; managerRuntimeData.Izh_c = NULL; managerRuntimeData.Izh_d = NULL;
	managerRuntimeData.Izh_C = NULL; managerRuntimeData.Izh_k = NULL; managerRuntimeData.Izh_vr = NULL; managerRuntimeData.Izh_vt = NULL; managerRuntimeData.Izh_vpeak = NULL;
	// ...
	if (managerRuntimeData.lif_vTh != NULL) delete[] managerRuntimeData.lif_vTh;
	// ...
	if (managerRuntimeData.lif_gain != NULL) delete[] managerRuntimeData.lif_gain;
	if (managerRuntimeData.lif_bias != NULL) delete[] managerRuntimeData.lif_bias;
	// ...
	if (managerRuntimeData.Npre != NULL) delete[] managerRuntimeData.Npre;
	// ...
	if (managerRuntimeData.Npost != NULL) delete[] managerRuntimeData.Npost;
	// ...
	if (managerRuntimeData.gAMPA != NULL) delete[] managerRuntimeData.gAMPA;
	if (managerRuntimeData.gNMDA != NULL) delete[] managerRuntimeData.gNMDA;
	if (managerRuntimeData.gNMDA_r != NULL) delete[] managerRuntimeData.gNMDA_r;
	if (managerRuntimeData.gNMDA_d != NULL) delete[] managerRuntimeData.gNMDA_d;
	if (managerRuntimeData.gGABAa != NULL) delete[] managerRuntimeData.gGABAa;
	if (managerRuntimeData.gGABAb != NULL) delete[] managerRuntimeData.gGABAb;
	if (managerRuntimeData.gGABAb_r != NULL) delete[] managerRuntimeData.gGABAb_r;
	if (managerRuntimeData.gGABAb_d != NULL) delete[] managerRuntimeData.gGABAb_d;
	managerRuntimeData.gAMPA = NULL; managerRuntimeData.gNMDA = NULL; managerRuntimeData.gNMDA_r = NULL; managerRuntimeData.gNMDA_d = NULL;
	// ...
	if (managerRuntimeData.stpu != NULL) delete[] managerRuntimeData.stpu;
	if (managerRuntimeData.stpx != NULL) delete[] managerRuntimeData.stpx;
	managerRuntimeData.stpu = NULL; managerRuntimeData.stpx = NULL;
	// ...
	if (managerRuntimeData.wt != NULL) delete[] managerRuntimeData.wt;
	if (managerRuntimeData.maxSynWt != NULL) delete[] managerRuntimeData.maxSynWt;
	if (managerRuntimeData.wtChange != NULL) delete[] managerRuntimeData.wtChange;
	managerRuntimeData.wt = NULL; managerRuntimeData.maxSynWt = NULL; managerRuntimeData.wtChange = NULL;

	if (mulSynFast != NULL) delete[] mulSynFast;
	if (mulSynSlow != NULL) delete[] mulSynSlow;
	// ...
	mulSynFast = NULL; mulSynSlow = NULL; managerRuntimeData.connIdsPreIdx = NULL;

	if (managerRuntimeData.grpIds != NULL) delete[] managerRuntimeData.grpIds;
	managerRuntimeData.grpIds = NULL;
	// ...
}
void SNN::resetPoissonNeuron(int netId, int lGrpId, int lNId) {
	assert(lNId < networkConfigs[netId].numN);
	// ...
	if (groupConfigs[netId][lGrpId].WithHomeostasis)
		managerRuntimeData.avgFiring[lNId] = 0.0f;

	if (groupConfigs[netId][lGrpId].WithSTP) {
		for (int j = 0; j < networkConfigs[netId].maxDelay + 1; j++) {
			int index = STP_BUF_POS(lNId, j, networkConfigs[netId].maxDelay);
			managerRuntimeData.stpu[index] = 0.0f;
			managerRuntimeData.stpx[index] = 1.0f;
		}
	}
}

void SNN::resetPropogationBuffer() {
	// ...
	spikeBuf->reset(0, 1023);
}

void SNN::resetSynapse(int netId, bool changeWeights) {
	memset(managerRuntimeData.wtChange, 0, sizeof(float) * networkConfigs[netId].numPreSynNet);

	for (int syn = 0; syn < networkConfigs[netId].numPreSynNet; syn++) {
		// ...
	}
}

void SNN::resetTimeTable() {
	memset(managerRuntimeData.timeTableD2, 0, sizeof(int) * (1000 + glbNetworkConfig.maxDelay + 1));
	memset(managerRuntimeData.timeTableD1, 0, sizeof(int) * (1000 + glbNetworkConfig.maxDelay + 1));
}

void SNN::resetFiringTable() {
	memset(managerRuntimeData.firingTableD2, 0, sizeof(int) * managerRTDSize.maxMaxSpikeD2);
	memset(managerRuntimeData.firingTableD1, 0, sizeof(int) * managerRTDSize.maxMaxSpikeD1);
	// ...
	memset(managerRuntimeData.extFiringTableD2, 0, sizeof(int*) * managerRTDSize.maxNumGroups);
	memset(managerRuntimeData.extFiringTableD1, 0, sizeof(int*) * managerRTDSize.maxNumGroups);
}
void SNN::resetSpikeCnt(int gGrpId) {
	assert(gGrpId >= ALL);

	if (gGrpId == ALL) {
#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or macOS
		pthread_t threads[numCores + 1];
		// ...
		int threadCount = 0;
#endif

		for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
			if (!groupPartitionLists[netId].empty()) {
				if (netId < CPU_RUNTIME_BASE) { // GPU runtime
					resetSpikeCnt_GPU(netId, ALL);
				} else { // CPU runtime
#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
					resetSpikeCnt_CPU(netId, ALL);
#else // Linux or macOS: run in a worker thread pinned to a core
					pthread_attr_t attr;
					pthread_attr_init(&attr);
					// ...
					pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

					argsThreadRoutine[threadCount].snn_pointer = this;
					argsThreadRoutine[threadCount].netId = netId;
					argsThreadRoutine[threadCount].lGrpId = ALL;
					argsThreadRoutine[threadCount].startIdx = 0;
					argsThreadRoutine[threadCount].endIdx = 0;
					argsThreadRoutine[threadCount].GtoLOffset = 0;

					pthread_create(&threads[threadCount], &attr, &SNN::helperResetSpikeCnt_CPU, (void*)&argsThreadRoutine[threadCount]);
					pthread_attr_destroy(&attr);
					// ...
#endif
				}
			}
		}

#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or macOS: wait for the worker threads
		for (int i = 0; i < threadCount; i++) {
			pthread_join(threads[i], NULL);
		}
#endif
	} else {
		int netId = groupConfigMDMap[gGrpId].netId;
		int lGrpId = groupConfigMDMap[gGrpId].lGrpId;

		if (netId < CPU_RUNTIME_BASE) // GPU runtime
			resetSpikeCnt_GPU(netId, lGrpId);
		else // CPU runtime
			resetSpikeCnt_CPU(netId, lGrpId);
	}
}
inline SynInfo SNN::SET_CONN_ID(int nId, int sId, int grpId) {
	// ...
}

void SNN::setGrpTimeSlice(int gGrpId, int timeSlice) {
	if (gGrpId == ALL) {
		for (int grpId = 0; grpId < numGroups; grpId++) {
			if (groupConfigMap[grpId].isSpikeGenerator)
				setGrpTimeSlice(grpId, timeSlice);
		}
	} else {
		// ...
		groupConfigMDMap[gGrpId].currTimeSlice = timeSlice;
	}
}

int SNN::setRandSeed(int seed) {
	// ...
}

void SNN::fillSpikeGenBits(int netId) {
	SpikeBuffer::SpikeIterator spikeBufIter;
	SpikeBuffer::SpikeIterator spikeBufIterEnd = spikeBuf->back();

	// walk through the scheduled spikes and set the corresponding bit for each generator neuron
	for (spikeBufIter = spikeBuf->front(); spikeBufIter != spikeBufIterEnd; ++spikeBufIter) {
		// get the global group id of the scheduled spike
		int gGrpId = spikeBufIter->grpId;

		if (groupConfigMDMap[gGrpId].netId == netId) {
			int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
			int lNId = spikeBufIter->neurId + groupConfigMDMap[gGrpId].GtoLOffset;

			// make sure the group is a spike-generator group
			assert(groupConfigMap[gGrpId].isSpikeGenerator == true);

			int nIdPos = (lNId - groupConfigs[netId][lGrpId].lStartN + groupConfigs[netId][lGrpId].Noffset);
			int nIdBitPos = nIdPos % 32;
			int nIdIndex = nIdPos / 32;

			assert(nIdIndex < (networkConfigs[netId].numNSpikeGen / 32 + 1));

			managerRuntimeData.spikeGenBits[nIdIndex] |= (1 << nIdBitPos);
		}
	}
}
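// Illustrative sketch (not part of the original source): spikeGenBits is a bit vector with one
// bit per spike-generator neuron. For a hypothetical generator neuron whose position within the
// generator population is nIdPos = 37:
//   nIdIndex  = 37 / 32 = 1          // which 32-bit word
//   nIdBitPos = 37 % 32 = 5          // which bit inside that word
//   spikeGenBits[1] |= (1 << 5);     // mark the neuron as spiking in the current millisecond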
void SNN::startTiming() { prevExecutionTime = cumExecutionTime; }

void SNN::stopTiming() {
	executionTime += (cumExecutionTime - prevExecutionTime);
	prevExecutionTime = cumExecutionTime;
}

void SNN::startTesting(bool shallUpdateWeights) {
	// apply the accumulated weight changes before switching to testing mode
	if (shallUpdateWeights && !sim_in_testing) {
		if (wtANDwtChangeUpdateIntervalCnt_) {
			float storeScaleSTDP = stdpScaleFactor_;
			stdpScaleFactor_ = 1.0f / wtANDwtChangeUpdateIntervalCnt_;
			// ...
			stdpScaleFactor_ = storeScaleSTDP;
		}
	}

	sim_in_testing = true;

	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			// ...
			updateNetworkConfig(netId);
		}
	}
}

void SNN::stopTesting() {
	sim_in_testing = false;

	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			// ...
			updateNetworkConfig(netId);
		}
	}
}
void SNN::updateConnectionMonitor(short int connId) {
	for (int monId = 0; monId < numConnectionMonitor; monId++) {
		// ...
		if (timeInterval == 1 || timeInterval > 1 && (getSimTime() % timeInterval) == 0) {
			// ...
		}
	}
}

std::vector< std::vector<float> > SNN::getWeightMatrix2D(short int connId) {
	assert(connId > ALL);
	std::vector< std::vector<float> > wtConnId;

	int grpIdPre = connectConfigMap[connId].grpSrc;
	int grpIdPost = connectConfigMap[connId].grpDest;

	int netIdPost = groupConfigMDMap[grpIdPost].netId;
	int lGrpIdPost = groupConfigMDMap[grpIdPost].lGrpId;

	// initialize the weight matrix with NAN (marking non-existent synapses)
	for (int i = 0; i < groupConfigMap[grpIdPre].numN; i++) {
		std::vector<float> wtSlice;
		for (int j = 0; j < groupConfigMap[grpIdPost].numN; j++) {
			wtSlice.push_back(NAN);
		}
		wtConnId.push_back(wtSlice);
	}
	// ...
	assert(grpIdPost > ALL);

	fetchWeightState(netIdPost, lGrpIdPost);
	fetchConnIdsLookupArray(netIdPost);

	for (int lNIdPost = groupConfigs[netIdPost][lGrpIdPost].lStartN; lNIdPost <= groupConfigs[netIdPost][lGrpIdPost].lEndN; lNIdPost++) {
		unsigned int pos_ij = managerRuntimeData.cumulativePre[lNIdPost];
		for (int i = 0; i < managerRuntimeData.Npre[lNIdPost]; i++, pos_ij++) {
			// ...
			wtConnId[lNIdPre - groupConfigs[netIdPost][lGrpIdPre].lStartN][lNIdPost - groupConfigs[netIdPost][lGrpIdPost].lStartN] =
				fabs(managerRuntimeData.wt[pos_ij]);
			// ...
		}
	}

	return wtConnId;
}
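// Illustrative sketch (not part of the original source): the returned matrix is indexed as
// wt2D[preNeuron][postNeuron] with neuron indices relative to their groups, and entries left at
// NAN mark pre/post pairs that are not connected. A hypothetical caller could print the existing
// weights of connection 0 like this:
//   std::vector< std::vector<float> > wt2D = getWeightMatrix2D(0);
//   for (size_t i = 0; i < wt2D.size(); i++)
//       for (size_t j = 0; j < wt2D[i].size(); j++)
//           if (!std::isnan(wt2D[i][j])) printf("%zu -> %zu : %f\n", i, j, wt2D[i][j]);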
void SNN::updateGroupMonitor(int gGrpId) {
	// don't continue if there are no group monitors in the network
	if (!numGroupMonitor)
		return;

	if (gGrpId == ALL) {
		for (int gGrpId = 0; gGrpId < numGroups; gGrpId++)
			updateGroupMonitor(gGrpId);
	} else {
		int netId = groupConfigMDMap[gGrpId].netId;
		int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
		// ...
		int monitorId = groupConfigMDMap[gGrpId].groupMonitorId;

		// don't continue if no group monitor is set for this group
		if (monitorId < 0)
			return;
		// ...
			KERNEL_ERROR("updateGroupMonitor(grpId=%d) must be called at least once every second", gGrpId);
		// ...
		fetchGroupState(netId, lGrpId);
		// ...
		int numMsMin = lastUpdate % 1000;
		// ...
		assert(numMsMin < numMsMax);
		// ...
		bool writeGroupToFile = grpFileId != NULL;
		bool writeGroupToArray = grpMonObj->isRecording();
		// ...
		for (int t = numMsMin; t < numMsMax; t++) {
			// ...
			data = managerRuntimeData.grpDABuffer[lGrpId * 1000 + t];
			// ...
			int time = currentTimeSec * 1000 + t;

			if (writeGroupToFile) {
				// ...
			}

			if (writeGroupToArray) {
				// ...
			}
		}

		if (grpFileId != NULL) {
			// ...
		}
	}
}
void SNN::userDefinedSpikeGenerator(int gGrpId) {
	// ...
	int netId = groupConfigMDMap[gGrpId].netId;
	int timeSlice = groupConfigMDMap[gGrpId].currTimeSlice;
	int currTime = simTime;
	// ...
	fetchLastSpikeTime(netId);

	for (int gNId = groupConfigMDMap[gGrpId].gStartN; gNId <= groupConfigMDMap[gGrpId].gEndN; gNId++) {
		// ...
		int lNId = gNId + groupConfigMDMap[gGrpId].GtoLOffset;
		// ...
		int endOfTimeWindow = std::min(currTime + timeSlice, simTimeRunStop);
		// ...
		int nextSchedTime = spikeGenFunc->nextSpikeTime(this, gGrpId, gNId - groupConfigMDMap[gGrpId].gStartN, currTime, nextTime, endOfTimeWindow);
		// ...
		if ((nextSchedTime == 0 || nextSchedTime > nextTime) && nextSchedTime < endOfTimeWindow && nextSchedTime >= currTime) {
			// ...
			nextTime = nextSchedTime;
			spikeBuf->schedule(gNId, gGrpId, nextTime - currTime);
		}
		// ...
	}
}

void SNN::generateUserDefinedSpikes() {
	for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
		if (groupConfigMap[gGrpId].isSpikeGenerator) {
			// update the scheduling window if the current time slice has elapsed
			if (((simTime - groupConfigMDMap[gGrpId].sliceUpdateTime) >= groupConfigMDMap[gGrpId].currTimeSlice || simTime == simTimeRunStart)) {
				int timeSlice = groupConfigMDMap[gGrpId].currTimeSlice;
				groupConfigMDMap[gGrpId].sliceUpdateTime = simTime;
				// ...
				if (groupConfigMap[gGrpId].spikeGenFunc != NULL) {
					userDefinedSpikeGenerator(gGrpId);
				}
			}
		}
	}
}
void SNN::allocateManagerSpikeTables() {
	managerRuntimeData.firingTableD2 = new int[managerRTDSize.maxMaxSpikeD2];
	managerRuntimeData.firingTableD1 = new int[managerRTDSize.maxMaxSpikeD1];
	// ...
	managerRuntimeData.extFiringTableD2 = new int*[managerRTDSize.maxNumGroups];
	managerRuntimeData.extFiringTableD1 = new int*[managerRTDSize.maxNumGroups];
	// ...
}

// updates simTime, returns true when a new second starts
bool SNN::updateTime() {
	bool finishedOneSec = false;

	// the millisecond counter rolls over every 1000 ms
	if (++simTimeMs == 1000) {
		// ...
		finishedOneSec = true;
	}
	// ...
		// if the maximum representable simulation time is reached, the internal timers are reset
		KERNEL_WARN("Maximum Simulation Time Reached...Resetting simulation time");
	// ...
	return finishedOneSec;
}
void SNN::updateSpikeMonitor(int gGrpId) {
	// don't continue if there are no spike monitors in the network
	if (!numSpikeMonitor)
		return;

	if (gGrpId == ALL) {
		for (int gGrpId = 0; gGrpId < numGroups; gGrpId++)
			updateSpikeMonitor(gGrpId);
	} else {
		int netId = groupConfigMDMap[gGrpId].netId;
		int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
		// ...
		int monitorId = groupConfigMDMap[gGrpId].spikeMonitorId;

		// don't continue if no spike monitor is set for this group
		if (monitorId < 0)
			return;
		// ...
		if (((long int)getSimTime()) - lastUpdate <= 0)
			return;
		// ...
		if (((long int)getSimTime()) - lastUpdate > 1000)
			KERNEL_ERROR("updateSpikeMonitor(grpId=%d) must be called at least once every second", gGrpId);
		// ...
			KERNEL_WARN("Reduce the cumulative recording time (currently %lu minutes) or the group size (currently %d) to avoid this.",
				spkMonObj->getAccumTime() / (1000 * 60), this->getGroupNumNeurons(gGrpId));
		// ...
		fetchSpikeTables(netId);
		fetchGrpIdsLookupArray(netId);
		// ...
		int numMsMin = lastUpdate % 1000;
		// ...
		assert(numMsMin < numMsMax);
		// ...
		bool writeSpikesToFile = spkFileId != NULL;
		// ...
		// walk through both firing tables (D1 and D2)
		for (int k = 0; k < 2; k++) {
			// ...
			for (int t = numMsMin; t < numMsMax; t++) {
				for (int i = timeTablePtr[t + glbNetworkConfig.maxDelay]; i < timeTablePtr[t + glbNetworkConfig.maxDelay + 1]; i++) {
					// ...
					int lNId = fireTablePtr[i];
					// ...
					int this_grpId = managerRuntimeData.grpIds[lNId];
					if (this_grpId != lGrpId)
						continue;
					// ...
					int nId = lNId - groupConfigs[netId][lGrpId].lStartN;
					// ...
					int time = currentTimeSec * 1000 + t;
					// ...
					if (writeSpikesToFile) {
						int cnt;
						cnt = fwrite(&time, sizeof(int), 1, spkFileId); assert(cnt == 1);
						cnt = fwrite(&nId, sizeof(int), 1, spkFileId); assert(cnt == 1);
					}

					if (writeSpikesToArray) {
						spkMonObj->pushAER(time, nId);
					}
				}
			}
		}

		if (spkFileId != NULL) {
			// ...
		}
	}
}
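// Illustrative sketch (not part of the original source): each spike written to the spike file
// above is a fixed-size AER record of two 32-bit ints, {time in ms, neuron id relative to the
// group}. Assuming a hypothetical group in which neuron 3 fires at t = 1205 ms, the pair
// (1205, 3) is appended to the file and, if the monitor is in recording mode, also pushed to
// the in-memory buffer via pushAER(1205, 3).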
void SNN::updateNeuronMonitor(int gGrpId) {
	// don't continue if there are no neuron monitors in the network
	if (!numNeuronMonitor)
		return;

	if (gGrpId == ALL) {
		for (int gGrpId = 0; gGrpId < numGroups; gGrpId++)
			updateNeuronMonitor(gGrpId);
	} else {
		int netId = groupConfigMDMap[gGrpId].netId;
		int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
		// ...
		int monitorId = groupConfigMDMap[gGrpId].neuronMonitorId;

		// don't continue if no neuron monitor is set for this group
		if (monitorId < 0)
			return;
		// ...
		if (((long int)getSimTime()) - lastUpdate <= 0)
			return;
		// ...
		if (((long int)getSimTime()) - lastUpdate > 1000)
			KERNEL_ERROR("updateNeuronMonitor(grpId=%d) must be called at least once every second", gGrpId);
		// ...
		fetchNeuronStateBuffer(netId, lGrpId);
		// ...
		int numMsMin = lastUpdate % 1000;
		// ...
		assert(numMsMin < numMsMax);
		// ...
		bool writeNeuronStateToFile = nrnFileId != NULL;
		bool writeNeuronStateToArray = nrnMonObj->isRecording();
		// ...
		for (int t = numMsMin; t < numMsMax; t++) {
			// ...
			for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++) {
				// ...
				int this_grpId = managerRuntimeData.grpIds[lNId];
				if (this_grpId != lGrpId)
					continue;
				// ...
				int nId = lNId - groupConfigs[netId][lGrpId].lStartN;
				// ...
				float v, u, I;
				v = managerRuntimeData.nVBuffer[idxBase + nId];
				u = managerRuntimeData.nUBuffer[idxBase + nId];
				I = managerRuntimeData.nIBuffer[idxBase + nId];
				// ...
				int time = currentTimeSec * 1000 + t;
				// ...
				if (writeNeuronStateToFile) {
					int cnt;
					cnt = fwrite(&time, sizeof(int), 1, nrnFileId); assert(cnt == 1);
					cnt = fwrite(&nId, sizeof(int), 1, nrnFileId); assert(cnt == 1);
					cnt = fwrite(&v, sizeof(float), 1, nrnFileId); assert(cnt == 1);
					cnt = fwrite(&u, sizeof(float), 1, nrnFileId); assert(cnt == 1);
					cnt = fwrite(&I, sizeof(float), 1, nrnFileId); assert(cnt == 1);
				}
				// ...
				if (writeNeuronStateToArray) {
					// ...
				}
			}
		}

		if (nrnFileId != NULL) {
			// ...
		}
	}
}
void SNN::printSimSummary() {
	// ...
	etime = executionTime;
	// ...
	fetchNetworkSpikeCount();

	KERNEL_INFO("******************** Simulation Summary ***************************");
	// ...
	KERNEL_INFO("Network Parameters: \tnumNeurons = %d (numNExcReg:numNInhReg = %2.1f:%2.1f)",
		/* ... */);
	// ...
	KERNEL_INFO("Simulation Mode:\t%s", sim_with_conductances ? "COBA" : "CUBA");
	// ...
	KERNEL_INFO("Timing:\t\t\tModel Simulation Time = %lld sec", (unsigned long long)simTimeSec);
	KERNEL_INFO("\t\t\tActual Execution Time = %4.2f sec", etime / 1000.0f);
	KERNEL_INFO("Average Firing Rate:\t2+ms delay = %3.3f Hz",
		/* ... */);
	// ...
	KERNEL_INFO("*********************************************************************************\n");
}