#ifdef __NO_PTHREADS__
	void SNN::spikeGeneratorUpdate_CPU(int netId) {
#else // POSIX
	void* SNN::spikeGeneratorUpdate_CPU(int netId) {
#endif
		assert(runtimeData[netId].allocated);
		assert(runtimeData[netId].memType == CPU_MEM);

		// refresh the random numbers used for rate-based Poisson spike generation
		for (int poisN = 0; poisN < networkConfigs[netId].numNPois; poisN++) {
			runtimeData[netId].randNum[poisN] = drand48();
		}

		// spike generators driven by user callbacks
		if (networkConfigs[netId].numNSpikeGen > 0) {
			// clear the manager-side bit field, fill it, then copy it into the CPU runtime
			memset(managerRuntimeData.spikeGenBits, 0, sizeof(int) * (networkConfigs[netId].numNSpikeGen / 32 + 1));
			fillSpikeGenBits(netId);
			memcpy(runtimeData[netId].spikeGenBits, managerRuntimeData.spikeGenBits, sizeof(int) * (networkConfigs[netId].numNSpikeGen / 32 + 1));
		}
	}
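	/* Illustrative note (added, not part of the original source): spikeGenBits packs one
	 * spike flag per spike-generator neuron into 32-bit words, which is why the buffers
	 * above are sized (numNSpikeGen / 32 + 1) ints. A minimal sketch of the packing that
	 * fillSpikeGenBits() presumably performs, using a hypothetical helper:
	 *
	 *   inline void setSpikeGenBitSketch(int* bits, unsigned int nIdPos) {
	 *       bits[nIdPos / 32] |= (0x1 << (nIdPos % 32));   // word index, then bit offset
	 *   }
	 *
	 * getSpikeGenBit() further below reads the flag back with the same word/bit split.
	 */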
#ifndef __NO_PTHREADS__ // POSIX
	void* SNN::helperSpikeGeneratorUpdate_CPU(void* arguments) {
		// ...
	}
#endif
#ifdef __NO_PTHREADS__
	void SNN::updateTimingTable_CPU(int netId) {
#else // POSIX
	void* SNN::updateTimingTable_CPU(int netId) {
#endif
		assert(runtimeData[netId].memType == CPU_MEM);
		// ...
	}
#ifndef __NO_PTHREADS__ // POSIX
	void* SNN::helperUpdateTimingTable_CPU(void* arguments) {
		// ...
	}
#endif
#ifdef __NO_PTHREADS__
	void SNN::convertExtSpikesD2_CPU(int netId, int startIdx, int endIdx, int GtoLOffset) {
#else // POSIX
	void* SNN::convertExtSpikesD2_CPU(int netId, int startIdx, int endIdx, int GtoLOffset) {
#endif
		int spikeCountExtRx = endIdx - startIdx; // number of external spikes received
		// ...
		// convert the global neuron ids stored in the delay-2 firing table to local ids
		for (int extIdx = startIdx; extIdx < endIdx; extIdx++)
			runtimeData[netId].firingTableD2[extIdx] += GtoLOffset;
		// ...
	}
#ifndef __NO_PTHREADS__ // POSIX
	void* SNN::helperConvertExtSpikesD2_CPU(void* arguments) {
		// ...
	}
#endif
#ifdef __NO_PTHREADS__
	void SNN::convertExtSpikesD1_CPU(int netId, int startIdx, int endIdx, int GtoLOffset) {
#else // POSIX
	void* SNN::convertExtSpikesD1_CPU(int netId, int startIdx, int endIdx, int GtoLOffset) {
#endif
		int spikeCountExtRx = endIdx - startIdx; // number of external spikes received
		// ...
		// convert the global neuron ids stored in the delay-1 firing table to local ids
		for (int extIdx = startIdx; extIdx < endIdx; extIdx++)
			runtimeData[netId].firingTableD1[extIdx] += GtoLOffset;
		// ...
	}
#ifndef __NO_PTHREADS__ // POSIX
	void* SNN::helperConvertExtSpikesD1_CPU(void* arguments) {
		// ...
	}
#endif
#ifdef __NO_PTHREADS__
	void SNN::clearExtFiringTable_CPU(int netId) {
#else // POSIX
	void* SNN::clearExtFiringTable_CPU(int netId) {
#endif
		assert(runtimeData[netId].memType == CPU_MEM);

		memset(runtimeData[netId].extFiringTableEndIdxD1, 0, sizeof(int) * networkConfigs[netId].numGroups);
		memset(runtimeData[netId].extFiringTableEndIdxD2, 0, sizeof(int) * networkConfigs[netId].numGroups);
	}
#ifndef __NO_PTHREADS__ // POSIX
	void* SNN::helperClearExtFiringTable_CPU(void* arguments) {
		// ...
	}
#endif
	void SNN::copyTimeTable(int netId, bool toManager) {
		if (toManager) {
			memcpy(managerRuntimeData.timeTableD2, runtimeData[netId].timeTableD2, sizeof(int) * (1000 + glbNetworkConfig.maxDelay + 1));
			memcpy(managerRuntimeData.timeTableD1, runtimeData[netId].timeTableD1, sizeof(int) * (1000 + glbNetworkConfig.maxDelay + 1));
		} else {
			memcpy(runtimeData[netId].timeTableD2, managerRuntimeData.timeTableD2, sizeof(int) * (1000 + glbNetworkConfig.maxDelay + 1));
			memcpy(runtimeData[netId].timeTableD1, managerRuntimeData.timeTableD1, sizeof(int) * (1000 + glbNetworkConfig.maxDelay + 1));
		}
	}
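	/* Note (added, not part of the original source): the time tables hold one cumulative
	 * spike-count entry per millisecond of the current second plus the extra maxDelay + 1
	 * slots that doCurrentUpdateD1/D2_CPU index past simTimeMs, hence the
	 * (1000 + glbNetworkConfig.maxDelay + 1) element count used in the memcpy calls above.
	 */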
	void SNN::copyExtFiringTable(int netId) {
		// ...
		memcpy(managerRuntimeData.extFiringTableEndIdxD2, runtimeData[netId].extFiringTableEndIdxD2, sizeof(int) * networkConfigs[netId].numGroups);
		memcpy(managerRuntimeData.extFiringTableEndIdxD1, runtimeData[netId].extFiringTableEndIdxD1, sizeof(int) * networkConfigs[netId].numGroups);
		memcpy(managerRuntimeData.extFiringTableD2, runtimeData[netId].extFiringTableD2, sizeof(int*) * networkConfigs[netId].numGroups);
		memcpy(managerRuntimeData.extFiringTableD1, runtimeData[netId].extFiringTableD1, sizeof(int*) * networkConfigs[netId].numGroups);
		// ...
	}
#ifdef __NO_PTHREADS__
	void SNN::resetSpikeCnt_CPU(int netId, int lGrpId) {
#else // POSIX
	void* SNN::resetSpikeCnt_CPU(int netId, int lGrpId) {
#endif
		assert(runtimeData[netId].memType == CPU_MEM);

		if (lGrpId == ALL) {
			// reset the spike counters of every neuron in this partition
			memset(runtimeData[netId].nSpikeCnt, 0, sizeof(int) * networkConfigs[netId].numN);
		} else {
			// reset only the neurons of the given local group
			int lStartN = groupConfigs[netId][lGrpId].lStartN;
			int numN = groupConfigs[netId][lGrpId].numN;
			memset(runtimeData[netId].nSpikeCnt + lStartN, 0, sizeof(int) * numN);
		}
	}
#ifndef __NO_PTHREADS__ // POSIX
	void* SNN::helperResetSpikeCnt_CPU(void* arguments) {
		// ...
	}
#endif
#ifdef __NO_PTHREADS__
	void SNN::doCurrentUpdateD1_CPU(int netId) {
#else // POSIX
	void* SNN::doCurrentUpdateD1_CPU(int netId) {
#endif
		assert(runtimeData[netId].memType == CPU_MEM);

		// walk the delay-1 firing-table entries recorded during the current millisecond
		int k = runtimeData[netId].timeTableD1[simTimeMs + networkConfigs[netId].maxDelay + 1] - 1;
		int k_end = runtimeData[netId].timeTableD1[simTimeMs + networkConfigs[netId].maxDelay];

		while ((k >= k_end) && (k >= 0)) {
			// ...
			assert(postNId < networkConfigs[netId].numNAssigned);
			// ...
			assert(synId < (runtimeData[netId].Npre[postNId]));
			// ...
			if (postNId < networkConfigs[netId].numN) // deliver only to local post-synaptic neurons
				generatePostSynapticSpike(lNId, postNId, synId, 0, netId);
			// ...
		}
	}
#ifdef __NO_PTHREADS__
	void SNN::doCurrentUpdateD2_CPU(int netId) {
#else // POSIX
	void* SNN::doCurrentUpdateD2_CPU(int netId) {
#endif
		assert(runtimeData[netId].memType == CPU_MEM);

		if (networkConfigs[netId].maxDelay > 1) {
			int k = runtimeData[netId].timeTableD2[simTimeMs + 1 + networkConfigs[netId].maxDelay] - 1;
			int k_end = runtimeData[netId].timeTableD2[simTimeMs + 1];
			int t_pos = simTimeMs;

			while ((k >= k_end) && (k >= 0)) {
				// ...
				// find the time slot t_pos in which firing-table entry k was recorded
				while (!((k >= runtimeData[netId].timeTableD2[t_pos + networkConfigs[netId].maxDelay]) && (k < runtimeData[netId].timeTableD2[t_pos + networkConfigs[netId].maxDelay + 1]))) {
					// ...
					assert((t_pos + networkConfigs[netId].maxDelay - 1) >= 0);
				}
				// ...
				int tD = simTimeMs - t_pos; // how many milliseconds ago the spike was fired
				// ...
				assert((tD < networkConfigs[netId].maxDelay) && (tD >= 0));
				// ...
				assert(postNId < networkConfigs[netId].numNAssigned);
				// ...
				assert(synId < (runtimeData[netId].Npre[postNId]));
				// ...
				if (postNId < networkConfigs[netId].numN) // deliver only to local post-synaptic neurons
					generatePostSynapticSpike(lNId, postNId, synId, tD, netId);
				// ...
			}
		}
	}
#ifdef __NO_PTHREADS__
	void SNN::doSTPUpdateAndDecayCond_CPU(int netId) {
#else // POSIX
	void* SNN::doSTPUpdateAndDecayCond_CPU(int netId) {
#endif
		assert(runtimeData[netId].memType == CPU_MEM);
		// ...

		for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) {
			for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++) {
				// decay the short-term plasticity variables before new spikes are processed
				if (groupConfigs[netId][lGrpId].WithSTP) {
					// ...
#ifdef LN_I_CALC_TYPES
					// ...
					if (groupConfigs[netId][lGrpId].WithNM4STP) {
						// neuromodulator-weighted STP time constants
						std::vector<float> nm;
						nm.push_back(groupConfigs[netId][lGrpId].activeDP ? runtimeData[netId].grpDA[lGrpId] : 0.f);
						nm.push_back(groupConfigs[netId][lGrpId].active5HT ? runtimeData[netId].grp5HT[lGrpId] : 0.f);
						nm.push_back(groupConfigs[netId][lGrpId].activeACh ? runtimeData[netId].grpACh[lGrpId] : 0.f);
						nm.push_back(groupConfigs[netId][lGrpId].activeNE ? runtimeData[netId].grpNE[lGrpId] : 0.f);
						auto& config = groupConfigs[netId][lGrpId];
						float tau_u = 1.0f / tau_u_inv;
						float tau_x = 1.0f / tau_x_inv;
						float w_tau_u = 0.0f;
						float w_tau_x = 0.0f;
						for (int i = 0; i < NM_NE + 1; i++) {
							w_tau_u += nm[i] * config.wstptauu[i];
							w_tau_x += nm[i] * config.wstptaux[i];
						}
						w_tau_u *= config.wstptauu[NM_NE + 1];
						w_tau_x *= config.wstptaux[NM_NE + 1];
						tau_u *= w_tau_u + config.wstptauu[NM_NE + 2];
						tau_x *= w_tau_x + config.wstptaux[NM_NE + 2];
						tau_u_inv = 1.0f / tau_u;
						tau_x_inv = 1.0f / tau_x;
					}
					// ...
					runtimeData[netId].stpu[ind_plus] = runtimeData[netId].stpu[ind_minus] * (1.0f - tau_u_inv);
					runtimeData[netId].stpx[ind_plus] = runtimeData[netId].stpx[ind_minus] + (1.0f - runtimeData[netId].stpx[ind_minus]) * tau_x_inv;
#else
					runtimeData[netId].stpu[ind_plus] = runtimeData[netId].stpu[ind_minus] * (1.0f - groupConfigs[netId][lGrpId].STP_tau_u_inv);
					runtimeData[netId].stpx[ind_plus] = runtimeData[netId].stpx[ind_minus] + (1.0f - runtimeData[netId].stpx[ind_minus]) * groupConfigs[netId][lGrpId].STP_tau_x_inv;
#endif
				}
				// decay the synaptic conductances
#ifdef LN_I_CALC_TYPES
				auto& groupConfig = groupConfigs[netId][lGrpId];
				switch (groupConfig.icalcType) {
				// ...
					if (IS_REGULAR_NEURON(lNId, networkConfigs[netId].numNReg, networkConfigs[netId].numNPois)) {
						runtimeData[netId].gAMPA[lNId] *= groupConfig.dAMPA;
						if (groupConfig.with_NMDA_rise) {
							runtimeData[netId].gNMDA_r[lNId] *= groupConfig.rNMDA;
							runtimeData[netId].gNMDA_d[lNId] *= groupConfig.dNMDA;
						} else {
							runtimeData[netId].gNMDA[lNId] *= groupConfig.dNMDA;
						}
						runtimeData[netId].gGABAa[lNId] *= groupConfig.dGABAa;
						if (groupConfig.with_GABAb_rise) {
							runtimeData[netId].gGABAb_r[lNId] *= groupConfig.rGABAb;
							runtimeData[netId].gGABAb_d[lNId] *= groupConfig.dGABAb;
						} else {
							runtimeData[netId].gGABAb[lNId] *= groupConfig.dGABAb;
						}
					}
					// ...
				}
#else
				if (networkConfigs[netId].sim_with_conductances && IS_REGULAR_NEURON(lNId, networkConfigs[netId].numNReg, networkConfigs[netId].numNPois)) {
					runtimeData[netId].gAMPA[lNId] *= dAMPA;
					if (sim_with_NMDA_rise) {
						runtimeData[netId].gNMDA_r[lNId] *= rNMDA;
						runtimeData[netId].gNMDA_d[lNId] *= dNMDA;
					} else {
						runtimeData[netId].gNMDA[lNId] *= dNMDA;
					}
					runtimeData[netId].gGABAa[lNId] *= dGABAa;
					if (sim_with_GABAb_rise) {
						runtimeData[netId].gGABAb_r[lNId] *= rGABAb;
						runtimeData[netId].gGABAb_d[lNId] *= dGABAb;
					} else {
						runtimeData[netId].gGABAb[lNId] *= dGABAb;
					}
				}
#endif
			}
		}

#ifdef LN_I_CALC_TYPES
		// per-connection neuromodulation of the fast/slow synaptic scaling factors
		for (std::map<int, ConnectConfig>::iterator connIt = connectConfigMap.begin(); connIt != connectConfigMap.end(); connIt++) {
			// ...
			int lConnId = connIt->second.connId;
			// ...
			auto& config = connIt->second;
			switch (config.icalcType) {
			// ...
				if (groupConfigs[netId][config.grpDest].activeNE) {
					float ne = runtimeData[netId].grpNE[config.grpDest];
					mulSynFast[lConnId] = 0.1f;
					mulSynSlow[lConnId] = 15.0f - 10.0f * exp(-ne * 5.0f);
				}
				// ...
				if (groupConfigs[netId][config.grpDest].activeDP) {
					float da = runtimeData[netId].grpDA[config.grpDest];
					mulSynFast[lConnId] = 1.0f + exp(-da * 5.0f);
					mulSynSlow[lConnId] = mulSynFast[lConnId];
				}
				// ...
				if (groupConfigs[netId][config.grpDest].activeDP) {
					float da = runtimeData[netId].grpDA[config.grpDest];
					// ...
					// piecewise tanh mapping from the dopamine level to the scaling factor mu
					if (da >= 0 && da < 1.f / 6.f) {
						y1 = 1.2f * 0.5f * tanh((0.f - 0.f / 3.f) * a);
						y2 = 1.2f * 0.5f * tanh((1.f / 6.f - 0.f / 3.f) * a);
						mu = (0.6f - y2) + 1.2f * 0.5f * (tanh((da - 0.f / 3.f) * a));
					}
					if (da >= 1.f / 6.f && da < 1.f / 2.f) {
						y1 = 0.6f + 0.4f * 0.5f * (1.f + tanh((1.f / 6.f - 1.f / 3.f) * a));
						y2 = 0.6f + 0.4f * 0.5f * (1.f + tanh((1.f / 2.f - 1.f / 3.f) * a));
						mu = 0.6f + 0.4f * 0.5f * (1.f + 0.4f / (y2 - y1) * tanh((da - 1.f / 3.f) * a));
					}
					if (da >= 1.f / 2.f && da < 5.f / 6.f) {
						y1 = 1.0f + 0.8f * 0.5f * (1.f + tanh((1.f / 2.f - 2.f / 3.f) * a));
						y2 = 1.0f + 0.8f * 0.5f * (1.f + tanh((5.f / 6.f - 2.f / 3.f) * a));
						mu = 1.0f + 0.8f * 0.5f * (1.f + 0.8f / (y2 - y1) * tanh((da - 2.f / 3.f) * a));
					}
					// ...
					y1 = tanh((5.f / 6.f - 3.f / 3.f) * a);
					y2 = tanh((1.f - 3.f / 3.f) * a);
					mu = 1.8f + 1.6f * 0.5f * (1.f + 1.f / (y2 - y1) * tanh((da - 3.f / 3.f) * a));
					// ...
					mulSynFast[lConnId] = mu;
					mulSynSlow[lConnId] = mu;
				}
				// ...
			}
		}
#endif
	}
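	/* Sketch (added, not part of the original source): between spikes the short-term
	 * plasticity state decays toward its resting point, which is what the stpu/stpx
	 * updates above implement. Written as a per-millisecond recurrence with
	 * facilitation u, depression x and time constants tau_u, tau_x:
	 *
	 *   u[t+] = u[t-] * (1 - 1/tau_u)               // facilitation decays back to 0
	 *   x[t+] = x[t-] + (1 - x[t-]) * (1/tau_x)     // available resources recover to 1
	 *
	 * The spike-triggered jumps (u += U*(1-u), x -= u*x) are applied separately in
	 * firingUpdateSTP() further below.
	 */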
#ifndef __NO_PTHREADS__ // POSIX
	void* SNN::helperDoSTPUpdateAndDecayCond_CPU(void* arguments) {
		// ...
	}
#endif
#ifdef __NO_PTHREADS__
	void SNN::findFiring_CPU(int netId) {
#else // POSIX
	void* SNN::findFiring_CPU(int netId) {
#endif
		assert(runtimeData[netId].memType == CPU_MEM);

		for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) {
			for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++) {
				bool needToWrite = false;
				// ...
				if (groupConfigs[netId][lGrpId].Type & POISSON_NEURON) {
					if (groupConfigs[netId][lGrpId].isSpikeGenFunc) {
						unsigned int offset = lNId - groupConfigs[netId][lGrpId].lStartN + groupConfigs[netId][lGrpId].Noffset;
						needToWrite = getSpikeGenBit(offset, netId);
					} else {
						needToWrite = getPoissonSpike(lNId, netId);
					}
					// ...
				} else {
					// regular neuron: consume the spike flag set by globalStateUpdate_CPU()
					if (runtimeData[netId].curSpike[lNId]) {
						runtimeData[netId].curSpike[lNId] = false;
						// ...
					}
					// record membrane state for the neuron monitor
					if (networkConfigs[netId].sim_with_nm && lNId - groupConfigs[netId][lGrpId].lStartN < MAX_NEURON_MON_GRP_SZIE) {
						// ...
						runtimeData[netId].nVBuffer[idxBase + lNId - groupConfigs[netId][lGrpId].lStartN] = runtimeData[netId].voltage[lNId];
						runtimeData[netId].nUBuffer[idxBase + lNId - groupConfigs[netId][lGrpId].lStartN] = runtimeData[netId].recovery[lNId];
						// ...
					}
				}
				// ...
				if (needToWrite) {
					bool hasSpace = false;
					// check whether the firing table for this group's maximum delay still has room
					if (groupConfigs[netId][lGrpId].MaxDelay == 1) {
						if (runtimeData[netId].spikeCountD1Sec + 1 < networkConfigs[netId].maxSpikesD1) {
							// ...
						}
					} else {
						if (runtimeData[netId].spikeCountD2Sec + runtimeData[netId].spikeCountLastSecLeftD2 + 1 < networkConfigs[netId].maxSpikesD2) {
							// ...
						}
					}
					// ...
					if (groupConfigs[netId][lGrpId].MaxDelay == 1) {
						// ... write into the delay-1 firing table
					}
					// ...
					if (groupConfigs[netId][lGrpId].hasExternalConnect) {
						// ...
						if (groupConfigs[netId][lGrpId].MaxDelay == 1) {
							// ...
						}
						// ...
						assert(extFireId != -1);
						// ...
					}
					// ...
					if (groupConfigs[netId][lGrpId].WithSTP) {
						firingUpdateSTP(lNId, lGrpId, netId);
					}
					// ...
					if (IS_REGULAR_NEURON(lNId, networkConfigs[netId].numNReg, networkConfigs[netId].numNPois))
						resetFiredNeuron(lNId, lGrpId, netId);
					// ...
					if (!sim_in_testing && groupConfigs[netId][lGrpId].WithSTDP) {
						updateLTP(lNId, lGrpId, netId);
					}
				}
			}
		}
	}
#ifndef __NO_PTHREADS__ // POSIX
	void* SNN::helperFindFiring_CPU(void* arguments) {
		// ...
	}
#endif
#define LN_ELIGIBILITY_INSEARCH
#define LN_MOST_RECENT

	void SNN::findWavefrontPath_CPU(std::vector<int>& path, std::vector<float>& eligibility, int netId, int grpId, int startNId, int goalNId) {
		// ...
		assert(runtimeData[netId].memType == CPU_MEM);
		assert(groupConfigs[netId][grpId].WithAxonPlast);
		// ...
		KERNEL_INFO("findWavefrontPath_CPU from %d to %d in group %d of net %d", startNId, goalNId, grpId, netId);
		// ...
		int numN = groupConfigs[netId][grpId].numN;
		// ...
		int lEndN = groupConfigs[netId][grpId].lEndN;
		int lStartN = groupConfigs[netId][grpId].lStartN;
		// ...
		auto &rtD = runtimeData[netId];
		// ...
		auto &times = rtD.firingTimesD2;
		// ...
		std::vector<bool> valid_ids(numN, true);
		// ...
		std::vector<unsigned int> path_times;
		// ...
		// find the most recent firing of neuron `fired` at or before table index `last`
		auto searchPrevFiring = [&](int fired, int last) {
			KERNEL_DEBUG("searching previous firing for Nid %d starting on %d\n", fired, last);
			// ...
			KERNEL_DEBUG("firingTableD2[%d]=%d\n", index, rtD.firingTableD2[index]);
			if (rtD.firingTableD2[index] == fired) {
				KERNEL_DEBUG("NId %d found at index %d (%d)\n", fired, index, rtD.firingTimesD2[index]);
				// ...
			}
			if (rtD.firingTableD2[index] == startNId) {
				KERNEL_DEBUG("Nid %d has no firing that leads to startNId %d\n", fired, startNId);
#ifdef LN_MOST_RECENT
				// ...
#endif
			}
			// ...
#ifdef LN_MOST_RECENT
			// ...
#endif
		};

		// append a neuron to the reconstructed path and mark it as visited
		auto appendToPath = [&](int lNId, unsigned int time) {
			if (lNId < lStartN || lNId > lEndN) {
				printf("reject append to path: %d\n", lNId);
				// ...
			}
			path.push_back(lNId);
			valid_ids[lNId - lStartN] = false;
			path_times.push_back(time);
		};

		auto assignEligibility = [&](int lNId, float e_i) {
#ifdef LN_ELIGIBILITY_INSEARCH // do nothing for new search
			// ...
			if (lNId < lStartN || lNId > lEndN) {
				KERNEL_DEBUG("WARNING: Reject as NId is not part of the group %d", lNId);
				// ...
			}
			auto e_i_prev = eligibility[lNId - lStartN];
			if (e_i_prev > 0.0f && e_i_prev > e_i)
				eligibility[lNId - lStartN] = e_i;
			// ...
			eligibility[lNId - lStartN] = e_i;
#endif
		};

#ifndef LN_ELIGIBILITY_INSEARCH
		auto assignEligibility2 = [&](int lNId, float e_i) {
			// ...
			if (eligibility[lNId - lStartN] > 0.0f) {
				// ...
			}
			eligibility[lNId - lStartN] = e_i;
		};
#endif
		auto current = goalNId;
		// ...
		auto sCD2 = rtD.spikeCountD2Sec;
		auto fTD2 = rtD.firingTableD2[sCD2 > 0 ? sCD2 - 1 : sCD2];
		auto tTD2 = rtD.firingTimesD2[sCD2 > 0 ? sCD2 - 1 : sCD2];
		KERNEL_DEBUG("Firings D2: spikeCountD2Sec: %d firingTableD2[spikeCountD2Sec-1]: %d timeTableD2[spikeCountD2Sec-1] %d\n", sCD2, fTD2, tTD2);

		// start the backward search at the most recent firing of the goal neuron
		int last = searchPrevFiring(current, rtD.spikeCountD2Sec - 1);
#ifdef LN_MOST_RECENT
		// ...
#endif
		unsigned int current_time = rtD.firingTimesD2[last];
		appendToPath(current, current_time);
		// ...
		int t_0 = current_time;
		// ...
		float base = 1.0f - 1.0f / float(tau);
		// eligibility decays exponentially with the distance to the goal firing time t_0
		auto eligibility_i = [&](int t) {
			float e_i = std::pow(base, t_0 - t);
			// ...
		};
		// ...
		assignEligibility(current, eligibility_i(current_time));

		// walk backwards through the firing table until the start neuron is reached
		for (int iteration = 1; iteration < numN && current != startNId; iteration++) {
			// ...
			for (auto t_last = rtD.firingTimesD2[last];
				rtD.firingTimesD2[last] == t_last;
				/* ... */) {
				// ...
				KERNEL_DEBUG("skipping firings at same time of current: %d\n", last);
				// ...
			}
#ifdef LN_MOST_RECENT
			// ...
#endif
			// ...
			KERNEL_DEBUG("skipping iteration current id is negative %d\n", current);
#ifdef LN_MOST_RECENT
			// ...
#endif
			auto npre = rtD.Npre[current];
			auto cumPre = rtD.cumulativePre[current];
			for (int j = 0; j < npre; j++) {
				// ...
				auto preSynId = rtD.preSynapticIds[cumPre + j];
				auto preNId = preSynId.nId;
				// ...
				if (preNId < lStartN || preNId > lEndN) {
					KERNEL_DEBUG("skipping neuron outside the group: %d\n", preNId);
					// ...
				}
				if (!valid_ids[preNId - lStartN]) {
					// ...
				}
				auto last_pre = last;
				// ...
				// look up the axonal delay d(preNId, current) in the post-synaptic id list
				unsigned int offset = rtD.cumulativePost[lNId];
				// ...
				for (int t = 0; !found && t < glbNetworkConfig.maxDelay; t++) {
					// ...
					SynInfo post_info = rtD.postSynapticIds[offset + idx_d];
					// ...
					if (lNIdPost == current) {
						// ...
						KERNEL_DEBUG("d(%d, %d) = %d\n", lNId, lNIdPost, delay);
						// ...
					}
					// ...
				}
				// ...
				while (last_pre > 0 && rtD.firingTimesD2[last_pre] > current_time - delay) {
					KERNEL_DEBUG("skipping firings at frame %d of current_time %d\n", last_pre, rtD.firingTimesD2[last]);
					assignEligibility(rtD.firingTableD2[last_pre], eligibility_i(rtD.firingTimesD2[last_pre]));
					// ...
				}
				// ...
				int j_fired = searchPrevFiring(preNId, last_pre);
#ifdef LN_MOST_RECENT
				// ...
#endif
				if (j_fired == MAXINT) {
					// ...
				}
				assignEligibility(preNId, eligibility_i(rtD.firingTimesD2[j_fired]));
				// ...
#ifdef LN_MOST_RECENT
				j_max = std::max(j_max, j_fired);
#else
				j_min = std::min(j_min, j_fired);
#endif
				// ...
			}
#ifdef LN_MOST_RECENT
			// ...
#endif
			if (j_min == MAXINT) {
				// ...
				KERNEL_INFO("None of the pre was found. Path was incomplete. Break");
				// ...
			}
#ifdef LN_MOST_RECENT
			// ...
#endif
			current = rtD.firingTableD2[last];
			current_time = rtD.firingTimesD2[last];
			appendToPath(current, current_time);
		}
		// ...
		// log the reconstructed path from start to goal
		std::ostringstream string_stream;
		for (auto iter = path.rbegin(); iter < path.rend(); iter++) {
			if (iter != path.rbegin())
				string_stream << ",";
			string_stream << *iter;
		}
		// ...
		KERNEL_INFO("path: %s", string_stream.str().c_str());
#ifndef LN_ELIGIBILITY_INSEARCH
		// second pass: assign eligibility to every group neuron that fired between the
		// start and goal firings
		unsigned i_goal = 0, t_goal = 0, i_start = 0, t_start = 0;
		// ...
		KERNEL_DEBUG("Firings D2: spikeCountD2Sec: %d firingTableD2[spikeCountD2Sec-1]: %d timeTableD2[spikeCountD2Sec-1] %d\n", sCD2, fTD2, tTD2);
		// ...
		// locate the most recent firing of the goal neuron
		for (unsigned last = rtD.spikeCountD2Sec - 1; last >= 0; last--) {
			if (rtD.firingTableD2[last] == goalNId) {
				// ...
				t_goal = rtD.firingTimesD2[last];
				KERNEL_INFO("Goal firings: firingTableD2[%d]=%d, timeTableD2[%d]=%d\n", i_goal, goalNId, i_goal, t_goal);
				// ...
			}
			// ...
		}
		// ...
		// locate the start neuron's firing that precedes the goal firing
		for (unsigned last = rtD.spikeCountD2Sec - 1;
			last >= 0 && rtD.firingTimesD2[last] >= start_t;
			/* ... */) {
			// ...
		}
		// ...
		for (unsigned first = std::max(0, last); first < i_goal; first++) {
			if (rtD.firingTableD2[first] == startNId) {
				// ...
				t_start = rtD.firingTimesD2[first];
				KERNEL_INFO("Start firings: firingTableD2[%d]=%d, firingTimesD2[%d]=%d\n", i_start, startNId, i_start, t_start);
				// ...
			}
		}
		// ...
		for (unsigned i = i_start; rtD.firingTimesD2[i] <= t_goal; i++) {
			auto firedNId = rtD.firingTableD2[i];
			// ...
			if (firedNId >= lStartN && firedNId <= lEndN) {
				assignEligibility2(firedNId, eligibility_i(rtD.firingTimesD2[i]));
				// ...
			}
		}
#endif
	}
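	/* Worked example (added, not part of the original source): eligibility_i() above
	 * assigns exponentially decaying credit to earlier firings, e_i = base^(t_0 - t)
	 * with base = 1 - 1/tau. For tau = 10 and a goal firing at t_0 = 100 ms this gives
	 * e_i(100) = 1.0, e_i(90) ~= 0.9^10 ~= 0.35 and e_i(70) ~= 0.9^30 ~= 0.04, so
	 * neurons that fired long before the goal firing contribute little eligibility.
	 */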
#define PATCH_updateDelays_PostNId
#define PATCH_updateDelays_PostNId_Break

	bool SNN::updateDelays_CPU(int netId, int gGrpIdPre, int gGrpIdPost, std::vector<std::tuple<int, int, uint8_t>> connDelays) {
		// ...
		int netIdPost = groupConfigMDMap[gGrpIdPost].netId;
		int lGrpIdPost = groupConfigMDMap[gGrpIdPost].lGrpId;
		// ...
		// find the local group id of the pre-synaptic group on the post-synaptic partition
		for (int lGrpId = 0; lGrpId < networkConfigs[netIdPost].numGroupsAssigned; lGrpId++)
			if (groupConfigs[netIdPost][lGrpId].gGrpId == gGrpIdPre) {
				// ...
			}
		assert(lGrpIdPre != -1);
		// ...
		int numPreN = groupConfigMap[gGrpIdPre].numN;
		int numPostN = groupConfigMap[gGrpIdPost].numN;
		// ...
		int lStartNIdPre = groupConfigs[netIdPost][lGrpIdPre].lStartN;
		int lEndNIdPre = groupConfigs[netIdPost][lGrpIdPre].lEndN;
		int lStartNIdPost = groupConfigs[netIdPost][lGrpIdPost].lStartN;
		int lEndNIdPost = groupConfigs[netIdPost][lGrpIdPost].lEndN;
		// ...
		// build a dense pre x post matrix of the current delays
		uint8_t* delays = new uint8_t[(numPreN + 1) * (numPostN + 1)];
		memset(delays, 0, numPreN * numPostN);
		for (int lNIdPre = groupConfigs[netIdPost][lGrpIdPre].lStartN; lNIdPre <= groupConfigs[netIdPost][lGrpIdPre].lEndN; lNIdPre++) {
			unsigned int offset = managerRuntimeData.cumulativePost[lNIdPre];
			for (int t = 0; t < glbNetworkConfig.maxDelay; t++) {
				// ...
				assert(lNIdPost < glbNetworkConfig.numN);
				if (lNIdPost >= groupConfigs[netIdPost][lGrpIdPost].lStartN && lNIdPost <= groupConfigs[netIdPost][lGrpIdPost].lEndN) {
					delays[(lNIdPre - groupConfigs[netIdPost][lGrpIdPre].lStartN) + numPreN * (lNIdPost - groupConfigs[netIdPost][lGrpIdPost].lStartN)] = t + 1;
					// ...
				}
				// ...
			}
		}
		// ...
		auto getDelay = [&](int pre, int post) { return delays[post * numPostN + pre]; };
		auto setDelay = [&](int pre, int post, int8_t delay) { delays[post * numPostN + pre] = delay; };
#ifdef DEBUG_updateDelays_CPU
		// ...
		auto printDelays = [&]() {
			for (int i = 0; i < numPreN; i++) {
				for (int j = 0; j < numPostN; j++) {
					int d = getDelay(i, j);
					// ...
					printf("pre:%d post:%d delay:%d\n", i, j, d);
					// ...
				}
			}
		};
		// ...
#endif
		const int buff_len = 30000;
		char buffer[buff_len];
		// ...
		for (auto iter = connDelays.begin(); iter != connDelays.end(); iter++)
			// ...
#ifdef DEBUG_updateDelays_CPU
		// ...
#endif
		std::map<int, int> GLoffset;  // global-to-local neuron id offset per group
		std::map<int, int> GLgrpId;   // global-to-local group id per group
		// ...
		for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
			GLoffset[grpIt->gGrpId] = grpIt->GtoLOffset;
			GLgrpId[grpIt->gGrpId] = grpIt->lGrpId;
		}
		// ...
		connInfo.grpSrc = lGrpIdPre;
		connInfo.grpDest = lGrpIdPost;
		// ...
		connInfo.maxWt = -1.0f;
		// ...
		int post_pos, pre_pos;
		enum { left, right, none } direction;
		// ...
		// process each requested (preNId, postNId, delay) tuple
		for (auto iter = connDelays.begin(); iter != connDelays.end(); iter++) {
			connInfo.nSrc = std::get<0>(*iter);
			connInfo.nDest = std::get<1>(*iter);
			connInfo.delay = std::get<2>(*iter);
#ifdef DEBUG_updateDelays_CPU
			// ...
			printf("before pre=%d, post=%d delay=%d\n%s\n", connInfo.nSrc, connInfo.nDest, connInfo.delay, buffer);
#endif
			// ...
			int old_delay = getDelay(connInfo.nSrc, connInfo.nDest);
			// ...
			// decide in which direction the synapse entry has to move within the
			// delay-sorted post-synaptic id list
			if (connInfo.delay < old_delay) {
				// ... (move toward shorter delays)
			}
			else if (connInfo.delay > old_delay) {
				// ... (move toward longer delays)
			}
			// ...
			// switch from group-relative to local neuron ids
			connInfo.nSrc += lStartNIdPre;
			connInfo.nDest += lStartNIdPost;
			// ...
			int lNIdPre = connInfo.nSrc + GLoffset[connInfo.grpSrc];
			unsigned int offset = runtimeData[netId].cumulativePre[lNIdPre];
			// ...
			int delay_left = -1;
			// ...
			int n = runtimeData[netId].Npost[connInfo.nSrc];
			if (direction == left) {
				// ...
				// locate the synapse entry for nDest, scanning from the right
				for (int pos = start + n - 1; pos >= start; pos--)
					if (runtimeData[netId].postSynapticIds[pos].nId == connInfo.nDest) {
						// ...
					}
				// ...
				// bubble the entry to the left while the neighbouring delays are larger
				while (post_pos > 0) {
					// ...
#ifdef PATCH_updateDelays_ConnGroup
					if (runtimeData[netId].postSynapticIds[post_pos].gsId != 0) {
						// ...
					}
#endif
					// ...
#ifdef PATCH_updateDelays_PostNId
					if (nId_left < lStartNIdPost || nId_left > lEndNIdPost) {
						// ...
					}
#ifndef PATCH_updateDelays_PostNId_Break
					// ...
#endif
#endif
					// ...
					delay_left = getDelay(connInfo.nSrc - lStartNIdPre, nId_left - lStartNIdPost);
					if (connInfo.delay < delay_left) {
						auto postSynapticIds_synId = runtimeData[netId].postSynapticIds[post_pos];
						// ...
						runtimeData[netId].postSynapticIds[post_pos + 1] = postSynapticIds_synId;
						// ...
					}
					// ...
				}
			}
			else if (direction == right) {
				// ...
				for (int pos = start; pos < start + n; pos++)
					if (runtimeData[netId].postSynapticIds[pos].nId == connInfo.nDest) {
						// ...
					}
				// ...
				// bubble the entry to the right while the neighbouring delays are smaller
				while (post_pos < start + n - 1) {
					// ...
#ifdef PATCH_updateDelays_ConnGroup
					if (runtimeData[netId].postSynapticIds[post_pos].gsId != 0) {
						// ...
					}
#endif
					// ...
#ifdef PATCH_updateDelays_PostNId
					if (nId_left < lStartNIdPost || nId_left > lEndNIdPost) {
						// ...
					}
#ifndef PATCH_updateDelays_PostNId_Break
					// ...
#endif
#endif
					// ...
					delay_left = getDelay(connInfo.nSrc - lStartNIdPre, nId_left - lStartNIdPost);
					if (connInfo.delay > delay_left) {
						auto postSynapticIds_synId = runtimeData[netId].postSynapticIds[post_pos];
						// ...
						runtimeData[netId].postSynapticIds[post_pos - 1] = postSynapticIds_synId;
						// ...
					}
					// ...
				}
			}
			// ...
			// re-establish the pre-synaptic bookkeeping for every synapse of this source neuron
			for (int synId = runtimeData[netId].cumulativePost[connInfo.nSrc]; synId < runtimeData[netId].Npost[connInfo.nSrc] + runtimeData[netId].cumulativePost[connInfo.nSrc]; synId++) {
				// ...
				int nId = postSynInfo.nId;
				// ...
				int pre_pos = runtimeData[netId].cumulativePre[nId] + preSynId;
				// ...
#ifdef PATCH_updateDelays_ConnGroup
				if (preSynInfo.gsId != 0) {
					// ...
				}
#endif
				// ...
			}
			// ...
#define SET_CONN_GRP_ID(val, grpId) ((grpId << NUM_SYNAPSE_BITS) | GET_CONN_SYN_ID(val))
			// ...
#ifdef DEBUG_updateDelays_CPU
			printf("%d\n", post_pos);
#endif
			// ...
			// update postDelayInfo: remove the entry from its old delay slot ...
			int t_old = old_delay - 1;
			// ...
			KERNEL_ERROR("Post-synaptic delay was not sorted correctly pre_id=%d, offset=%d", lNIdPre, offset);
			// ...
			// ... and add it to the new one
			int t_new = connInfo.delay - 1;
			// ...
			KERNEL_ERROR("Post-synaptic delay was not sorted correctly pre_id=%d, offset=%d", lNIdPre, offset);
			// ...
			for (int t = 0; t < glbNetworkConfig.maxDelay + 1; t++) {
				// ...
			}
			// ...
#ifdef DEBUG_updateDelays_CPU
			// ...
			printf("after pre=%d, post=%d delay=%d\n%s\n", connInfo.nSrc, connInfo.nDest, connInfo.delay, buffer);
#endif
			// ...
			printf("WARNING: skipping setDelay (offset!=n): pre=%d, post=%d delay=%d\n",
				/* ... */);
			// ...
		}
		// ...
	}
	void SNN::printEntrails_CPU(char* buffer, unsigned length, int netId, int lGrpIdPre, int lGrpIdPost) {
		// ...
		const int lineBufferLength = 1024;
		char lineBuffer[lineBufferLength];
		// ...
		int numPostN = groupConfigs[netId][lGrpIdPost].numN;
		// ...
		int lStartNIdPre = groupConfigs[netId][lGrpIdPre].lStartN;
		int lEndNIdPre = groupConfigs[netId][lGrpIdPre].lEndN;
		int lStartNIdPost = groupConfigs[netId][lGrpIdPost].lStartN;
		int lEndNIdPost = groupConfigs[netId][lGrpIdPost].lEndN;
		// ...
		std::map<int, int> GLoffset;  // global-to-local neuron id offset per group
		std::map<int, int> GLgrpId;   // global-to-local group id per group
		// ...
		for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
			GLoffset[grpIt->gGrpId] = grpIt->GtoLOffset;
			GLgrpId[grpIt->gGrpId] = grpIt->lGrpId;
		}
		// ...
		int numN = glbNetworkConfig.numN;
		int maxDelay = glbNetworkConfig.maxDelay;
		// ...
		strcpy_s(buffer, length, "");
		auto append = [&]() { strcat_s(buffer, length, lineBuffer); };
		// ...
		// per-neuron synapse counts and cumulative offsets
		sprintf_s(lineBuffer, lineBufferLength, "%-9s %-9s %-9s %-9s\n", "Npost", "Npre", "cumPost", "cumPre");
		// ...
		for (int lNId = lStartNIdPre; lNId <= lEndNIdPre; lNId++) {
			int _Npost = runtimeData[netId].Npost[lNId];
			int _Npre = runtimeData[netId].Npre[lNId];
			// ...
			int _cumulativePre = runtimeData[netId].cumulativePre[lNId];
			sprintf_s(lineBuffer, lineBufferLength, "[%3d] %3d [%3d] %3d [%3d] %3d [%3d] %3d\n", lNId, _Npost, lNId, _Npre, lNId, _cumulativePost, lNId, _cumulativePre);
			// ...
		}
		// ...
		int numPreSynapses = groupConfigs[netId][lGrpIdPost].numPreSynapses;
		// ...
		sprintf_s(lineBuffer, lineBufferLength, "\n%s (%s, %s, %s)\n", "postSynapticIds", "connectionGroupId", "synapseId", "postNId");
		// ...
		for (int pos = start; pos < numPostSynapses; pos++) {
			// ...
			sprintf_s(lineBuffer, lineBufferLength, "[%3d] {%3d %3d} %3d \n", pos, grpId, synId, lNId);
			// ...
		}
		// ...
		sprintf_s(lineBuffer, lineBufferLength, "\n%s (%s, %s, %s)\n", "preSynapticIds", "connectionGroupId", "synapseId", "preNId");
		// ...
		for (int pos = start; pos < numPreSynapses; pos++) {
			// ...
			sprintf_s(lineBuffer, lineBufferLength, "[%3d] {%3d %3d} %3d\n", pos, grpId, synId, lNId);
			// ...
		}
		// ...
		int numPreN = groupConfigs[netId][lGrpIdPre].numN;
		// ...
		// postDelayInfo table: one [start,length] pair per (pre neuron, delay) slot
		sprintf_s(lineBuffer, lineBufferLength, "\npostDelayInfo (pre x d)\n "); append();
		for (int t = 0; t < maxDelay + 1; t++) {
			sprintf_s(lineBuffer, lineBufferLength, "%4d ", t + 1); append();
		}
		sprintf_s(lineBuffer, lineBufferLength, "\n"); append();
		for (int lNId = lStartNIdPre; lNId <= lEndNIdPre; lNId++) {
			// ...
			sprintf_s(lineBuffer, lineBufferLength, "%2d ", lNId); append();
			for (int t = 0; t < maxDelay + 1; t++) {
				sprintf_s(lineBuffer, lineBufferLength, "[%d,%d]",
					runtimeData[netId].postDelayInfo[lNId * (maxDelay + 1) + t].delay_index_start,
					runtimeData[netId].postDelayInfo[lNId * (maxDelay + 1) + t].delay_length); append();
			}
			// ...
			sprintf_s(lineBuffer, lineBufferLength, "\n"); append();
		}
		// ...
	}
	void SNN::updateLTP(int lNId, int lGrpId, int netId) {
		unsigned int pos_ij = runtimeData[netId].cumulativePre[lNId];
		// ...
		for (int j = 0; j < runtimeData[netId].Npre_plastic[lNId]; pos_ij++, j++) {
			int stdp_tDiff = (simTime - runtimeData[netId].synSpikeTime[pos_ij]);
			assert(!((stdp_tDiff < 0) && (runtimeData[netId].synSpikeTime[pos_ij] != MAX_SIMULATION_TIME)));
			// ...
			// scale a PKA/PLC weight by the concentration of the selected neuromodulator
			auto weight_nm = [&](float& nm, int i_nm) {
				switch (i_nm) {
				case NM_DA: nm *= runtimeData[netId].grpDA[lGrpId];
					break;
				// ...
				case NM_NE: nm *= runtimeData[netId].grpNE[lGrpId];
					break;
				// ...
				}
			};
			// ...
			if (stdp_tDiff > 0) {
				// ...
				if (connectConfigMap[connId].stdpConfig.WithESTDP && runtimeData[netId].maxSynWt[pos_ij] >= 0) {
					// excitatory synapse: apply the LTP side of the configured E-STDP curve
					switch (connectConfigMap[connId].stdpConfig.WithESTDPcurve) {
					// ...
#ifdef LN_I_CALC_TYPES
						if (stdp_tDiff * connectConfigMap[connId].stdpConfig.TAU_PLUS_INV_EXC < 25) {
							if (connectConfigMap[connId].stdpConfig.WithESTDPtype == PKA_PLC_MOD) {
								// ...
								float nm_pka = connectConfigMap[connId].stdpConfig.W_PKA;
								weight_nm(nm_pka, connectConfigMap[connId].stdpConfig.NM_PKA);
								// ...
								float nm_plc = connectConfigMap[connId].stdpConfig.W_PLC;
								weight_nm(nm_plc, connectConfigMap[connId].stdpConfig.NM_PLC);
								// ...
								float a_p = connectConfigMap[connId].stdpConfig.ALPHA_PLUS_EXC;
								float tau_p_inv = connectConfigMap[connId].stdpConfig.TAU_PLUS_INV_EXC;
								// ...
								float pka = nm_pka * 2 * STDP(stdp_tDiff, a_p, tau_p_inv);
								// ...
								float plc = nm_plc * STDP(stdp_tDiff, -a_p, tau_p_inv);
								// ...
								runtimeData[netId].wtChange[pos_ij] += pka + plc;
								// ...
							} else {
								runtimeData[netId].wtChange[pos_ij] += STDP(stdp_tDiff, connectConfigMap[connId].stdpConfig.ALPHA_PLUS_EXC, connectConfigMap[connId].stdpConfig.TAU_PLUS_INV_EXC);
							}
						}
						// ...
#else
						if (stdp_tDiff * connectConfigMap[connId].stdpConfig.TAU_PLUS_INV_EXC < 25) {
							runtimeData[netId].wtChange[pos_ij] += STDP(stdp_tDiff, connectConfigMap[connId].stdpConfig.ALPHA_PLUS_EXC, connectConfigMap[connId].stdpConfig.TAU_PLUS_INV_EXC);
						}
						// ...
#endif
					// ...
						// timing-based curve with an LTP window of width GAMMA
						if (stdp_tDiff * connectConfigMap[connId].stdpConfig.TAU_PLUS_INV_EXC < 25) {
							if (stdp_tDiff <= connectConfigMap[connId].stdpConfig.GAMMA)
								runtimeData[netId].wtChange[pos_ij] += connectConfigMap[connId].stdpConfig.OMEGA + connectConfigMap[connId].stdpConfig.KAPPA * STDP(stdp_tDiff, connectConfigMap[connId].stdpConfig.ALPHA_PLUS_EXC, connectConfigMap[connId].stdpConfig.TAU_PLUS_INV_EXC);
							else
								runtimeData[netId].wtChange[pos_ij] -= STDP(stdp_tDiff, connectConfigMap[connId].stdpConfig.ALPHA_PLUS_EXC, connectConfigMap[connId].stdpConfig.TAU_PLUS_INV_EXC);
						}
						// ...
					}
				}
				else if (connectConfigMap[connId].stdpConfig.WithISTDP && runtimeData[netId].maxSynWt[pos_ij] < 0) {
					// inhibitory synapse: apply the configured I-STDP curve
					switch (connectConfigMap[connId].stdpConfig.WithISTDPcurve) {
					// ...
#ifdef LN_I_CALC_TYPES
						if (stdp_tDiff * connectConfigMap[connId].stdpConfig.TAU_PLUS_INV_INB < 25) {
							if (connectConfigMap[connId].stdpConfig.WithESTDPtype == PKA_PLC_MOD) {
								// ...
								float nm_pka = connectConfigMap[connId].stdpConfig.W_PKA;
								weight_nm(nm_pka, connectConfigMap[connId].stdpConfig.NM_PKA);
								// ...
								float nm_plc = connectConfigMap[connId].stdpConfig.W_PLC;
								weight_nm(nm_plc, connectConfigMap[connId].stdpConfig.NM_PLC);
								// ...
							}
							// ...
							runtimeData[netId].wtChange[pos_ij] -= STDP(stdp_tDiff, connectConfigMap[connId].stdpConfig.ALPHA_PLUS_INB, connectConfigMap[connId].stdpConfig.TAU_PLUS_INV_INB);
						}
						// ...
#else
						if (stdp_tDiff * connectConfigMap[connId].stdpConfig.TAU_PLUS_INV_INB < 25) {
							runtimeData[netId].wtChange[pos_ij] -= STDP(stdp_tDiff, connectConfigMap[connId].stdpConfig.ALPHA_PLUS_INB, connectConfigMap[connId].stdpConfig.TAU_PLUS_INV_INB);
						}
						// ...
#endif
					// ...
						// pulse-shaped anti-Hebbian curve
						if (stdp_tDiff <= connectConfigMap[connId].stdpConfig.LAMBDA) {
							runtimeData[netId].wtChange[pos_ij] -= connectConfigMap[connId].stdpConfig.BETA_LTP;
						}
						else if (stdp_tDiff <= connectConfigMap[connId].stdpConfig.DELTA) {
							runtimeData[netId].wtChange[pos_ij] -= connectConfigMap[connId].stdpConfig.BETA_LTD;
						}
						// ...
					}
				}
				// ...
			}
		}
	}
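	/* Sketch (added, not part of the original source): the STDP(t, A, 1/tau) macro used
	 * above is assumed to evaluate the usual exponential learning window A * exp(-t/tau).
	 * Under that assumption a pre-before-post interval of stdp_tDiff milliseconds adds
	 *
	 *   dW = ALPHA_PLUS_EXC * exp(-stdp_tDiff * TAU_PLUS_INV_EXC)
	 *
	 * to wtChange, and the guard stdp_tDiff * TAU_PLUS_INV_EXC < 25 simply skips spike
	 * pairs whose contribution would be below exp(-25), i.e. numerically negligible.
	 */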
	void SNN::firingUpdateSTP(int lNId, int lGrpId, int netId) {
		// ...
		// indices of the current (+) and previous (-) STP buffer slots for this neuron
		int ind_plus = STP_BUF_POS(lNId, simTime, networkConfigs[netId].maxDelay);
		int ind_minus = STP_BUF_POS(lNId, (simTime - 1), networkConfigs[netId].maxDelay);
		// ...
#ifdef LN_I_CALC_TYPES
		auto& config = groupConfigs[netId][lGrpId];
		float stp_u = config.STP_U;
		if (config.WithNM4STP) {
			// neuromodulator-weighted utilization parameter U
			std::vector<float> nm;
			nm.push_back(groupConfigs[netId][lGrpId].activeDP ? runtimeData[netId].grpDA[lGrpId] : 0.f);
			nm.push_back(groupConfigs[netId][lGrpId].active5HT ? runtimeData[netId].grp5HT[lGrpId] : 0.f);
			nm.push_back(groupConfigs[netId][lGrpId].activeACh ? runtimeData[netId].grpACh[lGrpId] : 0.f);
			nm.push_back(groupConfigs[netId][lGrpId].activeNE ? runtimeData[netId].grpNE[lGrpId] : 0.f);
			auto& config = groupConfigs[netId][lGrpId];
			float w_stp_u = 0.0f;
			for (int i = 0; i < NM_NE + 1; i++) {
				w_stp_u += nm[i] * config.wstpu[i];
			}
			w_stp_u *= config.wstpu[NM_NE + 1];
			stp_u *= w_stp_u + config.wstpu[NM_NE + 2];
			// ...
		}
		runtimeData[netId].stpu[ind_plus] += stp_u * (1.0f - runtimeData[netId].stpu[ind_minus]);
#else
		runtimeData[netId].stpu[ind_plus] += groupConfigs[netId][lGrpId].STP_U * (1.0f - runtimeData[netId].stpu[ind_minus]);
#endif
		// ...
		runtimeData[netId].stpx[ind_plus] -= runtimeData[netId].stpu[ind_plus] * runtimeData[netId].stpx[ind_minus];
	}
	void SNN::resetFiredNeuron(int lNId, short int lGrpId, int netId) {
		if (groupConfigs[netId][lGrpId].WithSTDP
#ifdef LN_AXON_PLAST
			|| groupConfigs[netId][lGrpId].WithAxonPlast
#endif
			) {
			// ...
		}
		// ...
		if (networkConfigs[netId].sim_with_homeostasis) {
			// ...
		}
		// ...
	}
	bool SNN::getPoissonSpike(int lNId, int netId) {
		// ...
		return runtimeData[netId].randNum[lNId - networkConfigs[netId].numNReg] * 1000.0f
			< runtimeData[netId].poissonFireRate[lNId - networkConfigs[netId].numNReg];
	}
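	/* Note (added, not part of the original source): getPoissonSpike() compares a uniform
	 * random draw in [0,1), scaled by 1000, against the neuron's target rate in Hz.
	 * Because the comparison is made once per 1 ms step, a rate of r Hz fires with
	 * probability r/1000 per step, which approximates a Poisson process for r << 1000.
	 */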
	bool SNN::getSpikeGenBit(unsigned int nIdPos, int netId) {
		const int nIdBitPos = nIdPos % 32;
		const int nIdIndex = nIdPos / 32;
		return ((runtimeData[netId].spikeGenBits[nIdIndex] >> nIdBitPos) & 0x1);
	}
	void SNN::generatePostSynapticSpike(int preNId, int postNId, int synId, int tD, int netId) {
		// ...
		// index of this synapse in the flattened pre-synaptic arrays
		unsigned int pos = runtimeData[netId].cumulativePre[postNId] + synId;
		assert(postNId < networkConfigs[netId].numNReg);
		// ...
		short int post_grpId = runtimeData[netId].grpIds[postNId];
		short int pre_grpId = runtimeData[netId].grpIds[preNId];
		// ...
		unsigned int pre_type = groupConfigs[netId][pre_grpId].Type;
		// ...
		assert(mulIndex >= 0 && mulIndex < numConnections);
		// ...
		// the delivered charge starts out as the synaptic weight ...
		float change = runtimeData[netId].wt[pos];
		// ...
		// ... and is scaled by the pre-synaptic short-term plasticity state
		if (groupConfigs[netId][pre_grpId].WithSTP) {
			// ...
			int ind_minus = STP_BUF_POS(preNId, (simTime - tD - 1), networkConfigs[netId].maxDelay);
			int ind_plus = STP_BUF_POS(preNId, (simTime - tD), networkConfigs[netId].maxDelay);
#ifdef LN_I_CALC_TYPES
			auto& config = groupConfigs[netId][pre_grpId];
			float stp_a = config.STP_A;
			float stp_u = config.STP_U;
			if (config.WithNM4STP) {
				std::vector<float> nm;
				nm.push_back(groupConfigs[netId][pre_grpId].activeDP ? runtimeData[netId].grpDA[pre_grpId] : 0.f);
				nm.push_back(groupConfigs[netId][pre_grpId].active5HT ? runtimeData[netId].grp5HT[pre_grpId] : 0.f);
				nm.push_back(groupConfigs[netId][pre_grpId].activeACh ? runtimeData[netId].grpACh[pre_grpId] : 0.f);
				nm.push_back(groupConfigs[netId][pre_grpId].activeNE ? runtimeData[netId].grpNE[pre_grpId] : 0.f);
				auto& config = groupConfigs[netId][pre_grpId];
				float w_stp_u = 0.0f;
				for (int i = 0; i < NM_NE + 1; i++) {
					w_stp_u += nm[i] * config.wstpu[i];
				}
				w_stp_u *= config.wstpu[NM_NE + 1];
				stp_u *= w_stp_u + config.wstpu[NM_NE + 2];
				stp_a = (stp_u > 0.0f) ? 1.0f / stp_u : 1.0f;
				// ...
			}
			change *= stp_a * runtimeData[netId].stpu[ind_plus] * runtimeData[netId].stpx[ind_minus];
#else
			change *= groupConfigs[netId][pre_grpId].STP_A * runtimeData[netId].stpu[ind_plus] * runtimeData[netId].stpx[ind_minus];
#endif
		}
		// add the (scaled) weight onto the post-synaptic receptor conductances or current
#ifdef LN_I_CALC_TYPES
		// ...
		auto &groupConfig = groupConfigs[netId][post_grpId];
		// ...
		switch (groupConfig.icalcType) {
		// ...
			// conductance-based target, excitatory synapse
			runtimeData[netId].gAMPA[postNId] += change * mulSynFast[mulIndex];
			// ...
			if (groupConfig.with_NMDA_rise) {
				runtimeData[netId].gNMDA_r[postNId] += change * groupConfig.sNMDA * mulSynSlow[mulIndex];
				runtimeData[netId].gNMDA_d[postNId] += change * groupConfig.sNMDA * mulSynSlow[mulIndex];
			} else {
				runtimeData[netId].gNMDA[postNId] += change * mulSynSlow[mulIndex];
			}
			// ...
			// conductance-based target, inhibitory synapse (change is negative)
			runtimeData[netId].gGABAa[postNId] -= change * mulSynFast[mulIndex];
			// ...
			if (groupConfig.with_GABAb_rise) {
				runtimeData[netId].gGABAb_r[postNId] -= change * groupConfig.sGABAb * mulSynSlow[mulIndex];
				runtimeData[netId].gGABAb_d[postNId] -= change * groupConfig.sGABAb * mulSynSlow[mulIndex];
			} else {
				runtimeData[netId].gGABAb[postNId] -= change * mulSynSlow[mulIndex];
			}
			// ...
			// current-based target
			runtimeData[netId].current[postNId] += change;
			// ...
			runtimeData[netId].current[postNId] += change;
			// ...
		}
#else
		if (sim_with_conductances) {
			// ...
			runtimeData[netId].gAMPA[postNId] += change * mulSynFast[mulIndex];
			// ...
			if (sim_with_NMDA_rise) {
				runtimeData[netId].gNMDA_r[postNId] += change * sNMDA * mulSynSlow[mulIndex];
				runtimeData[netId].gNMDA_d[postNId] += change * sNMDA * mulSynSlow[mulIndex];
			} else {
				runtimeData[netId].gNMDA[postNId] += change * mulSynSlow[mulIndex];
			}
			// ...
			runtimeData[netId].gGABAa[postNId] -= change * mulSynFast[mulIndex];
			// ...
			if (sim_with_GABAb_rise) {
				runtimeData[netId].gGABAb_r[postNId] -= change * sGABAb * mulSynSlow[mulIndex];
				runtimeData[netId].gGABAb_d[postNId] -= change * sGABAb * mulSynSlow[mulIndex];
			} else {
				runtimeData[netId].gGABAb[postNId] -= change * mulSynSlow[mulIndex];
			}
			// ...
		} else {
			runtimeData[netId].current[postNId] += change;
		}
#endif
		// ...
		// release neuromodulators onto the post-synaptic group
		runtimeData[netId].grpDA[post_grpId] += groupConfigs[netId][post_grpId].releaseDP;
		// ...
		runtimeData[netId].grp5HT[post_grpId] += groupConfigs[netId][post_grpId].release5HT;
		// ...
		runtimeData[netId].grpACh[post_grpId] += groupConfigs[netId][post_grpId].releaseACh;
		// ...
		runtimeData[netId].grpNE[post_grpId] += groupConfigs[netId][post_grpId].releaseNE;
		// ...
		// post-before-pre STDP update triggered by this pre-synaptic spike arrival
		if (!sim_in_testing && connectConfigMap[mulIndex].stdpConfig.WithSTDP) {
			int stdp_tDiff = (simTime - runtimeData[netId].lastSpikeTime[postNId]);
			// ...
			if (stdp_tDiff >= 0) {
				// ...
				// I-STDP for inhibitory pre-synaptic targets (full guard elided in this fragment)
				if (connectConfigMap[mulIndex].stdpConfig.WithISTDP /* ... */) {
					switch (connectConfigMap[mulIndex].stdpConfig.WithISTDPcurve) {
					// ...
						if (stdp_tDiff * connectConfigMap[mulIndex].stdpConfig.TAU_MINUS_INV_INB < 25) {
							runtimeData[netId].wtChange[pos] -= STDP(stdp_tDiff, connectConfigMap[mulIndex].stdpConfig.ALPHA_MINUS_INB, connectConfigMap[mulIndex].stdpConfig.TAU_MINUS_INV_INB);
						}
						// ...
						if (stdp_tDiff <= connectConfigMap[mulIndex].stdpConfig.LAMBDA) {
							runtimeData[netId].wtChange[pos] -= connectConfigMap[mulIndex].stdpConfig.BETA_LTP;
						}
						else if (stdp_tDiff <= connectConfigMap[mulIndex].stdpConfig.DELTA) {
							runtimeData[netId].wtChange[pos] -= connectConfigMap[mulIndex].stdpConfig.BETA_LTD;
						}
						// ...
					}
				}
				else if (connectConfigMap[mulIndex].stdpConfig.WithESTDP && ((pre_type & TARGET_AMPA) || (pre_type & TARGET_NMDA))) {
					// ...
					switch (connectConfigMap[mulIndex].stdpConfig.WithESTDPcurve) {
#ifdef LN_I_CALC_TYPES
					// ...
						if (stdp_tDiff * connectConfigMap[mulIndex].stdpConfig.TAU_MINUS_INV_EXC < 25)
						{
							if (connectConfigMap[mulIndex].stdpConfig.WithESTDPtype == PKA_PLC_MOD) {
								// ...
								auto weight_nm = [&](float& nm, int i_nm) {
									switch (i_nm) {
									case NM_DA: nm *= runtimeData[netId].grpDA[post_grpId];
										break;
									case NM_5HT: nm *= runtimeData[netId].grp5HT[post_grpId];
										break;
									case NM_ACh: nm *= runtimeData[netId].grpACh[post_grpId];
										break;
									case NM_NE: nm *= runtimeData[netId].grpNE[post_grpId];
										break;
									// ...
									}
								};
								// ...
								float nm_pka = connectConfigMap[mulIndex].stdpConfig.W_PKA;
								weight_nm(nm_pka, connectConfigMap[mulIndex].stdpConfig.NM_PKA);
								// ...
								float nm_plc = connectConfigMap[mulIndex].stdpConfig.W_PLC;
								weight_nm(nm_plc, connectConfigMap[mulIndex].stdpConfig.NM_PLC);
								// ...
								float a_m = connectConfigMap[mulIndex].stdpConfig.ALPHA_MINUS_EXC;
								float tau_m_inv = connectConfigMap[mulIndex].stdpConfig.TAU_MINUS_INV_EXC;
								// ...
								float pka_m = nm_pka * STDP(stdp_tDiff, -a_m, tau_m_inv);
								// ...
								float plc_m = nm_plc * 2 * STDP(stdp_tDiff, a_m, tau_m_inv);
								// ...
								runtimeData[netId].wtChange[pos] += pka_m + plc_m;
								// ...
							} else {
								runtimeData[netId].wtChange[pos] += STDP(stdp_tDiff, connectConfigMap[mulIndex].stdpConfig.ALPHA_MINUS_EXC, connectConfigMap[mulIndex].stdpConfig.TAU_MINUS_INV_EXC);
							}
						}
						// ...
#else
					// ...
						if (stdp_tDiff * connectConfigMap[mulIndex].stdpConfig.TAU_MINUS_INV_EXC < 25)
							runtimeData[netId].wtChange[pos] += STDP(stdp_tDiff, connectConfigMap[mulIndex].stdpConfig.ALPHA_MINUS_EXC, connectConfigMap[mulIndex].stdpConfig.TAU_MINUS_INV_EXC);
						// ...
#endif
					}
					// ...
				}
				// ...
			}
			// ...
			assert(!((stdp_tDiff < 0) && (runtimeData[netId].lastSpikeTime[postNId] != MAX_SIMULATION_TIME)));
		}
	}
	float dvdtIzhikevich4(float volt, float recov, float totalCurrent, float timeStep = 1.0f) {
		return (((0.04f * volt + 5.0f) * volt + 140.0f - recov + totalCurrent) * timeStep);
	}

	float dudtIzhikevich4(float volt, float recov, float izhA, float izhB, float timeStep = 1.0f) {
		return (izhA * (izhB * volt - recov) * timeStep);
	}

	float dvdtIzhikevich9(float volt, float recov, float invCapac, float izhK, float voltRest,
		float voltInst, float totalCurrent, float timeStep = 1.0f)
	{
		return ((izhK * (volt - voltRest) * (volt - voltInst) - recov + totalCurrent) * invCapac * timeStep);
	}

	float dudtIzhikevich9(float volt, float recov, float voltRest, float izhA, float izhB, float timeStep = 1.0f) {
		return (izhA * (izhB * (volt - voltRest) - recov) * timeStep);
	}

	float dvdtLIF(float volt, float lif_vReset, float lif_gain, float lif_bias, int lif_tau_m, float totalCurrent, float timeStep = 1.0f) {
		return ((lif_vReset - volt + ((totalCurrent * lif_gain) + lif_bias)) / (float) lif_tau_m) * timeStep;
	}
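	/* Usage sketch (added, not part of the original source): the dvdt/dudt helpers above
	 * are plain derivative evaluations, so a single forward-Euler step of the 4-parameter
	 * Izhikevich model with step size h (in ms) looks like:
	 *
	 *   float h = timeStep;                                   // e.g. 1.0f or 0.1f
	 *   float vNew = v + dvdtIzhikevich4(v, u, totalCurrent, h);
	 *   float uNew = u + dudtIzhikevich4(v, u, izhA, izhB, h);
	 *
	 * globalStateUpdate_CPU() below uses the same helpers both for forward Euler and as
	 * the k1..k4 / l1..l4 slope evaluations of its Runge-Kutta 4 branch.
	 */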
	float SNN::getCompCurrent(int netid, int lGrpId, int lneurId, float const0, float const1) {
		float compCurrent = 0.0f;
		// ... (loop over the compartmental neighbours k of this group)
		int lGrpIdOther = groupConfigs[netid][lGrpId].compNeighbors[k];
		int lneurIdOther = lneurId - groupConfigs[netid][lGrpId].lStartN + groupConfigs[netid][lGrpIdOther].lStartN;
		compCurrent += groupConfigs[netid][lGrpId].compCoupling[k] * ((runtimeData[netid].voltage[lneurIdOther] + const1)
			- (runtimeData[netid].voltage[lneurId] + const0));
		// ...
		return compCurrent;
	}
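	/* Note (added, not part of the original source): getCompCurrent() sums the ohmic
	 * coupling to each compartmental neighbour k,
	 *
	 *   I_comp = sum_k  G_k * ( (V_other_k + const1) - (V_self + const0) ),
	 *
	 * where G_k is compCoupling[k]; const0/const1 let the caller evaluate the coupling
	 * at shifted membrane voltages.
	 */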
#ifdef __NO_PTHREADS__
	void SNN::globalStateUpdate_CPU(int netId) {
#else // POSIX
	void* SNN::globalStateUpdate_CPU(int netId) {
#endif
		assert(runtimeData[netId].memType == CPU_MEM);
		// ...
		float timeStep = networkConfigs[netId].timeStep;
		// ...
		// integrate the neuron state in simNumStepsPerMs sub-steps of length timeStep
		for (int j = 1; j <= networkConfigs[netId].simNumStepsPerMs; j++) {
			bool lastIter = (j == networkConfigs[netId].simNumStepsPerMs);
			for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) {
				if (groupConfigs[netId][lGrpId].Type & POISSON_NEURON) {
					// Poisson groups only decay their homeostatic average firing rate
					if (groupConfigs[netId][lGrpId].WithHomeostasis & (lastIter)) {
						for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++)
							runtimeData[netId].avgFiring[lNId] *= groupConfigs[netId][lGrpId].avgTimeScale_decay;
					}
					// ...
				}
				// ...
				for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++) {
					assert(lNId < networkConfigs[netId].numNReg);
					// ...
					float v = runtimeData[netId].voltage[lNId];
					float v_next = runtimeData[netId].nextVoltage[lNId];
					float u = runtimeData[netId].recovery[lNId];
					float I_sum, NMDAtmp;
					float gNMDA, gGABAb;
					float gAMPA, gGABAa;
					// neuron-model parameters (9-parameter Izhikevich)
					float k = runtimeData[netId].Izh_k[lNId];
					float vr = runtimeData[netId].Izh_vr[lNId];
					float vt = runtimeData[netId].Izh_vt[lNId];
					float inverse_C = 1.0f / runtimeData[netId].Izh_C[lNId];
					float vpeak = runtimeData[netId].Izh_vpeak[lNId];
					float a = runtimeData[netId].Izh_a[lNId];
					float b = runtimeData[netId].Izh_b[lNId];
					// ...
					// LIF parameters
					int lif_tau_m = runtimeData[netId].lif_tau_m[lNId];
					int lif_tau_ref = runtimeData[netId].lif_tau_ref[lNId];
					// ...
					float lif_vTh = runtimeData[netId].lif_vTh[lNId];
					float lif_vReset = runtimeData[netId].lif_vReset[lNId];
					float lif_gain = runtimeData[netId].lif_gain[lNId];
					float lif_bias = runtimeData[netId].lif_bias[lNId];
					// ...
					// accumulate the total input current, starting with external current
					float totalCurrent = runtimeData[netId].extCurrent[lNId];
					// ...
#ifdef LN_I_CALC_TYPES
					auto& config = groupConfigs[netId][lGrpId];
					switch (config.icalcType) {
					// ...
						// conductance-based (COBA) input
						NMDAtmp = (v + 80.0f) * (v + 80.0f) / 60.0f / 60.0f;
						gNMDA = (config.with_NMDA_rise) ? (runtimeData[netId].gNMDA_d[lNId] - runtimeData[netId].gNMDA_r[lNId]) : runtimeData[netId].gNMDA[lNId];
						gGABAb = (config.with_GABAb_rise) ? (runtimeData[netId].gGABAb_d[lNId] - runtimeData[netId].gGABAb_r[lNId]) : runtimeData[netId].gGABAb[lNId];
						gAMPA = runtimeData[netId].gAMPA[lNId];
						gGABAa = runtimeData[netId].gGABAa[lNId];
						I_sum = -(gAMPA * (v - 0.0f)
							+ gNMDA * NMDAtmp / (1.0f + NMDAtmp) * (v - 0.0f)
							+ gGABAa * (v + 70.0f)
							+ gGABAb * (v + 90.0f));
						// ...
						assert(lambda > 0.0f);
						float mu = 1.0f - 0.5f * (exp((ne - 1.0f) / lambda) + exp((da - 1.0f) / lambda));
						// ...
						totalCurrent += I_sum;
						// ...
						// current-based (CUBA) input
						totalCurrent += runtimeData[netId].current[lNId];
						// ...
						totalCurrent += runtimeData[netId].current[lNId];
						// ...
						// neuromodulator-weighted input
						nm += runtimeData[netId].grpDA[lGrpId] * config.nm4w[NM_DA];
						nm += runtimeData[netId].grp5HT[lGrpId] * config.nm4w[NM_5HT];
						nm += runtimeData[netId].grpACh[lGrpId] * config.nm4w[NM_ACh];
						nm += runtimeData[netId].grpNE[lGrpId] * config.nm4w[NM_NE];
						// ...
					}
#else
					if (networkConfigs[netId].sim_with_conductances) {
						NMDAtmp = (v + 80.0f) * (v + 80.0f) / 60.0f / 60.0f;
						gNMDA = (networkConfigs[netId].sim_with_NMDA_rise) ? (runtimeData[netId].gNMDA_d[lNId] - runtimeData[netId].gNMDA_r[lNId]) : runtimeData[netId].gNMDA[lNId];
						gGABAb = (networkConfigs[netId].sim_with_GABAb_rise) ? (runtimeData[netId].gGABAb_d[lNId] - runtimeData[netId].gGABAb_r[lNId]) : runtimeData[netId].gGABAb[lNId];
						// ...
						I_sum = -(runtimeData[netId].gAMPA[lNId] * (v - 0.0f)
							+ gNMDA * NMDAtmp / (1.0f + NMDAtmp) * (v - 0.0f)
							+ runtimeData[netId].gGABAa[lNId] * (v + 70.0f)
							+ gGABAb * (v + 90.0f));
						// ...
						totalCurrent += I_sum;
					} else {
						totalCurrent += runtimeData[netId].current[lNId];
					}
#endif
					// ...
					if (groupConfigs[netId][lGrpId].withCompartments) {
						totalCurrent += getCompCurrent(netId, lGrpId, lNId);
					}
					// integrate the membrane equation with the selected method
					switch (networkConfigs[netId].simIntegrationMethod) {
					// ...
						// forward Euler
						if (!groupConfigs[netId][lGrpId].withParamModel_9 && !groupConfigs[netId][lGrpId].isLIF)
						{
							// 4-parameter Izhikevich model
							// ...
							if (v_next > 30.0f) {
								// ...
								runtimeData[netId].curSpike[lNId] = true;
								v_next = runtimeData[netId].Izh_c[lNId];
								u += runtimeData[netId].Izh_d[lNId];
							}
							// ...
						}
						else if (!groupConfigs[netId][lGrpId].isLIF)
						{
							// 9-parameter Izhikevich model
							v_next = v + dvdtIzhikevich9(v, u, inverse_C, k, vr, vt, totalCurrent, timeStep);
							if (v_next > vpeak) {
								// ...
								runtimeData[netId].curSpike[lNId] = true;
								v_next = runtimeData[netId].Izh_c[lNId];
								u += runtimeData[netId].Izh_d[lNId];
							}
							// ...
						}
						else {
							// LIF model
							if (lif_tau_ref_c > 0) {
								// still refractory: clamp to the reset potential
								// ...
								v_next = lif_vReset;
								// ...
							}
							else {
								if (v_next > lif_vTh) {
									runtimeData[netId].curSpike[lNId] = true;
									v_next = lif_vReset;
									// ...
								}
								else {
									// ...
									v_next = v + dvdtLIF(v, lif_vReset, lif_gain, lif_bias, lif_tau_m, totalCurrent, timeStep);
									// ...
								}
							}
						}
						// ...
						if (groupConfigs[netId][lGrpId].isLIF) {
							if (v_next < lif_vReset) v_next = lif_vReset;
						}
						else {
							if (v_next < -90.0f) v_next = -90.0f;
							// ...
							if (!groupConfigs[netId][lGrpId].withParamModel_9) {
								// ...
							}
							// ...
						}
						// ...
						// Runge-Kutta 4
						if (!groupConfigs[netId][lGrpId].withParamModel_9 && !groupConfigs[netId][lGrpId].isLIF) {
							// 4-parameter Izhikevich model
							// ...
							float k2 = dvdtIzhikevich4(v + k1 / 2.0f, u + l1 / 2.0f, totalCurrent,
								timeStep);
							float l2 = dudtIzhikevich4(v + k1 / 2.0f, u + l1 / 2.0f, a, b, timeStep);
							// ...
							float k3 = dvdtIzhikevich4(v + k2 / 2.0f, u + l2 / 2.0f, totalCurrent,
								timeStep);
							float l3 = dudtIzhikevich4(v + k2 / 2.0f, u + l2 / 2.0f, a, b, timeStep);
							// ...
							v_next = v + (1.0f / 6.0f) * (k1 + 2.0f * k2 + 2.0f * k3 + k4);
							if (v_next > 30.0f) {
								// ...
								runtimeData[netId].curSpike[lNId] = true;
								v_next = runtimeData[netId].Izh_c[lNId];
								u += runtimeData[netId].Izh_d[lNId];
							}
							if (v_next < -90.0f) v_next = -90.0f;
							// ...
							u += (1.0f / 6.0f) * (l1 + 2.0f * l2 + 2.0f * l3 + l4);
						}
						else if (!groupConfigs[netId][lGrpId].isLIF) {
							// 9-parameter Izhikevich model
							// ...
							float k2 = dvdtIzhikevich9(v + k1 / 2.0f, u + l1 / 2.0f, inverse_C, k, vr, vt,
								totalCurrent, timeStep);
							float l2 = dudtIzhikevich9(v + k1 / 2.0f, u + l1 / 2.0f, vr, a, b, timeStep);
							// ...
							float k3 = dvdtIzhikevich9(v + k2 / 2.0f, u + l2 / 2.0f, inverse_C, k, vr, vt,
								totalCurrent, timeStep);
							float l3 = dudtIzhikevich9(v + k2 / 2.0f, u + l2 / 2.0f, vr, a, b, timeStep);
							// ... (k4/l4 evaluated at v + k3, u + l3 with
							//      totalCurrent, timeStep);
							v_next = v + (1.0f / 6.0f) * (k1 + 2.0f * k2 + 2.0f * k3 + k4);
							// ...
							if (v_next > vpeak) {
								// ...
								runtimeData[netId].curSpike[lNId] = true;
								v_next = runtimeData[netId].Izh_c[lNId];
								u += runtimeData[netId].Izh_d[lNId];
							}
							// ...
							if (v_next < -90.0f) v_next = -90.0f;
							// ...
							u += (1.0f / 6.0f) * (l1 + 2.0f * l2 + 2.0f * l3 + l4);
						}
						else {
							// LIF model (same explicit update as in the Euler branch)
							if (lif_tau_ref_c > 0) {
								// ...
								v_next = lif_vReset;
								// ...
							}
							else {
								if (v_next > lif_vTh) {
									runtimeData[netId].curSpike[lNId] = true;
									v_next = lif_vReset;
									// ...
								}
								else {
									// ...
									v_next = v + dvdtLIF(v, lif_vReset, lif_gain, lif_bias, lif_tau_m, totalCurrent, timeStep);
									// ...
								}
							}
							// ...
							if (v_next < lif_vReset) v_next = lif_vReset;
						}
						// ...
					}
					// ...
					// write back the updated recovery variable
					runtimeData[netId].recovery[lNId] = u;
					// ...
					// keep the synaptic current used in this step (for monitoring), or clear it
#ifdef LN_I_CALC_TYPES
					switch (groupConfigs[netId][lGrpId].icalcType) {
					// ...
						runtimeData[netId].current[lNId] = I_sum;
						// ...
						runtimeData[netId].current[lNId] = 0.0f;
						// ...
					}
#else
					if (networkConfigs[netId].sim_with_conductances) {
						runtimeData[netId].current[lNId] = I_sum;
						// ...
					} else {
						runtimeData[netId].current[lNId] = 0.0f;
						// ...
					}
#endif
					// ...
					if (groupConfigs[netId][lGrpId].WithHomeostasis) {
						// ...
					}
					// ...
					// record the total input current for the neuron monitor
					if (networkConfigs[netId].sim_with_nm && lNId - groupConfigs[netId][lGrpId].lStartN < MAX_NEURON_MON_GRP_SZIE) {
						// ...
						runtimeData[netId].nIBuffer[idxBase + lNId - groupConfigs[netId][lGrpId].lStartN] = totalCurrent;
						// ...
					}
					// ...
				}
				// ...
				// decay the group neuromodulator concentrations toward their baselines
#ifndef LN_FIX_ALL_DECAY_CPU
#ifndef LN_FIX_DA_DECAY_CPU
				if ((groupConfigs[netId][lGrpId].WithESTDPtype == DA_MOD || groupConfigs[netId][lGrpId].WithISTDP == DA_MOD) && runtimeData[netId].grpDA[lGrpId] > groupConfigs[netId][lGrpId].baseDP) {
					runtimeData[netId].grpDA[lGrpId] *= groupConfigs[netId][lGrpId].decayDP;
				}
				// ...
#else
				if (groupConfigs[netId][lGrpId].WithESTDPtype == DA_MOD || groupConfigs[netId][lGrpId].WithISTDP == DA_MOD) {
					float baseDP_ = groupConfigs[netId][lGrpId].baseDP;
					if (runtimeData[netId].grpDA[lGrpId] > baseDP_) {
						runtimeData[netId].grpDA[lGrpId] *= groupConfigs[netId][lGrpId].decayDP;
						if (runtimeData[netId].grpDA[lGrpId] < baseDP_)
							runtimeData[netId].grpDA[lGrpId] = baseDP_;
					}
					// ...
					runtimeData[netId].grpDABuffer[lGrpId * 1000 + simTimeMs] = runtimeData[netId].grpDA[lGrpId];
					// ...
				}
#endif
				// ...
				if (groupConfigs[netId][lGrpId].WithESTDPtype == MOD_NE_A1 || groupConfigs[netId][lGrpId].WithISTDP == MOD_NE_A1) {
					float baseNE_ = groupConfigs[netId][lGrpId].baseNE;
					if (runtimeData[netId].grpNE[lGrpId] > baseNE_) {
						runtimeData[netId].grpNE[lGrpId] *= groupConfigs[netId][lGrpId].decayNE;
						if (runtimeData[netId].grpNE[lGrpId] < baseNE_)
							runtimeData[netId].grpNE[lGrpId] = baseNE_;
					}
					// ...
				}
				// ...
#else // LN_FIX_ALL_DECAY_CPU: decay every active neuromodulator
				if (groupConfigs[netId][lGrpId].activeDP) {
					float baseDP_ = groupConfigs[netId][lGrpId].baseDP;
					if (runtimeData[netId].grpDA[lGrpId] > baseDP_) {
						runtimeData[netId].grpDA[lGrpId] *= groupConfigs[netId][lGrpId].decayDP;
						if (runtimeData[netId].grpDA[lGrpId] < baseDP_)
							runtimeData[netId].grpDA[lGrpId] = baseDP_;
					}
					// ...
					runtimeData[netId].grpDABuffer[lGrpId * 1000 + simTimeMs] = runtimeData[netId].grpDA[lGrpId];
				}
				// ...
				if (groupConfigs[netId][lGrpId].active5HT) {
					float base5HT_ = groupConfigs[netId][lGrpId].base5HT;
					if (runtimeData[netId].grp5HT[lGrpId] > base5HT_) {
						runtimeData[netId].grp5HT[lGrpId] *= groupConfigs[netId][lGrpId].decay5HT;
						if (runtimeData[netId].grp5HT[lGrpId] < base5HT_)
							runtimeData[netId].grp5HT[lGrpId] = base5HT_;
					}
					// ...
					runtimeData[netId].grp5HTBuffer[lGrpId * 1000 + simTimeMs] = runtimeData[netId].grp5HT[lGrpId];
				}
				// ...
				if (groupConfigs[netId][lGrpId].activeACh) {
					float baseACh_ = groupConfigs[netId][lGrpId].baseACh;
					if (runtimeData[netId].grpACh[lGrpId] > baseACh_) {
						runtimeData[netId].grpACh[lGrpId] *= groupConfigs[netId][lGrpId].decayACh;
						if (runtimeData[netId].grpACh[lGrpId] < baseACh_)
							runtimeData[netId].grpACh[lGrpId] = baseACh_;
					}
					// ...
					runtimeData[netId].grpAChBuffer[lGrpId * 1000 + simTimeMs] = runtimeData[netId].grpACh[lGrpId];
				}
				// ...
				if (groupConfigs[netId][lGrpId].activeNE) {
					float baseNE_ = groupConfigs[netId][lGrpId].baseNE;
					if (runtimeData[netId].grpNE[lGrpId] > baseNE_) {
						runtimeData[netId].grpNE[lGrpId] *= groupConfigs[netId][lGrpId].decayNE;
						if (runtimeData[netId].grpNE[lGrpId] < baseNE_)
							runtimeData[netId].grpNE[lGrpId] = baseNE_;
					}
					// ...
					runtimeData[netId].grpNEBuffer[lGrpId * 1000 + simTimeMs] = runtimeData[netId].grpNE[lGrpId];
				}
				// ...
#endif
			}
		}
		// ...
#ifdef LN_FIX_ALL_DECAY_CPU
		// ...
#endif
		// commit the integrated membrane potentials
		memcpy(runtimeData[netId].voltage, runtimeData[netId].nextVoltage, sizeof(float) * networkConfigs[netId].numNReg);
	}
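	/* Note (added, not part of the original source): with conductance-based (COBA)
	 * synapses the per-neuron input current computed above is
	 *
	 *   I_sum = -( gAMPA*(v - 0) + gNMDA*f(v)*(v - 0) + gGABAa*(v + 70) + gGABAb*(v + 90) )
	 *
	 * with the NMDA voltage dependence f(v) = ((v+80)/60)^2 / (1 + ((v+80)/60)^2); the
	 * reversal potentials 0 mV (AMPA/NMDA), -70 mV (GABAa) and -90 mV (GABAb) are the
	 * constants that appear inline in the code.
	 */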
#ifndef __NO_PTHREADS__ // POSIX
	void* SNN::helperGlobalStateUpdate_CPU(void* arguments) {
		// ...
	}
#endif
#ifdef __NO_PTHREADS__
void SNN::updateWeights_CPU(int netId) {
#else
void* SNN::updateWeights_CPU(int netId) {
#endif
	assert(sim_in_testing == false);
	assert(sim_with_fixedwts == false);
	assert(runtimeData[netId].memType == CPU_MEM);
	// ...

	for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) {
		// no changeable weights, so skip this group
		if (groupConfigs[netId][lGrpId].FixedInputWts || !(groupConfigs[netId][lGrpId].WithSTDP))
			continue;
		// ...

		for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++) {
			assert(lNId < networkConfigs[netId].numNReg);
			unsigned int offset = runtimeData[netId].cumulativePre[lNId];
			float diff_firing = 0.0;
			float homeostasisScale = 1.0;
			// ...

			if (groupConfigs[netId][lGrpId].WithHomeostasis) {
				assert(runtimeData[netId].baseFiring[lNId] > 0);
				diff_firing = 1 - runtimeData[netId].avgFiring[lNId] / runtimeData[netId].baseFiring[lNId];
				// ...
			}

			if (lNId == groupConfigs[netId][lGrpId].lStartN)
				KERNEL_DEBUG("Weights, Change at %d (diff_firing: %f)", simTimeSec, diff_firing);
			// ...

			for (int j = 0; j < runtimeData[netId].Npre_plastic[lNId]; j++) {
				// ... (connId for this synapse; its declaration was elided in this listing)
				float effectiveWtChange = stdpScaleFactor_ * runtimeData[netId].wtChange[offset + j];
				// ...

				// E-STDP (the case labels of this switch were elided in this listing)
				switch (connectConfigMap[connId].stdpConfig.WithESTDPtype) {
				// ...
#ifdef LN_I_CALC_TYPES
				// ...
					if (groupConfigs[netId][lGrpId].WithHomeostasis) {
						runtimeData[netId].wt[offset + j] += (diff_firing * runtimeData[netId].wt[offset + j] * homeostasisScale + runtimeData[netId].wtChange[offset + j]) * runtimeData[netId].baseFiring[lNId] / groupConfigs[netId][lGrpId].avgTimeScale / (1 + fabs(diff_firing) * 50);
					} else {
						runtimeData[netId].wt[offset + j] += effectiveWtChange;
					}
					// ...
					if (groupConfigs[netId][lGrpId].WithHomeostasis) {
						effectiveWtChange = (runtimeData[netId].grpDA + connectConfigMap[connId].stdpConfig.WithESTDPtype - DA_MOD)[lGrpId];
						runtimeData[netId].wt[offset + j] += (diff_firing * runtimeData[netId].wt[offset + j] * homeostasisScale + effectiveWtChange) * runtimeData[netId].baseFiring[lNId] / groupConfigs[netId][lGrpId].avgTimeScale / (1 + fabs(diff_firing) * 50);
					} else {
						runtimeData[netId].wt[offset + j] += (runtimeData[netId].grpDA + connectConfigMap[connId].stdpConfig.WithESTDPtype - DA_MOD)[lGrpId] * effectiveWtChange;
					}
					// ...
				}

				// I-STDP (case labels elided as well)
				switch (connectConfigMap[connId].stdpConfig.WithISTDPtype) {
				// ...
					if (groupConfigs[netId][lGrpId].WithHomeostasis) {
						runtimeData[netId].wt[offset + j] += (diff_firing * runtimeData[netId].wt[offset + j] * homeostasisScale + runtimeData[netId].wtChange[offset + j]) * runtimeData[netId].baseFiring[lNId] / groupConfigs[netId][lGrpId].avgTimeScale / (1 + fabs(diff_firing) * 50);
					} else {
						runtimeData[netId].wt[offset + j] += effectiveWtChange;
					}
					// ...
					if (groupConfigs[netId][lGrpId].WithHomeostasis) {
						effectiveWtChange = (runtimeData[netId].grpDA + connectConfigMap[connId].stdpConfig.WithISTDPtype - DA_MOD)[lGrpId] * effectiveWtChange;
						runtimeData[netId].wt[offset + j] += (diff_firing * runtimeData[netId].wt[offset + j] * homeostasisScale + effectiveWtChange) * runtimeData[netId].baseFiring[lNId] / groupConfigs[netId][lGrpId].avgTimeScale / (1 + fabs(diff_firing) * 50);
					} else {
						runtimeData[netId].wt[offset + j] += (runtimeData[netId].grpDA + connectConfigMap[connId].stdpConfig.WithISTDPtype - DA_MOD)[lGrpId] * effectiveWtChange;
					}
					// ...
				}

				// decay the accumulated weight change
				runtimeData[netId].wtChange[offset + j] *= wtChangeDecay_;

				// clamp the updated weight to [0, maxSynWt] (excitatory) or [maxSynWt, 0] (inhibitory)
				if (runtimeData[netId].maxSynWt[offset + j] >= 0) {
					if (runtimeData[netId].wt[offset + j] >= runtimeData[netId].maxSynWt[offset + j])
						runtimeData[netId].wt[offset + j] = runtimeData[netId].maxSynWt[offset + j];
					if (runtimeData[netId].wt[offset + j] < 0)
						runtimeData[netId].wt[offset + j] = 0.0;
				} else {
					if (runtimeData[netId].wt[offset + j] <= runtimeData[netId].maxSynWt[offset + j])
						runtimeData[netId].wt[offset + j] = runtimeData[netId].maxSynWt[offset + j];
					if (runtimeData[netId].wt[offset + j] > 0)
						runtimeData[netId].wt[offset + j] = 0.0;
				}
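// Illustration only, not part of the listing above: a standalone sketch of the clamping rule
// applied at the end of updateWeights_CPU. clampSynWt is a hypothetical helper; the sign
// convention (maxSynWt >= 0 marks an excitatory synapse, maxSynWt < 0 an inhibitory one)
// mirrors the two branches shown above.
static inline float clampSynWt(float wt, float maxSynWt) {
	if (maxSynWt >= 0.0f) {            // excitatory: keep wt within [0, maxSynWt]
		if (wt >= maxSynWt) wt = maxSynWt;
		if (wt < 0.0f)      wt = 0.0f;
	} else {                           // inhibitory: keep wt within [maxSynWt, 0]
		if (wt <= maxSynWt) wt = maxSynWt;
		if (wt > 0.0f)      wt = 0.0f;
	}
	return wt;
}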
#ifndef __NO_PTHREADS__ // POSIX
void* SNN::helperUpdateWeights_CPU(void* arguments) {
	// ...

#ifdef __NO_PTHREADS__
void SNN::shiftSpikeTables_CPU(int netId) {
#else
void* SNN::shiftSpikeTables_CPU(int netId) {
#endif
	assert(runtimeData[netId].memType == CPU_MEM);
	// ...

	for (int p = runtimeData[netId].timeTableD2[999], k = 0; p < runtimeData[netId].timeTableD2[999 + networkConfigs[netId].maxDelay + 1]; p++, k++) {
		// ...
	}

#ifdef LN_AXON_PLAST
	// ... (axonal-plasticity branch, elided in this listing)

	for (int i = 0; i < networkConfigs[netId].maxDelay; i++) {
		// ...
	}

	runtimeData[netId].timeTableD1[networkConfigs[netId].maxDelay] = 0;
	// ...

#ifndef __NO_PTHREADS__ // POSIX
void* SNN::helperShiftSpikeTables_CPU(void* arguments) {
	// ...
void SNN::allocateSNN_CPU(int netId) {
	// ...
	runtimeData[netId].randNum = new float[networkConfigs[netId].numNPois];
	// ...
	copyPreConnectionInfo(netId, ALL, &runtimeData[netId], &managerRuntimeData, true);
	copyPostConnectionInfo(netId, ALL, &runtimeData[netId], &managerRuntimeData, true);
	// ...
	copySynapseState(netId, &runtimeData[netId], &managerRuntimeData, true);
	// ...
	copyNeuronState(netId, ALL, &runtimeData[netId], true);
	// ...
	copySTPState(netId, ALL, &runtimeData[netId], &managerRuntimeData, true);
	// ...
	copyGroupState(netId, ALL, &runtimeData[netId], &managerRuntimeData, true);
	// ...
	copyAuxiliaryData(netId, ALL, &runtimeData[netId], true);
	// ...

#ifdef LN_I_CALC_TYPES
	// ...
	for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroupsAssigned; lGrpId++) {
		KERNEL_DEBUG("Settings for Group %s:", groupConfigMap[groupConfigs[netId][lGrpId].gGrpId].grpName.c_str());
		// ...
		KERNEL_DEBUG("\tType: %d", (int)groupConfigs[netId][lGrpId].Type);
		KERNEL_DEBUG("\tNumN: %d", groupConfigs[netId][lGrpId].numN);
		KERNEL_DEBUG("\tM: %d", groupConfigs[netId][lGrpId].numPostSynapses);
		KERNEL_DEBUG("\tPreM: %d", groupConfigs[netId][lGrpId].numPreSynapses);
		KERNEL_DEBUG("\tspikeGenerator: %d", (int)groupConfigs[netId][lGrpId].isSpikeGenerator);
		KERNEL_DEBUG("\tFixedInputWts: %d", (int)groupConfigs[netId][lGrpId].FixedInputWts);
		KERNEL_DEBUG("\tMaxDelay: %d", (int)groupConfigs[netId][lGrpId].MaxDelay);
		// ...
		KERNEL_DEBUG("\tWithSTP: %d", (int)groupConfigs[netId][lGrpId].WithSTP);
		if (groupConfigs[netId][lGrpId].WithSTP) {
			KERNEL_DEBUG("\t\tSTP_U: %f", groupConfigs[netId][lGrpId].STP_U);
			// ...
		}
		KERNEL_DEBUG("\tspikeGen: %s", groupConfigs[netId][lGrpId].isSpikeGenFunc ? "is Set" : "is not set ");
		// ...
void SNN::copyPreConnectionInfo(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, bool allocateMem) {
	int lengthN, lengthSyn, posN, posSyn;

	if (lGrpId == ALL) {
		lengthN = networkConfigs[netId].numNAssigned;
		// ...
	}
	else {
		lengthN = groupConfigs[netId][lGrpId].numN;
		posN = groupConfigs[netId][lGrpId].lStartN;
		// ...
	}

	// Npre: number of presynaptic connections per neuron
	dest->Npre = new unsigned short[networkConfigs[netId].numNAssigned];
	memcpy(&dest->Npre[posN], &src->Npre[posN], sizeof(short) * lengthN);
	// ...

	if (!sim_with_fixedwts) {
		// ...
		dest->Npre_plastic = new unsigned short[networkConfigs[netId].numNAssigned];
		// ...

		// pre-compute the reciprocal of Npre_plastic
		float* Npre_plasticInv = new float[networkConfigs[netId].numNAssigned];
		for (int i = 0; i < networkConfigs[netId].numNAssigned; i++)
			Npre_plasticInv[i] = 1.0f / managerRuntimeData.Npre_plastic[i];

		dest->Npre_plasticInv = new float[networkConfigs[netId].numNAssigned];
		memcpy(dest->Npre_plasticInv, Npre_plasticInv, sizeof(float) * networkConfigs[netId].numNAssigned);
		// ...
		delete[] Npre_plasticInv;
	}
	// ...

	// cumulativePre: offset of each neuron's first presynaptic synapse slot
	dest->cumulativePre = new unsigned int[networkConfigs[netId].numNAssigned];
	// ...

	if (lGrpId == ALL) {
		lengthSyn = networkConfigs[netId].numPreSynNet;
		// ...
	}
	else {
		lengthSyn = 0;
		for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++)
			lengthSyn += dest->Npre[lNId];
		// ...
	}
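// Illustration only, not part of the listing above: cumulativePre (and cumulativePost below)
// hold, for each neuron, the offset of its first synapse slot, i.e. a running sum of the
// per-neuron counts in Npre/Npost; updateWeights_CPU above uses it as
// offset = cumulativePre[lNId]. A minimal standalone sketch with hypothetical names:
#include <vector>
static std::vector<unsigned int> buildCumulativeOffsets(const std::vector<unsigned short>& numSyn) {
	std::vector<unsigned int> cum(numSyn.size(), 0u);
	unsigned int running = 0;
	for (size_t n = 0; n < numSyn.size(); n++) {
		cum[n] = running;       // offset of neuron n's first synapse in the flat synapse arrays
		running += numSyn[n];   // advance by this neuron's synapse count
	}
	return cum;
}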
void SNN::copyPostConnectionInfo(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, bool allocateMem) {
	int lengthN, lengthSyn, posN, posSyn;

	if (lGrpId == ALL) {
		lengthN = networkConfigs[netId].numNAssigned;
		// ...
	}
	else {
		lengthN = groupConfigs[netId][lGrpId].numN;
		posN = groupConfigs[netId][lGrpId].lStartN;
		// ...
	}

	// Npost: number of postsynaptic connections per neuron
	dest->Npost = new unsigned short[networkConfigs[netId].numNAssigned];
	memcpy(&dest->Npost[posN], &src->Npost[posN], sizeof(short) * lengthN);
	// ...

	// cumulativePost: offset of each neuron's first postsynaptic synapse slot
	dest->cumulativePost = new unsigned int[networkConfigs[netId].numNAssigned];
	// ...

	if (lGrpId == ALL) {
		lengthSyn = networkConfigs[netId].numPostSynNet;
		// ...
	}
	else {
		lengthSyn = 0;
		for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++)
			lengthSyn += dest->Npost[lNId];
		// ...
	}
	// --- copySynapseState (signature elided in this extraction) ---
	assert(networkConfigs[netId].numPreSynNet > 0);
	// ...

	// synaptic weights
	dest->wt = new float[networkConfigs[netId].numPreSynNet];
	memcpy(dest->wt, src->wt, sizeof(float) * networkConfigs[netId].numPreSynNet);
	// ...

	// weight-change and maximum-weight arrays are only needed with plastic synapses
	if (!sim_with_fixedwts) {
		// ...
		dest->wtChange = new float[networkConfigs[netId].numPreSynNet];
		memcpy(dest->wtChange, src->wtChange, sizeof(float) * networkConfigs[netId].numPreSynNet);
		// ...
		dest->maxSynWt = new float[networkConfigs[netId].numPreSynNet];
		memcpy(dest->maxSynWt, src->maxSynWt, sizeof(float) * networkConfigs[netId].numPreSynNet);
		// ...
	}
void SNN::copyNeuronState(int netId, int lGrpId, RuntimeData* dest, bool allocateMem) {
	// ...
	if (lGrpId == ALL) {
		// ...
		length = networkConfigs[netId].numNReg;
	}
	else {
		// ...
		ptrPos = groupConfigs[netId][lGrpId].lStartN;
		length = groupConfigs[netId][lGrpId].numN;
	}
	// ...
	assert(length <= networkConfigs[netId].numNReg);
	// ...

	// Poisson (spike-generator) groups carry no regular-neuron state
	if (!allocateMem && groupConfigs[netId][lGrpId].Type & POISSON_NEURON)
		return;
	// ...

	// Izhikevich recovery variable u, membrane potential v, and input current
	dest->recovery = new float[length];
	memcpy(&dest->recovery[ptrPos], &managerRuntimeData.recovery[ptrPos], sizeof(float) * length);
	// ...
	dest->voltage = new float[length];
	memcpy(&dest->voltage[ptrPos], &managerRuntimeData.voltage[ptrPos], sizeof(float) * length);
	// ...
	dest->current = new float[length];
	memcpy(&dest->current[ptrPos], &managerRuntimeData.current[ptrPos], sizeof(float) * length);
	// ...

#ifdef LN_I_CALC_TYPES
	if (lGrpId == ALL) {
		// ...
		if (sim_with_conductances) {
			// ...
			copyConductanceAMPA(netId, lGrpId, dest, &managerRuntimeData, allocateMem, 0);
			copyConductanceNMDA(netId, lGrpId, dest, &managerRuntimeData, allocateMem, 0);
			copyConductanceGABAa(netId, lGrpId, dest, &managerRuntimeData, allocateMem, 0);
			copyConductanceGABAb(netId, lGrpId, dest, &managerRuntimeData, allocateMem, 0);
		}
	}
	else {
		switch (groupConfigs[netId][lGrpId].icalcType) {
		// ... (conductance-based integration types; case labels elided)
			copyConductanceAMPA(netId, lGrpId, dest, &managerRuntimeData, allocateMem, 0);
			copyConductanceNMDA(netId, lGrpId, dest, &managerRuntimeData, allocateMem, 0);
			copyConductanceGABAa(netId, lGrpId, dest, &managerRuntimeData, allocateMem, 0);
			copyConductanceGABAb(netId, lGrpId, dest, &managerRuntimeData, allocateMem, 0);
		// ...
		}
	}
#else
	if (sim_with_conductances) {
		// ...
		copyConductanceAMPA(netId, lGrpId, dest, &managerRuntimeData, allocateMem, 0);
		copyConductanceNMDA(netId, lGrpId, dest, &managerRuntimeData, allocateMem, 0);
		copyConductanceGABAa(netId, lGrpId, dest, &managerRuntimeData, allocateMem, 0);
		copyConductanceGABAb(netId, lGrpId, dest, &managerRuntimeData, allocateMem, 0);
	}
#endif
	// ...

	copyExternalCurrent(netId, lGrpId, dest, allocateMem);
	// ...
	memcpy(&dest->curSpike[ptrPos], &managerRuntimeData.curSpike[ptrPos], sizeof(bool) * length);
	// ...
	copyNeuronParameters(netId, lGrpId, dest, allocateMem);

	if (networkConfigs[netId].sim_with_nm)
		copyNeuronStateBuffer(netId, lGrpId, dest, &managerRuntimeData, allocateMem);

	if (sim_with_homeostasis) {
		// ...
		memcpy(&dest->avgFiring[ptrPos], &managerRuntimeData.avgFiring[ptrPos], sizeof(float) * length);
	}
void SNN::copyConductanceAMPA(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, bool allocateMem, int destOffset) {
	// ...
	if (lGrpId == ALL) {
		// ...
		length = networkConfigs[netId].numNReg;
	}
	else {
		ptrPos = groupConfigs[netId][lGrpId].lStartN;
		length = groupConfigs[netId][lGrpId].numN;
	}
	assert(length <= networkConfigs[netId].numNReg);
	// ...

	// AMPA conductance
	assert(src->gAMPA != NULL);
	// ...
	dest->gAMPA = new float[length];
	memcpy(&dest->gAMPA[ptrPos + destOffset], &src->gAMPA[ptrPos], sizeof(float) * length);
void SNN::copyConductanceNMDA(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, bool allocateMem, int destOffset) {
	// ...
	if (lGrpId == ALL) {
		// ...
		length = networkConfigs[netId].numNReg;
	}
	else {
		ptrPos = groupConfigs[netId][lGrpId].lStartN;
		length = groupConfigs[netId][lGrpId].numN;
	}
	assert(length <= networkConfigs[netId].numNReg);
	// ...

#ifdef LN_I_CALC_TYPES
	// ... (NMDA with separate rise and decay terms)
	dest->gNMDA_r = new float[length];
	memcpy(&dest->gNMDA_r[ptrPos], &src->gNMDA_r[ptrPos], sizeof(float) * length);
	// ...
	dest->gNMDA_d = new float[length];
	memcpy(&dest->gNMDA_d[ptrPos], &src->gNMDA_d[ptrPos], sizeof(float) * length);
	// ... (single NMDA conductance otherwise)
	assert(src->gNMDA != NULL);
	// ...
	dest->gNMDA = new float[length];
	memcpy(&dest->gNMDA[ptrPos + destOffset], &src->gNMDA[ptrPos], sizeof(float) * length);
#else
	if (sim_with_NMDA_rise) {
		// ...
		dest->gNMDA_r = new float[length];
		memcpy(&dest->gNMDA_r[ptrPos], &src->gNMDA_r[ptrPos], sizeof(float) * length);
		// ...
		dest->gNMDA_d = new float[length];
		memcpy(&dest->gNMDA_d[ptrPos], &src->gNMDA_d[ptrPos], sizeof(float) * length);
	}
	else {
		assert(src->gNMDA != NULL);
		// ...
		dest->gNMDA = new float[length];
		memcpy(&dest->gNMDA[ptrPos + destOffset], &src->gNMDA[ptrPos], sizeof(float) * length);
	}
#endif
void SNN::copyConductanceGABAa(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, bool allocateMem, int destOffset) {
	// ...
	if (lGrpId == ALL) {
		// ...
		length = networkConfigs[netId].numNReg;
	}
	else {
		ptrPos = groupConfigs[netId][lGrpId].lStartN;
		length = groupConfigs[netId][lGrpId].numN;
	}
	assert(length <= networkConfigs[netId].numNReg);
	// ...

	// GABAa conductance
	assert(src->gGABAa != NULL);
	// ...
	dest->gGABAa = new float[length];
	memcpy(&dest->gGABAa[ptrPos + destOffset], &src->gGABAa[ptrPos], sizeof(float) * length);
void SNN::copyConductanceGABAb(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, bool allocateMem, int destOffset) {
	// ...
	if (lGrpId == ALL) {
		// ...
		length = networkConfigs[netId].numNReg;
	}
	else {
		ptrPos = groupConfigs[netId][lGrpId].lStartN;
		length = groupConfigs[netId][lGrpId].numN;
	}
	assert(length <= networkConfigs[netId].numNReg);
	// ...

#ifdef LN_I_CALC_TYPES
	// ... (GABAb with separate rise and decay terms)
	dest->gGABAb_r = new float[length];
	memcpy(&dest->gGABAb_r[ptrPos], &src->gGABAb_r[ptrPos], sizeof(float) * length);
	// ...
	dest->gGABAb_d = new float[length];
	memcpy(&dest->gGABAb_d[ptrPos], &src->gGABAb_d[ptrPos], sizeof(float) * length);
	// ... (single GABAb conductance otherwise)
	assert(src->gGABAb != NULL);
	// ...
	dest->gGABAb = new float[length];
	memcpy(&dest->gGABAb[ptrPos + destOffset], &src->gGABAb[ptrPos], sizeof(float) * length);
#else
	if (sim_with_GABAb_rise) {
		// ...
		dest->gGABAb_r = new float[length];
		memcpy(&dest->gGABAb_r[ptrPos], &src->gGABAb_r[ptrPos], sizeof(float) * length);
		// ...
		dest->gGABAb_d = new float[length];
		memcpy(&dest->gGABAb_d[ptrPos], &src->gGABAb_d[ptrPos], sizeof(float) * length);
	}
	else {
		assert(src->gGABAb != NULL);
		// ...
		dest->gGABAb = new float[length];
		memcpy(&dest->gGABAb[ptrPos + destOffset], &src->gGABAb[ptrPos], sizeof(float) * length);
	}
#endif
void SNN::copyNeuronStateBuffer(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, bool allocateMem) {
	// ...
	if (lGrpId == ALL) {
		// ...
		memcpy(&dest->nVBuffer[ptrPos], &src->nVBuffer[ptrPos], sizeof(float) * length);
		memcpy(&dest->nUBuffer[ptrPos], &src->nUBuffer[ptrPos], sizeof(float) * length);
		memcpy(&dest->nIBuffer[ptrPos], &src->nIBuffer[ptrPos], sizeof(float) * length);
	}
	else {
		// ...
		for (int t = 0; t < 1000; t++) {
			// ...
			assert((ptrPos + length) <= networkConfigs[netId].numGroups * MAX_NEURON_MON_GRP_SZIE * 1000);
			// ...
			memcpy(&dest->nVBuffer[ptrPos], &src->nVBuffer[ptrPos], sizeof(float) * length);
			memcpy(&dest->nUBuffer[ptrPos], &src->nUBuffer[ptrPos], sizeof(float) * length);
			memcpy(&dest->nIBuffer[ptrPos], &src->nIBuffer[ptrPos], sizeof(float) * length);
			// ...
		}
	}
void SNN::copyExternalCurrent(int netId, int lGrpId, RuntimeData* dest, bool allocateMem) {
	// ...
	if (lGrpId == ALL) {
		// ...
		lengthN = networkConfigs[netId].numNReg;
	}
	else {
		assert(lGrpId >= 0);
		posN = groupConfigs[netId][lGrpId].lStartN;
		lengthN = groupConfigs[netId][lGrpId].numN;
	}
	assert(lengthN >= 0 && lengthN <= networkConfigs[netId].numNReg);
	// ...

	KERNEL_DEBUG("copyExternalCurrent: lGrpId=%d, ptrPos=%d, length=%d, allocate=%s", lGrpId, posN, lengthN, allocateMem ? "y" : "n");
	// ...
	memcpy(&(dest->extCurrent[posN]), &(managerRuntimeData.extCurrent[posN]), sizeof(float) * lengthN);
void SNN::copyNeuronParameters(int netId, int lGrpId, RuntimeData* dest, bool allocateMem) {
	// ...
	// when allocating, the parameter arrays must not exist yet
	if (allocateMem) {
		assert(lGrpId == ALL);
		assert(dest->Izh_a == NULL);
		assert(dest->Izh_b == NULL);
		assert(dest->Izh_c == NULL);
		assert(dest->Izh_d == NULL);
		assert(dest->Izh_C == NULL);
		assert(dest->Izh_k == NULL);
		assert(dest->Izh_vr == NULL);
		assert(dest->Izh_vt == NULL);
		// ...
		assert(dest->lif_vTh == NULL);
		// ...
	}
	// ...

	if (lGrpId == ALL) {
		// ...
		length = networkConfigs[netId].numNReg;
	}
	else {
		ptrPos = groupConfigs[netId][lGrpId].lStartN;
		length = groupConfigs[netId][lGrpId].numN;
	}
	// ...

	// Izhikevich parameters (4-parameter and 9-parameter variants)
	dest->Izh_a = new float[length];
	memcpy(&dest->Izh_a[ptrPos], &(managerRuntimeData.Izh_a[ptrPos]), sizeof(float) * length);
	// ...
	dest->Izh_b = new float[length];
	memcpy(&dest->Izh_b[ptrPos], &(managerRuntimeData.Izh_b[ptrPos]), sizeof(float) * length);
	// ...
	dest->Izh_c = new float[length];
	memcpy(&dest->Izh_c[ptrPos], &(managerRuntimeData.Izh_c[ptrPos]), sizeof(float) * length);
	// ...
	dest->Izh_d = new float[length];
	memcpy(&dest->Izh_d[ptrPos], &(managerRuntimeData.Izh_d[ptrPos]), sizeof(float) * length);
	// ...
	dest->Izh_C = new float[length];
	memcpy(&dest->Izh_C[ptrPos], &(managerRuntimeData.Izh_C[ptrPos]), sizeof(float) * length);
	// ...
	dest->Izh_k = new float[length];
	memcpy(&dest->Izh_k[ptrPos], &(managerRuntimeData.Izh_k[ptrPos]), sizeof(float) * length);
	// ...
	dest->Izh_vr = new float[length];
	memcpy(&dest->Izh_vr[ptrPos], &(managerRuntimeData.Izh_vr[ptrPos]), sizeof(float) * length);
	// ...
	dest->Izh_vt = new float[length];
	memcpy(&dest->Izh_vt[ptrPos], &(managerRuntimeData.Izh_vt[ptrPos]), sizeof(float) * length);
	// ...
	memcpy(&dest->Izh_vpeak[ptrPos], &(managerRuntimeData.Izh_vpeak[ptrPos]), sizeof(float) * length);
	// ...

	// LIF parameters
	memcpy(&dest->lif_tau_m[ptrPos], &(managerRuntimeData.lif_tau_m[ptrPos]), sizeof(int) * length);
	// ...
	dest->lif_vTh = new float[length];
	memcpy(&dest->lif_vTh[ptrPos], &(managerRuntimeData.lif_vTh[ptrPos]), sizeof(float) * length);
	// ...
	memcpy(&dest->lif_vReset[ptrPos], &(managerRuntimeData.lif_vReset[ptrPos]), sizeof(float) * length);
	// ...
	dest->lif_gain = new float[length];
	memcpy(&dest->lif_gain[ptrPos], &(managerRuntimeData.lif_gain[ptrPos]), sizeof(float) * length);
	// ...
	dest->lif_bias = new float[length];
	memcpy(&dest->lif_bias[ptrPos], &(managerRuntimeData.lif_bias[ptrPos]), sizeof(float) * length);
	// ...

	// pre-computed reciprocal of the baseline firing rate (homeostasis)
	if (sim_with_homeostasis) {
		float* baseFiringInv = new float[length];
		for (int nid = 0; nid < length; nid++) {
			if (managerRuntimeData.baseFiring[nid] != 0.0f)
				baseFiringInv[nid] = 1.0f / managerRuntimeData.baseFiring[ptrPos + nid];
			else
				baseFiringInv[nid] = 0.0;
		}
		// ...
		memcpy(&dest->baseFiringInv[ptrPos], baseFiringInv, sizeof(float) * length);
		// ...
		delete[] baseFiringInv;
	}
	// --- copySTPState (signature elided in this extraction) ---
	if (allocateMem) {
		assert(dest->stpu == NULL);
		assert(dest->stpx == NULL);
	}
	else {
		assert(dest->stpu != NULL);
		assert(dest->stpx != NULL);
	}
	assert(src->stpu != NULL);
	assert(src->stpx != NULL);
	// ...

	// STP state is kept per neuron and per delay slot
	dest->stpu = new float[networkConfigs[netId].numN * (networkConfigs[netId].maxDelay + 1)];
	memcpy(dest->stpu, src->stpu, sizeof(float) * networkConfigs[netId].numN * (networkConfigs[netId].maxDelay + 1));
	// ...
	dest->stpx = new float[networkConfigs[netId].numN * (networkConfigs[netId].maxDelay + 1)];
	memcpy(dest->stpx, src->stpx, sizeof(float) * networkConfigs[netId].numN * (networkConfigs[netId].maxDelay + 1));
	// --- copyGroupState (signature elided in this extraction) ---
	// group-level neuromodulator concentrations (DA, 5-HT, ACh, NE)
	dest->grpDA = new float[networkConfigs[netId].numGroups];
	dest->grp5HT = new float[networkConfigs[netId].numGroups];
	dest->grpACh = new float[networkConfigs[netId].numGroups];
	dest->grpNE = new float[networkConfigs[netId].numGroups];
	// ...
	memcpy(dest->grpDA, src->grpDA, sizeof(float) * networkConfigs[netId].numGroups);
	memcpy(dest->grp5HT, src->grp5HT, sizeof(float) * networkConfigs[netId].numGroups);
	memcpy(dest->grpACh, src->grpACh, sizeof(float) * networkConfigs[netId].numGroups);
	memcpy(dest->grpNE, src->grpNE, sizeof(float) * networkConfigs[netId].numGroups);
	// ...

	if (lGrpId == ALL) {
		// ... (one buffered value per group for each ms of the current second)
		dest->grpDABuffer = new float[1000 * networkConfigs[netId].numGroups];
		dest->grp5HTBuffer = new float[1000 * networkConfigs[netId].numGroups];
		dest->grpAChBuffer = new float[1000 * networkConfigs[netId].numGroups];
		dest->grpNEBuffer = new float[1000 * networkConfigs[netId].numGroups];
		// ...
	}
	else {
		assert(!allocateMem);
		// ...
	}
void SNN::copyAuxiliaryData(int netId, int lGrpId, RuntimeData* dest, bool allocateMem) {
	assert(networkConfigs[netId].numN > 0);
	// ...

	// bit vector of spike-generator spikes: one bit per generator neuron, 32 per int
	dest->spikeGenBits = new unsigned int[networkConfigs[netId].numNSpikeGen / 32 + 1];
	memset(dest->spikeGenBits, 0, sizeof(int) * (networkConfigs[netId].numNSpikeGen / 32 + 1));
	// ...

	// Poisson firing rates
	memset(dest->poissonFireRate, 0, sizeof(float) * networkConfigs[netId].numNPois);
	// ...

	// I_set: bits indicating which presynaptic synapse of a neuron received a spike
	networkConfigs[netId].I_setLength = ceil(((networkConfigs[netId].maxNumPreSynN) / 32.0f));
	dest->I_set = new int[networkConfigs[netId].numNReg * networkConfigs[netId].I_setLength];
	// ...
	assert(networkConfigs[netId].maxNumPreSynN >= 0);
	memset(dest->I_set, 0, sizeof(int) * networkConfigs[netId].numNReg * networkConfigs[netId].I_setLength);
	// ...

	// per-synapse and per-neuron spike-time bookkeeping
	dest->synSpikeTime = new int[networkConfigs[netId].numPreSynNet];
	// ...
	dest->lastSpikeTime = new int[networkConfigs[netId].numNAssigned];
	// ...

	copyNeuronSpikeCount(netId, lGrpId, dest, &managerRuntimeData, true, 0);
	// ...

	// lookup tables: local neuron id -> group id, presynaptic index -> connection id
	dest->grpIds = new short int[networkConfigs[netId].numNAssigned];
	memcpy(dest->grpIds, managerRuntimeData.grpIds, sizeof(short int) * networkConfigs[netId].numNAssigned);
	// ...
	dest->connIdsPreIdx = new short int[networkConfigs[netId].numPreSynNet];
	// ...

	// firing and timing tables
	memset(dest->timeTableD2, 0, sizeof(int) * TIMING_COUNT);
	// ...

#ifdef LN_AXON_PLAST
	// ... (axonal-plasticity variant, elided in this listing)
	dest->firingTableD1 = new int[networkConfigs[netId].maxSpikesD1];
	if (networkConfigs[netId].maxSpikesD1 > 0)
		// ...
	dest->firingTableD2 = new int[networkConfigs[netId].maxSpikesD2];
	if (networkConfigs[netId].maxSpikesD2 > 0)
		// ...

#ifdef LN_AXON_PLAST
	// ...
	dest->firingTimesD2 = new unsigned int[networkConfigs[netId].maxSpikesD2];
	if (networkConfigs[netId].maxSpikesD2 > 0)
		// ...

	// external (cross-network) firing tables: one buffer per group with external connections
	memset(dest->extFiringTableD1, 0, sizeof(int*) * networkConfigs[netId].numGroups);
	for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) {
		if (groupConfigs[netId][lGrpId].hasExternalConnect) {
			// ...
		}
	}
	// ...
	memset(dest->extFiringTableD2, 0, sizeof(int*) * networkConfigs[netId].numGroups);
	for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) {
		if (groupConfigs[netId][lGrpId].hasExternalConnect) {
			// ...
		}
	}
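// Illustration only, not part of the listing above: spikeGenBits and I_set are bit vectors in
// which one 32-bit word holds the flags of 32 entries (hence the "numNSpikeGen / 32 + 1" and
// ceil(maxNumPreSynN / 32) sizing). setSpikeBit/getSpikeBit are hypothetical helpers showing
// the word/bit arithmetic.
static inline void setSpikeBit(unsigned int* bits, int nId) {
	bits[nId >> 5] |= 1u << (nId & 31);            // word nId/32, bit nId%32
}
static inline bool getSpikeBit(const unsigned int* bits, int nId) {
	return (bits[nId >> 5] & (1u << (nId & 31))) != 0;
}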
void SNN::copyNeuronSpikeCount(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, bool allocateMem, int destOffset) {
	// ...
	if (lGrpId == ALL) {
		// ...
		lengthN = networkConfigs[netId].numN;
	}
	else {
		posN = groupConfigs[netId][lGrpId].lStartN;
		lengthN = groupConfigs[netId][lGrpId].numN;
	}
	assert(lengthN > 0 && lengthN <= networkConfigs[netId].numN);
	// ...

	// per-neuron spike counters
	memcpy(&dest->nSpikeCnt[posN + destOffset], &src->nSpikeCnt[posN], sizeof(int) * lengthN);
#ifdef __NO_PTHREADS__
void SNN::assignPoissonFiringRate_CPU(int netId) {
#else
void* SNN::assignPoissonFiringRate_CPU(int netId) {
#endif
	assert(runtimeData[netId].memType == CPU_MEM);
	// ...
	for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) {
		// ...
		if (groupConfigs[netId][lGrpId].isSpikeGenerator) {
			int lNId = groupConfigs[netId][lGrpId].lStartN;
			int gGrpId = groupConfigs[netId][lGrpId].gGrpId;
			PoissonRate* rate = groupConfigMDMap[gGrpId].ratePtr;
			// ...

			// groups driven by a user spike-generator callback, or without a rate object, are skipped
			if (groupConfigMap[gGrpId].spikeGenFunc || rate == NULL)
				continue;
			// ...
			assert(runtimeData[netId].poissonFireRate != NULL);
			assert(rate->isOnGPU() == false);
			// ...
			// Poisson neurons are laid out after the numNReg regular neurons
			memcpy(&runtimeData[netId].poissonFireRate[lNId - networkConfigs[netId].numNReg], rate->getRatePtrCPU(),
				// ... (remaining arguments elided in this listing)

#ifndef __NO_PTHREADS__ // POSIX
void* SNN::helperAssignPoissonFiringRate_CPU(void* arguments) {
	// ...
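// Illustration only, not part of the listing above: poissonFireRate is indexed with
// lNId - numNReg because Poisson (spike-generator) neurons follow the numNReg regular
// neurons in the local neuron-id space. poissonRateIndex is a hypothetical helper
// expressing that offset.
static inline int poissonRateIndex(int lNId, int numNReg) {
	return lNId - numNReg;   // 0-based index into the per-Poisson-neuron rate array
}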
void SNN::copyWeightState(int netId, int lGrpId) {
	int lengthSyn, posSyn;

	// copy the presynaptic bookkeeping first so the synapse span of the group can be computed
	copyPreConnectionInfo(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false);
	// ...

	if (lGrpId == ALL) {
		lengthSyn = networkConfigs[netId].numPreSynNet;
		// ...
	}
	else {
		// ...
		lengthSyn = 0;
		for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++)
			lengthSyn += managerRuntimeData.Npre[lNId];
		// ...
	}

	assert(posSyn < networkConfigs[netId].numPreSynNet || networkConfigs[netId].numPreSynNet == 0);
	assert(lengthSyn <= networkConfigs[netId].numPreSynNet);
	// ...

	memcpy(&managerRuntimeData.wt[posSyn], &runtimeData[netId].wt[posSyn], sizeof(float) * lengthSyn);
	// ...

	// also copy the accumulated weight changes if the network has plastic synapses
	if ((!sim_with_fixedwts) || sim_with_stdp) {
		// ...
		memcpy(&managerRuntimeData.wtChange[posSyn], &runtimeData[netId].wtChange[posSyn], sizeof(float) * lengthSyn);
	}
}
void SNN::copyNetworkConfig(int netId) {
	// ...
}

void SNN::copyGrpIdsLookupArray(int netId) {
	memcpy(managerRuntimeData.grpIds, runtimeData[netId].grpIds, sizeof(short int) * networkConfigs[netId].numNAssigned);
}

void SNN::copyConnIdsLookupArray(int netId) {
	memcpy(managerRuntimeData.connIdsPreIdx, runtimeData[netId].connIdsPreIdx, sizeof(short int) * networkConfigs[netId].numPreSynNet);
}
void SNN::copyLastSpikeTime(int netId) {
	// ...
}

void SNN::copyCurSpikes(int netId) {
	int length = networkConfigs[netId].numNReg;
	// ...
	memcpy(managerRuntimeData.curSpike, runtimeData[netId].curSpike, sizeof(bool) * length);
	// ...
}

void SNN::copyRandNum(int netId) {
	// ...
	int length = networkConfigs[netId].numNPois;
	// ...
	memcpy(managerRuntimeData.randNum, runtimeData[netId].randNum, sizeof(float) * length);
	// ...
}

void SNN::copyPoissonFireRate(int netId) {
	// ...
	int length = networkConfigs[netId].numNPois;
	// ...
}

void SNN::copySpikeGenBits(int netId) {
	// ...
	int length = networkConfigs[netId].numNSpikeGen;
	// ...
	assert(sizeof(int) == 4);
	// ...
}
void SNN::copyNetworkSpikeCount(int netId,
	unsigned int* spikeCountD1, unsigned int* spikeCountD2,
	unsigned int* spikeCountExtD1, unsigned int* spikeCountExtD2) {
	// ...
}

void SNN::copySpikeTables(int netId) {
	unsigned int spikeCountD1Sec, spikeCountD2Sec, spikeCountLastSecLeftD2;
	// ...
	memcpy(managerRuntimeData.firingTableD2, runtimeData[netId].firingTableD2, sizeof(int) * (spikeCountD2Sec + spikeCountLastSecLeftD2));
	// ...
#ifdef LN_AXON_PLAST
	memcpy(managerRuntimeData.firingTimesD2, runtimeData[netId].firingTimesD2, sizeof(unsigned int) * (spikeCountD2Sec + spikeCountLastSecLeftD2));
#endif
	// ...
	memcpy(managerRuntimeData.timeTableD2, runtimeData[netId].timeTableD2, sizeof(int) * (1000 + networkConfigs[netId].maxDelay + 1));
	memcpy(managerRuntimeData.timeTableD1, runtimeData[netId].timeTableD1, sizeof(int) * (1000 + networkConfigs[netId].maxDelay + 1));
	// ...
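// Illustration only, not part of the listing above: the time tables copied here store one
// entry per millisecond of the current second plus an extra maxDelay + 1 slots used when the
// tables are shifted at the second boundary (see shiftSpikeTables_CPU above), which is why
// every copy uses sizeof(int) * (1000 + maxDelay + 1). timeTableLength is a hypothetical
// helper making that sizing explicit.
static inline int timeTableLength(int maxDelay) {
	return 1000 + maxDelay + 1;   // 1000 ms of the current second + maxDelay + 1 extra slots
}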
#ifdef __NO_PTHREADS__
void SNN::deleteRuntimeData_CPU(int netId) {
#else
void* SNN::deleteRuntimeData_CPU(int netId) {
#endif
	assert(runtimeData[netId].memType == CPU_MEM);
	// ...

	// neuron state
	delete[] runtimeData[netId].voltage;
	// ...
	delete[] runtimeData[netId].recovery;
	delete[] runtimeData[netId].current;
	// ...
	delete[] runtimeData[netId].curSpike;
	delete[] runtimeData[netId].Npre;
	// ...
	delete[] runtimeData[netId].Npost;
	// ...

	// synapse state
	delete[] runtimeData[netId].wt;
	delete[] runtimeData[netId].wtChange;
	delete[] runtimeData[netId].maxSynWt;
	// ...

	// group-level neuromodulator state
	delete[] runtimeData[netId].grpDA;
	delete[] runtimeData[netId].grp5HT;
	delete[] runtimeData[netId].grpACh;
	delete[] runtimeData[netId].grpNE;
	// ...

	if (networkConfigs[netId].sim_with_nm) {
		delete[] runtimeData[netId].nVBuffer;
		delete[] runtimeData[netId].nUBuffer;
		delete[] runtimeData[netId].nIBuffer;
		// ...
	}

	delete[] runtimeData[netId].grpIds;
	// ...

	// neuron parameters
	delete[] runtimeData[netId].Izh_a;
	delete[] runtimeData[netId].Izh_b;
	delete[] runtimeData[netId].Izh_c;
	delete[] runtimeData[netId].Izh_d;
	delete[] runtimeData[netId].Izh_C;
	delete[] runtimeData[netId].Izh_k;
	delete[] runtimeData[netId].Izh_vr;
	delete[] runtimeData[netId].Izh_vt;
	// ...
	delete[] runtimeData[netId].lif_vTh;
	// ...
	delete[] runtimeData[netId].lif_gain;
	delete[] runtimeData[netId].lif_bias;
	// ...

	// conductances
	delete[] runtimeData[netId].gAMPA;
#ifdef LN_I_CALC_TYPES
	// ...
	delete[] runtimeData[netId].gNMDA_r;
	delete[] runtimeData[netId].gNMDA_d;
	delete[] runtimeData[netId].gNMDA;
	delete[] runtimeData[netId].gGABAa;
	delete[] runtimeData[netId].gGABAb_r;
	delete[] runtimeData[netId].gGABAb_d;
	delete[] runtimeData[netId].gGABAb;
#else
	if (sim_with_NMDA_rise) {
		delete[] runtimeData[netId].gNMDA_r;
		delete[] runtimeData[netId].gNMDA_d;
	}
	else {
		delete[] runtimeData[netId].gNMDA;
	}
	delete[] runtimeData[netId].gGABAa;
	if (sim_with_GABAb_rise) {
		delete[] runtimeData[netId].gGABAb_r;
		delete[] runtimeData[netId].gGABAb_d;
	}
	else {
		delete[] runtimeData[netId].gGABAb;
	}
#endif
	// ...

	// STP state
	delete[] runtimeData[netId].stpu;
	delete[] runtimeData[netId].stpx;
	// ...

	delete[] runtimeData[netId].I_set;
	// ...

#ifdef LN_AXON_PLAST
	// ... (axonal-plasticity cleanup, elided in this listing)

	// free each group's external firing buffer via a temporary copy of the pointer table
	tempPtrs = new int*[networkConfigs[netId].numGroups];
	// ...
	memcpy(tempPtrs, runtimeData[netId].extFiringTableD2, sizeof(int*) * networkConfigs[netId].numGroups);
	for (int i = 0; i < networkConfigs[netId].numGroups; i++)
		delete[] tempPtrs[i];
	// ...
	memcpy(tempPtrs, runtimeData[netId].extFiringTableD1, sizeof(int*) * networkConfigs[netId].numGroups);
	for (int i = 0; i < networkConfigs[netId].numGroups; i++)
		delete[] tempPtrs[i];
	// ...

	if (runtimeData[netId].randNum != NULL)
		delete[] runtimeData[netId].randNum;
	runtimeData[netId].randNum = NULL;
	// ...

#ifdef LN_I_CALC_TYPES
	// ...
#endif

#ifndef __NO_PTHREADS__ // POSIX
void* SNN::helperDeleteRuntimeData_CPU(void* arguments) {
	// ...
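// Illustration only, not part of the listing above: the cleanup in deleteRuntimeData_CPU
// releases the per-group external firing buffers through a temporary copy of the pointer
// table. freePerGroupBuffers is a hypothetical standalone version of that pattern.
static void freePerGroupBuffers(int** table, int numGroups) {
	for (int i = 0; i < numGroups; i++)
		delete[] table[i];   // release each group's buffer; delete[] on a null pointer is a no-op
}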