CARLsim  6.1.0
CARLsim: a GPU-accelerated SNN simulator
snn_cpu_module.cpp
Go to the documentation of this file.
1 /* * Copyright (c) 2016 Regents of the University of California. All rights reserved.
2 *
3 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions
5 * are met:
6 *
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 *
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * 3. The names of its contributors may not be used to endorse or promote
15 * products derived from this software without specific prior written
16 * permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
22 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
23 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
24 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
25 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
26 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
27 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 *
30 * *********************************************************************************************** *
31 * CARLsim
32 * created by: (MDR) Micah Richert, (JN) Jayram M. Nageswaran
33 * maintained by:
34 * (MA) Mike Avery <averym@uci.edu>
35 * (MB) Michael Beyeler <mbeyeler@uci.edu>,
36 * (KDC) Kristofor Carlson <kdcarlso@uci.edu>
37 * (TSC) Ting-Shuo Chou <tingshuc@uci.edu>
38 * (HK) Hirak J Kashyap <kashyaph@uci.edu>
39 *
40 * CARLsim v1.0: JM, MDR
41 * CARLsim v2.0/v2.1/v2.2: JM, MDR, MA, MB, KDC
42 * CARLsim3: MB, KDC, TSC
43 * CARLsim4: TSC, HK
44 * CARLsim5: HK, JX, KC
45 * CARLsim6: LN, JX, KC, KW
46 *
47 * CARLsim available from http://socsci.uci.edu/~jkrichma/CARLsim/
48 * Ver 12/31/2016
49 */
50 
51 #include <snn.h>
52 #include <error_code.h>
53 
54 #include <spike_buffer.h>
55 
56 // spikeGeneratorUpdate_CPU on CPUs
57 #ifdef __NO_PTHREADS__
58  void SNN::spikeGeneratorUpdate_CPU(int netId) {
59 #else // POSIX
60  void* SNN::spikeGeneratorUpdate_CPU(int netId) {
61 #endif
62  assert(runtimeData[netId].allocated);
63  assert(runtimeData[netId].memType == CPU_MEM);
64 
65  // FIXME: skip this step if all spike gen neuron are possion neuron (generated by rate)
66  // update the random number for poisson spike generator (spikes generated by rate)
67  for (int poisN = 0; poisN < networkConfigs[netId].numNPois; poisN++) {
68  // set CPU_MODE Random Gen, store random number to g(c)puRandNums
69  runtimeData[netId].randNum[poisN] = drand48();
70  }
71 
72  // Use spike generators (user-defined callback function)
73  if (networkConfigs[netId].numNSpikeGen > 0) {
74  assert(managerRuntimeData.spikeGenBits != NULL);
75 
76  // reset the bit status of the spikeGenBits...
77  memset(managerRuntimeData.spikeGenBits, 0, sizeof(int) * (networkConfigs[netId].numNSpikeGen / 32 + 1));
78 
79  // fill spikeGenBits from SpikeBuffer
80  fillSpikeGenBits(netId);
81 
82  // copy the spikeGenBits from the manager to the CPU runtime
83  memcpy(runtimeData[netId].spikeGenBits, managerRuntimeData.spikeGenBits, sizeof(int) * (networkConfigs[netId].numNSpikeGen / 32 + 1));
84  }
85 }
86 
87 #ifndef __NO_PTHREADS__ // POSIX
88  // Static multithreading subroutine method - helper for the above method
89  void* SNN::helperSpikeGeneratorUpdate_CPU(void* arguments) {
90  ThreadStruct* args = (ThreadStruct*) arguments;
91  //printf("\nThread ID: %lu and CPU: %d\n",pthread_self(), sched_getcpu());
92  ((SNN *)args->snn_pointer) -> spikeGeneratorUpdate_CPU(args->netId);
93  pthread_exit(0);
94  }
95 #endif
96 
97 #ifdef __NO_PTHREADS__
98  void SNN::updateTimingTable_CPU(int netId) {
99 #else // POSIX
100  void* SNN::updateTimingTable_CPU(int netId) {
101 #endif
102  assert(runtimeData[netId].memType == CPU_MEM);
103 
104  runtimeData[netId].timeTableD2[simTimeMs + networkConfigs[netId].maxDelay + 1] = runtimeData[netId].spikeCountD2Sec + runtimeData[netId].spikeCountLastSecLeftD2;
105  runtimeData[netId].timeTableD1[simTimeMs + networkConfigs[netId].maxDelay + 1] = runtimeData[netId].spikeCountD1Sec;
106 }
107 
108 #ifndef __NO_PTHREADS__ // POSIX
109  // Static multithreading subroutine method - helper for the above method
110  void* SNN::helperUpdateTimingTable_CPU(void* arguments) {
111  ThreadStruct* args = (ThreadStruct*) arguments;
112  //printf("\nThread ID: %lu and CPU: %d\n",pthread_self(), sched_getcpu());
113  ((SNN *)args->snn_pointer) -> updateTimingTable_CPU(args->netId);
114  pthread_exit(0);
115  }
116 #endif
117 
118 //void SNN::routeSpikes_CPU() {
119 // int firingTableIdxD2, firingTableIdxD1;
120 // int GtoLOffset;
121 // // ToDo: route spikes using routing table. currently only exchange spikes between GPU0 and GPU1
122 // // GPU0 -> GPU1
123 // if (!groupPartitionLists[0].empty() && !groupPartitionLists[1].empty()) {
124 // memcpy(managerRuntimeData.extFiringTableEndIdxD2, runtimeData[0].extFiringTableEndIdxD2, sizeof(int) * networkConfigs[0].numGroups);
125 // memcpy(managerRuntimeData.extFiringTableEndIdxD1, runtimeData[0].extFiringTableEndIdxD1, sizeof(int) * networkConfigs[0].numGroups);
126 // memcpy(managerRuntimeData.extFiringTableD2, runtimeData[0].extFiringTableD2, sizeof(int*) * networkConfigs[0].numGroups);
127 // memcpy(managerRuntimeData.extFiringTableD1, runtimeData[0].extFiringTableD1, sizeof(int*) * networkConfigs[0].numGroups);
128 // //KERNEL_DEBUG("GPU0 D1ex:%d/D2ex:%d", managerRuntimeData.extFiringTableEndIdxD1[0], managerRuntimeData.extFiringTableEndIdxD2[0]);
129 //
130 // memcpy(managerRuntimeData.timeTableD2, runtimeData[1].timeTableD2, sizeof(int) * (1000 + glbNetworkConfig.maxDelay + 1));
131 // memcpy(managerRuntimeData.timeTableD1, runtimeData[1].timeTableD1, sizeof(int) * (1000 + glbNetworkConfig.maxDelay + 1));
132 // firingTableIdxD2 = managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1];
133 // firingTableIdxD1 = managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1];
134 // //KERNEL_DEBUG("GPU1 D1:%d/D2:%d", firingTableIdxD1, firingTableIdxD2);
135 //
136 // for (int lGrpId = 0; lGrpId < networkConfigs[0].numGroups; lGrpId++) {
137 // if (groupConfigs[0][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD2[lGrpId] > 0) {
138 // memcpy(runtimeData[1].firingTableD2 + firingTableIdxD2,
139 // managerRuntimeData.extFiringTableD2[lGrpId],
140 // sizeof(int) * managerRuntimeData.extFiringTableEndIdxD2[lGrpId]);
141 //
142 // for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[1].begin(); grpIt != groupPartitionLists[1].end(); grpIt++) {
143 // if (grpIt->gGrpId == groupConfigs[0][lGrpId].gGrpId)
144 // GtoLOffset = grpIt->GtoLOffset;
145 // }
146 //
147 // convertExtSpikesD2_CPU(1, firingTableIdxD2,
148 // firingTableIdxD2 + managerRuntimeData.extFiringTableEndIdxD2[lGrpId],
149 // GtoLOffset); // [StartIdx, EndIdx)
150 // firingTableIdxD2 += managerRuntimeData.extFiringTableEndIdxD2[lGrpId];
151 // }
152 //
153 // if (groupConfigs[0][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD1[lGrpId] > 0) {
154 // memcpy(runtimeData[1].firingTableD1 + firingTableIdxD1,
155 // managerRuntimeData.extFiringTableD1[lGrpId],
156 // sizeof(int) * managerRuntimeData.extFiringTableEndIdxD1[lGrpId]);
157 //
158 // for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[1].begin(); grpIt != groupPartitionLists[1].end(); grpIt++) {
159 // if (grpIt->gGrpId == groupConfigs[0][lGrpId].gGrpId)
160 // GtoLOffset = grpIt->GtoLOffset;
161 // }
162 //
163 // convertExtSpikesD1_CPU(1, firingTableIdxD1,
164 // firingTableIdxD1 + managerRuntimeData.extFiringTableEndIdxD1[lGrpId],
165 // GtoLOffset); // [StartIdx, EndIdx)
166 // firingTableIdxD1 += managerRuntimeData.extFiringTableEndIdxD1[lGrpId];
167 //
168 // }
169 // //KERNEL_DEBUG("GPU1 New D1:%d/D2:%d", firingTableIdxD1, firingTableIdxD2);
170 // }
171 // managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD2;
172 // managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD1;
173 // memcpy(runtimeData[1].timeTableD2, managerRuntimeData.timeTableD2, sizeof(int)*(1000 + glbNetworkConfig.maxDelay + 1));
174 // memcpy(runtimeData[1].timeTableD1, managerRuntimeData.timeTableD1, sizeof(int)*(1000 + glbNetworkConfig.maxDelay + 1));
175 // }
176 //
177 // // GPU1 -> GPU0
178 // if (!groupPartitionLists[1].empty() && !groupPartitionLists[0].empty()) {
179 // memcpy(managerRuntimeData.extFiringTableEndIdxD2, runtimeData[1].extFiringTableEndIdxD2, sizeof(int) * networkConfigs[1].numGroups);
180 // memcpy(managerRuntimeData.extFiringTableEndIdxD1, runtimeData[1].extFiringTableEndIdxD1, sizeof(int) * networkConfigs[1].numGroups);
181 // memcpy(managerRuntimeData.extFiringTableD2, runtimeData[1].extFiringTableD2, sizeof(int*) * networkConfigs[1].numGroups);
182 // memcpy(managerRuntimeData.extFiringTableD1, runtimeData[1].extFiringTableD1, sizeof(int*) * networkConfigs[1].numGroups);
183 // //KERNEL_DEBUG("GPU1 D1ex:%d/D2ex:%d", managerRuntimeData.extFiringTableEndIdxD1[0], managerRuntimeData.extFiringTableEndIdxD2[0]);
184 //
185 // memcpy(managerRuntimeData.timeTableD2, runtimeData[0].timeTableD2, sizeof(int)*(1000 + glbNetworkConfig.maxDelay + 1));
186 // memcpy(managerRuntimeData.timeTableD1, runtimeData[0].timeTableD1, sizeof(int)*(1000 + glbNetworkConfig.maxDelay + 1));
187 // firingTableIdxD2 = managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1];
188 // firingTableIdxD1 = managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1];
189 // //KERNEL_DEBUG("GPU0 D1:%d/D2:%d", firingTableIdxD1, firingTableIdxD2);
190 //
191 // for (int lGrpId = 0; lGrpId < networkConfigs[1].numGroups; lGrpId++) {
192 // if (groupConfigs[1][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD2[lGrpId] > 0) {
193 // memcpy(runtimeData[0].firingTableD2 + firingTableIdxD2,
194 // managerRuntimeData.extFiringTableD2[lGrpId],
195 // sizeof(int) * managerRuntimeData.extFiringTableEndIdxD2[lGrpId]);
196 //
197 // for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[0].begin(); grpIt != groupPartitionLists[0].end(); grpIt++) {
198 // if (grpIt->gGrpId == groupConfigs[1][lGrpId].gGrpId)
199 // GtoLOffset = grpIt->GtoLOffset;
200 // }
201 //
202 // convertExtSpikesD2_CPU(0, firingTableIdxD2,
203 // firingTableIdxD2 + managerRuntimeData.extFiringTableEndIdxD2[lGrpId],
204 // GtoLOffset); // [StartIdx, EndIdx)
205 // firingTableIdxD2 += managerRuntimeData.extFiringTableEndIdxD2[lGrpId];
206 // }
207 //
208 // if (groupConfigs[1][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD1[lGrpId] > 0) {
209 // memcpy(runtimeData[0].firingTableD1 + firingTableIdxD1,
210 // managerRuntimeData.extFiringTableD1[lGrpId],
211 // sizeof(int) * managerRuntimeData.extFiringTableEndIdxD1[lGrpId]);
212 //
213 // for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[0].begin(); grpIt != groupPartitionLists[0].end(); grpIt++) {
214 // if (grpIt->gGrpId == groupConfigs[1][lGrpId].gGrpId)
215 // GtoLOffset = grpIt->GtoLOffset;
216 // }
217 //
218 // convertExtSpikesD1_CPU(0, firingTableIdxD1,
219 // firingTableIdxD1 + managerRuntimeData.extFiringTableEndIdxD1[lGrpId],
220 // GtoLOffset); // [StartIdx, EndIdx)
221 // firingTableIdxD1 += managerRuntimeData.extFiringTableEndIdxD1[lGrpId];
222 // }
223 // //KERNEL_DEBUG("GPU0 New D1:%d/D2:%d", firingTableIdxD1, firingTableIdxD2);
224 // }
225 // managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD2;
226 // managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD1;
227 // memcpy(runtimeData[0].timeTableD2, managerRuntimeData.timeTableD2, sizeof(int)*(1000 + glbNetworkConfig.maxDelay + 1));
228 // memcpy(runtimeData[0].timeTableD1, managerRuntimeData.timeTableD1, sizeof(int)*(1000 + glbNetworkConfig.maxDelay + 1));
229 // }
230 //
231 //}
232 
233 #ifdef __NO_PTHREADS__
234  void SNN::convertExtSpikesD2_CPU(int netId, int startIdx, int endIdx, int GtoLOffset) {
235 #else // POSIX
236  void* SNN::convertExtSpikesD2_CPU(int netId, int startIdx, int endIdx, int GtoLOffset) {
237 #endif
238  int spikeCountExtRx = endIdx - startIdx; // received external spike count
239 
240  runtimeData[netId].spikeCountD2Sec += spikeCountExtRx;
241  runtimeData[netId].spikeCountExtRxD2 += spikeCountExtRx;
242  runtimeData[netId].spikeCountExtRxD2Sec += spikeCountExtRx;
243 
244  // FIXME: if endIdx - startIdx > 64 * 128
245  //if (firingTableIdx < endIdx)
246  for (int extIdx = startIdx; extIdx < endIdx; extIdx++)
247  runtimeData[netId].firingTableD2[extIdx] += GtoLOffset;
248 }
249 
250 #ifndef __NO_PTHREADS__ // POSIX
251  // Static multithreading subroutine method - helper for the above method
252  void* SNN::helperConvertExtSpikesD2_CPU(void* arguments) {
253  ThreadStruct* args = (ThreadStruct*) arguments;
254  //printf("\nThread ID: %lu and CPU: %d\n",pthread_self(), sched_getcpu());
255  ((SNN *)args->snn_pointer) -> convertExtSpikesD2_CPU(args->netId, args->startIdx, args->endIdx, args->GtoLOffset);
256  pthread_exit(0);
257  }
258 #endif
259 
260 #ifdef __NO_PTHREADS__
261  void SNN::convertExtSpikesD1_CPU(int netId, int startIdx, int endIdx, int GtoLOffset) {
262 #else // POSIX
263  void* SNN::convertExtSpikesD1_CPU(int netId, int startIdx, int endIdx, int GtoLOffset) {
264 #endif
265  int spikeCountExtRx = endIdx - startIdx; // received external spike count
266 
267  runtimeData[netId].spikeCountD1Sec += spikeCountExtRx;
268  runtimeData[netId].spikeCountExtRxD1 += spikeCountExtRx;
269  runtimeData[netId].spikeCountExtRxD1Sec += spikeCountExtRx;
270 
271  // FIXME: if endIdx - startIdx > 64 * 128
272  for (int extIdx = startIdx; extIdx < endIdx; extIdx++)
273  runtimeData[netId].firingTableD1[extIdx] += GtoLOffset;
274 }
275 
276 #ifndef __NO_PTHREADS__ // POSIX
277  // Static multithreading subroutine method - helper for the above method
278  void* SNN::helperConvertExtSpikesD1_CPU(void* arguments) {
279  ThreadStruct* args = (ThreadStruct*) arguments;
280  //printf("\nThread ID: %lu and CPU: %d\n",pthread_self(), sched_getcpu());
281  ((SNN *)args->snn_pointer) -> convertExtSpikesD1_CPU(args->netId, args->startIdx, args->endIdx, args->GtoLOffset);
282  pthread_exit(0);
283  }
284 #endif
285 
286 #ifdef __NO_PTHREADS__
287  void SNN::clearExtFiringTable_CPU(int netId) {
288 #else // POSIX
289  void* SNN::clearExtFiringTable_CPU(int netId) {
290 #endif
291  assert(runtimeData[netId].memType == CPU_MEM);
292 
293  memset(runtimeData[netId].extFiringTableEndIdxD1, 0, sizeof(int) * networkConfigs[netId].numGroups);
294  memset(runtimeData[netId].extFiringTableEndIdxD2, 0, sizeof(int) * networkConfigs[netId].numGroups);
295 }
296 
297 #ifndef __NO_PTHREADS__ // POSIX
298  // Static multithreading subroutine method - helper for the above method
299  void* SNN::helperClearExtFiringTable_CPU(void* arguments) {
300  ThreadStruct* args = (ThreadStruct*) arguments;
301  //printf("\nThread ID: %lu and CPU: %d\n",pthread_self(), sched_getcpu());
302  ((SNN *)args->snn_pointer) -> clearExtFiringTable_CPU(args->netId);
303  pthread_exit(0);
304  }
305 #endif
306 
307 void SNN::copyTimeTable(int netId, bool toManager) {
308  assert(netId >= CPU_RUNTIME_BASE);
309 
310  if (toManager) {
311  memcpy(managerRuntimeData.timeTableD2, runtimeData[netId].timeTableD2, sizeof(int) * (1000 + glbNetworkConfig.maxDelay + 1));
312  memcpy(managerRuntimeData.timeTableD1, runtimeData[netId].timeTableD1, sizeof(int) * (1000 + glbNetworkConfig.maxDelay + 1));
313  } else {
314  memcpy(runtimeData[netId].timeTableD2, managerRuntimeData.timeTableD2, sizeof(int)*(1000 + glbNetworkConfig.maxDelay + 1));
315  memcpy(runtimeData[netId].timeTableD1, managerRuntimeData.timeTableD1, sizeof(int)*(1000 + glbNetworkConfig.maxDelay + 1));
316  }
317 }
318 
319 void SNN::copyExtFiringTable(int netId) {
320  assert(netId >= CPU_RUNTIME_BASE);
321 
322  memcpy(managerRuntimeData.extFiringTableEndIdxD2, runtimeData[netId].extFiringTableEndIdxD2, sizeof(int) * networkConfigs[netId].numGroups);
323  memcpy(managerRuntimeData.extFiringTableEndIdxD1, runtimeData[netId].extFiringTableEndIdxD1, sizeof(int) * networkConfigs[netId].numGroups);
324  memcpy(managerRuntimeData.extFiringTableD2, runtimeData[netId].extFiringTableD2, sizeof(int*) * networkConfigs[netId].numGroups);
325  memcpy(managerRuntimeData.extFiringTableD1, runtimeData[netId].extFiringTableD1, sizeof(int*) * networkConfigs[netId].numGroups);
326  //KERNEL_DEBUG("GPU0 D1ex:%d/D2ex:%d", managerRuntimeData.extFiringTableEndIdxD1[0], managerRuntimeData.extFiringTableEndIdxD2[0]);
327 }
328 
329 // resets nSpikeCnt[]
330 // used for management of manager runtime data
331 // FIXME: make sure this is right when separating cpu_module to a standalone class
332 // FIXME: currently this function clear nSpikeCnt of manager runtime data
333 #ifdef __NO_PTHREADS__
334  void SNN::resetSpikeCnt_CPU(int netId, int lGrpId) {
335 #else // POSIX
336  void* SNN::resetSpikeCnt_CPU(int netId, int lGrpId) {
337 #endif
338  assert(runtimeData[netId].memType == CPU_MEM);
339 
340  if (lGrpId == ALL) {
341  memset(runtimeData[netId].nSpikeCnt, 0, sizeof(int) * networkConfigs[netId].numN);
342  } else {
343  int lStartN = groupConfigs[netId][lGrpId].lStartN;
344  int numN = groupConfigs[netId][lGrpId].numN;
345  memset(runtimeData[netId].nSpikeCnt + lStartN, 0, sizeof(int) * numN);
346  }
347 }
348 
349 #ifndef __NO_PTHREADS__ // POSIX
350  // Static multithreading subroutine method - helper for the above method
351  void* SNN::helperResetSpikeCnt_CPU(void* arguments) {
352  ThreadStruct* args = (ThreadStruct*) arguments;
353  //printf("\nThread ID: %lu and CPU: %d\n",pthread_self(), sched_getcpu());
354  ((SNN *)args->snn_pointer) -> resetSpikeCnt_CPU(args->netId, args->lGrpId);
355  pthread_exit(0);
356  }
357 #endif
358 
359 // This method loops through all spikes that are generated by neurons with a delay of 1ms
360 // and delivers the spikes to the appropriate post-synaptic neuron
361 #ifdef __NO_PTHREADS__
362  void SNN::doCurrentUpdateD1_CPU(int netId) {
363 #else // POSIX
364  void* SNN::doCurrentUpdateD1_CPU(int netId) {
365 #endif
366  assert(runtimeData[netId].memType == CPU_MEM);
367 
368  int k = runtimeData[netId].timeTableD1[simTimeMs + networkConfigs[netId].maxDelay + 1] - 1;
369  int k_end = runtimeData[netId].timeTableD1[simTimeMs + networkConfigs[netId].maxDelay];
370 
371  while((k >= k_end) && (k >= 0)) {
372  int lNId = runtimeData[netId].firingTableD1[k];
373  //assert(lNId < networkConfigs[netId].numN);
374 
375  DelayInfo dPar = runtimeData[netId].postDelayInfo[lNId * (networkConfigs[netId].maxDelay + 1)];
376 
377  unsigned int offset = runtimeData[netId].cumulativePost[lNId];
378 
379  for(int idx_d = dPar.delay_index_start; idx_d < (dPar.delay_index_start + dPar.delay_length); idx_d = idx_d + 1) {
380  // get synaptic info...
381  SynInfo postInfo = runtimeData[netId].postSynapticIds[offset + idx_d];
382 
383  int postNId = GET_CONN_NEURON_ID(postInfo);
384  assert(postNId < networkConfigs[netId].numNAssigned);
385 
386  int synId = GET_CONN_SYN_ID(postInfo);
387  assert(synId < (runtimeData[netId].Npre[postNId]));
388 
389  if (postNId < networkConfigs[netId].numN) // test if post-neuron is a local neuron
390  generatePostSynapticSpike(lNId /* preNId */, postNId, synId, 0, netId);
391  }
392 
393  k = k - 1;
394  }
395 }
396 
397 #ifndef __NO_PTHREADS__ // POSIX
398  // Static multithreading subroutine method - helper for the above method
399  void* SNN::helperDoCurrentUpdateD1_CPU(void* arguments) {
400  ThreadStruct* args = (ThreadStruct*) arguments;
401  //printf("\nThread ID: %lu and CPU: %d\n",pthread_self(), sched_getcpu());
402  ((SNN *)args->snn_pointer) -> doCurrentUpdateD1_CPU(args->netId);
403  pthread_exit(0);
404  }
405 #endif
406 
407 // This method loops through all spikes that are generated by neurons with a delay of 2+ms
408 // and delivers the spikes to the appropriate post-synaptic neuron
409 #ifdef __NO_PTHREADS__
410  void SNN::doCurrentUpdateD2_CPU(int netId) {
411 #else // POSIX
412  void* SNN::doCurrentUpdateD2_CPU(int netId) {
413 #endif
414  assert(runtimeData[netId].memType == CPU_MEM);
415 
416  if (networkConfigs[netId].maxDelay > 1) {
417  int k = runtimeData[netId].timeTableD2[simTimeMs + 1 + networkConfigs[netId].maxDelay] - 1;
418  int k_end = runtimeData[netId].timeTableD2[simTimeMs + 1];
419  int t_pos = simTimeMs;
420 
421  while ((k >= k_end) && (k >= 0)) {
422  // get the neuron id from the index k
423  int lNId = runtimeData[netId].firingTableD2[k];
424 
425  // find the time of firing from the timeTable using index k
426  while (!((k >= runtimeData[netId].timeTableD2[t_pos + networkConfigs[netId].maxDelay]) && (k < runtimeData[netId].timeTableD2[t_pos + networkConfigs[netId].maxDelay + 1]))) {
427  t_pos = t_pos - 1;
428  assert((t_pos + networkConfigs[netId].maxDelay - 1) >= 0);
429  }
430 
431  // \TODO: Instead of using the complex timeTable, can neuronFiringTime value...???
432  // Calculate the time difference between time of firing of neuron and the current time...
433  int tD = simTimeMs - t_pos;
434 
435  assert((tD < networkConfigs[netId].maxDelay) && (tD >= 0));
436  //assert(lNId < networkConfigs[netId].numN);
437 
438  DelayInfo dPar = runtimeData[netId].postDelayInfo[lNId * (networkConfigs[netId].maxDelay + 1) + tD];
439 
440  unsigned int offset = runtimeData[netId].cumulativePost[lNId];
441 
442  // for each delay variables
443  for (int idx_d = dPar.delay_index_start; idx_d < (dPar.delay_index_start + dPar.delay_length); idx_d = idx_d + 1) {
444  // get synaptic info...
445  SynInfo postInfo = runtimeData[netId].postSynapticIds[offset + idx_d];
446 
447  int postNId = GET_CONN_NEURON_ID(postInfo);
448  assert(postNId < networkConfigs[netId].numNAssigned);
449 
450  int synId = GET_CONN_SYN_ID(postInfo);
451  assert(synId < (runtimeData[netId].Npre[postNId]));
452 
453  if (postNId < networkConfigs[netId].numN) // test if post-neuron is a local neuron
454  generatePostSynapticSpike(lNId /* preNId */, postNId, synId, tD, netId);
455  }
456 
457  k = k - 1;
458  }
459  }
460 }
461 
462 #ifndef __NO_PTHREADS__ // POSIX
463  // Static multithreading subroutine method - helper for the above method
464  void* SNN::helperDoCurrentUpdateD2_CPU(void* arguments) {
465  ThreadStruct* args = (ThreadStruct*) arguments;
466  //printf("\nThread ID: %lu and CPU: %d\n",pthread_self(), sched_getcpu());
467  ((SNN *)args->snn_pointer) -> doCurrentUpdateD2_CPU(args->netId);
468  pthread_exit(0);
469  }
470 #endif
471 
472 #ifdef __NO_PTHREADS__
473  void SNN::doSTPUpdateAndDecayCond_CPU(int netId) {
474 #else // POSIX
475  void* SNN::doSTPUpdateAndDecayCond_CPU(int netId) {
476 #endif
477  assert(runtimeData[netId].memType == CPU_MEM);
478  // ToDo: This can be further optimized using multiple threads allocated on mulitple CPU cores
479  //decay the STP variables before adding new spikes.
480  for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) {
481  for(int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++) {
482  if (groupConfigs[netId][lGrpId].WithSTP) {
483  int ind_plus = STP_BUF_POS(lNId, simTime, glbNetworkConfig.maxDelay);
484  int ind_minus = STP_BUF_POS(lNId, (simTime - 1), glbNetworkConfig.maxDelay);
485 #ifdef LN_I_CALC_TYPES
486  float tau_u_inv = groupConfigs[netId][lGrpId].STP_tau_u_inv;
487  float tau_x_inv = groupConfigs[netId][lGrpId].STP_tau_x_inv;
488  if (groupConfigs[netId][lGrpId].WithNM4STP) {
489  std::vector<float> nm;
490  nm.push_back(groupConfigs[netId][lGrpId].activeDP ? runtimeData[netId].grpDA[lGrpId] : 0.f); // baseDP is 0 at t=0 ???
491  nm.push_back(groupConfigs[netId][lGrpId].active5HT ? runtimeData[netId].grp5HT[lGrpId] : 0.f);
492  nm.push_back(groupConfigs[netId][lGrpId].activeACh ? runtimeData[netId].grpACh[lGrpId] : 0.f);
493  nm.push_back(groupConfigs[netId][lGrpId].activeNE ? runtimeData[netId].grpNE[lGrpId] : 0.f);
494  auto& config = groupConfigs[netId][lGrpId];
495  float tau_u = 1.0f / tau_u_inv;
496  float tau_x = 1.0f / tau_x_inv;
497  float w_tau_u = 0.0f;
498  float w_tau_x = 0.0f;
499  for (int i = 0; i < NM_NE + 1; i++) {
500  w_tau_u += nm[i] * config.wstptauu[i];
501  w_tau_x += nm[i] * config.wstptaux[i];
502  }
503  w_tau_u *= config.wstptauu[NM_NE + 1];
504  w_tau_x *= config.wstptaux[NM_NE + 1];
505  tau_u *= w_tau_u + config.wstptauu[NM_NE + 2];
506  tau_x *= w_tau_x + config.wstptaux[NM_NE + 2];
507  tau_u_inv = 1.0f / tau_u;
508  tau_x_inv = 1.0f / tau_x;
509  }
510  //KERNEL_DEBUG("grp[%d] tau_u = %f tau_x = %f\n", lGrpId, 1.0f/tau_u_inv, 1.0f/tau_x_inv);
511  runtimeData[netId].stpu[ind_plus] = runtimeData[netId].stpu[ind_minus] * (1.0f - tau_u_inv);
512  runtimeData[netId].stpx[ind_plus] = runtimeData[netId].stpx[ind_minus] + (1.0f - runtimeData[netId].stpx[ind_minus]) * tau_x_inv;
513 #else
514  runtimeData[netId].stpu[ind_plus] = runtimeData[netId].stpu[ind_minus] * (1.0f - groupConfigs[netId][lGrpId].STP_tau_u_inv);
515  runtimeData[netId].stpx[ind_plus] = runtimeData[netId].stpx[ind_minus] + (1.0f - runtimeData[netId].stpx[ind_minus]) * groupConfigs[netId][lGrpId].STP_tau_x_inv;
516 #endif
517  }
518 
519  // decay conductances
520 #ifdef LN_I_CALC_TYPES
521  auto& groupConfig = groupConfigs[netId][lGrpId];
522  switch (groupConfig.icalcType) {
523  case COBA:
524  case alpha1_ADK13:
525  if (IS_REGULAR_NEURON(lNId, networkConfigs[netId].numNReg, networkConfigs[netId].numNPois)) {
526  runtimeData[netId].gAMPA[lNId] *= groupConfig.dAMPA;
527  if (groupConfig.with_NMDA_rise) {
528  runtimeData[netId].gNMDA_r[lNId] *= groupConfig.rNMDA; // rise
529  runtimeData[netId].gNMDA_d[lNId] *= groupConfig.dNMDA; // decay
530  }
531  else {
532  runtimeData[netId].gNMDA[lNId] *= groupConfig.dNMDA; // instantaneous rise
533  }
534 
535  runtimeData[netId].gGABAa[lNId] *= groupConfig.dGABAa;
536  if (groupConfig.with_GABAb_rise) {
537  runtimeData[netId].gGABAb_r[lNId] *= groupConfig.rGABAb; // rise
538  runtimeData[netId].gGABAb_d[lNId] *= groupConfig.dGABAb; // decay
539  }
540  else {
541  runtimeData[netId].gGABAb[lNId] *= groupConfig.dGABAb; // instantaneous rise
542  }
543  }
544  break;
545  }
546 #else
547  if (networkConfigs[netId].sim_with_conductances && IS_REGULAR_NEURON(lNId, networkConfigs[netId].numNReg, networkConfigs[netId].numNPois)) {
548  runtimeData[netId].gAMPA[lNId] *= dAMPA;
549  if (sim_with_NMDA_rise) {
550  runtimeData[netId].gNMDA_r[lNId] *= rNMDA; // rise
551  runtimeData[netId].gNMDA_d[lNId] *= dNMDA; // decay
552  }
553  else {
554  runtimeData[netId].gNMDA[lNId] *= dNMDA; // instantaneous rise
555  }
556 
557  runtimeData[netId].gGABAa[lNId] *= dGABAa;
558  if (sim_with_GABAb_rise) {
559  runtimeData[netId].gGABAb_r[lNId] *= rGABAb; // rise
560  runtimeData[netId].gGABAb_d[lNId] *= dGABAb; // decay
561  }
562  else {
563  runtimeData[netId].gGABAb[lNId] *= dGABAb; // instantaneous rise
564  }
565  }
566 #endif
567  }
568  }
569 
570 #ifdef LN_I_CALC_TYPES
571  //for (int lConnId = 0; lConnId < networkConfigs[netId].numConnections; lConnId++) {
572  //
573  // auto config = connectConfigs[netId][lConnId];
574 
575  // see defintion in void SNN::generateConnectionRuntime(int netId) {
576  // see usage in void SNN::generatePostSynapticSpike(int preNId, int postNId, int synId, int tD, int netId) {
577  //
578  for (std::map<int, ConnectConfig>::iterator connIt = connectConfigMap.begin(); connIt != connectConfigMap.end(); connIt++) {
579  // store scaling factors for synaptic currents in connection-centric array
580 
581  int lConnId = connIt->second.connId; // global ???
582  //printf("lConnId=%d\n", lConnId);
583 
584  auto &config = connIt->second;
585  switch (config.icalcType) {
586  case alpha2A_ADK13:
587  if (groupConfigs[netId][config.grpDest].activeNE) {
588  float ne = runtimeData[netId].grpNE[config.grpDest]; // target group normalized
589  mulSynFast[lConnId] = 0.1f; // AMPA
590  mulSynSlow[lConnId] = 15.0f - 10.0f * exp(-ne * 5.0f); // NMDA
591  //printf("ne[%d]=%f lConnId=%d nmda=%f\n", config.grpDest, ne, lConnId, mulSynSlow[lConnId]);
592  }
593  break;
594  case D1_ADK13:
595  if (groupConfigs[netId][config.grpDest].activeDP) {
596  float da = runtimeData[netId].grpDA[config.grpDest]; // target group normalized
597  mulSynFast[lConnId] = 1.0f + exp(-da * 5.0f); // AMPA
598  mulSynSlow[lConnId] = mulSynFast[lConnId]; // NMDA
599  //printf("da[%d]=%f nmda=%f\n", config.grpDest, da, mulSynSlow[lConnId]);
600  }
601  break;
602  case D2_AK15:
603  if (groupConfigs[netId][config.grpDest].activeDP) {
604  float da = runtimeData[netId].grpDA[config.grpDest]; // target group normalized
605  float mu = 0.0f; // da < 0
606  float a = 18.f;
607  float y1, y2;
608  if (da >= 0 && da < 1.f / 6.f) {
609  y1 = 1.2f * 0.5f * tanh((0.f - 0.f / 3.f) * a);
610  y2 = 1.2f * 0.5f * tanh((1.f / 6.f - 0 / 3.f) * a);
611  mu = (0.6f - y2) + 1.2f * 0.5f * (tanh((da - 0.f / 3.f) * a));
612  } else
613  if (da >= 1.f / 6.f & da < 1.f / 2.f) {
614  y1 = 0.6f + 0.4f * 0.5f * (1.f + tanh((1.f / 6.f - 1.f / 3.f) * a));
615  y2 = 0.6f + 0.4f * 0.5f * (1.f + tanh((1.f / 2.f - 1.f / 3.f) * a));
616  mu = 0.6f + 0.4f * 0.5f * (1.f + 0.4f / (y2 - y1) * tanh((da - 1.f / 3.f) * a));
617  } else
618  if (da >= 1.f / 2.f & da < 5.f / 6.f) {
619  y1 = 1.0f + 0.8f * 0.5f * (1.f + tanh((1.f / 2.f - 2.f / 3.f) * a));
620  y2 = 1.0f + 0.8f * 0.5f * (1.f + tanh((5.f / 6.f - 2.f / 3.f) * a));
621  mu = 1.0f + 0.8f * 0.5f * (1.f + 0.8f / (y2 - y1) * tanh((da - 2.f / 3.f) * a));
622  }
623  else { // da >= 5/6
624  y1 = tanh((5.f / 6.f - 3.f / 3.f) * a);
625  y2 = tanh((1.f - 3.f / 3.f) * a);
626  mu = 1.8f + 1.6f * 0.5f * (1.f + 1.f / (y2 - y1) * tanh((da - 3.f / 3.f) * a));
627  }
628  mulSynFast[lConnId] = mu; // AMPA
629  mulSynSlow[lConnId] = mu; // NMDA
630  //printf("da[%d]=%f nmda=%f\n", config.grpDest, da, mulSynSlow[lConnId]);
631  }
632  break;
633  }
634  }
635 #endif
636 
637 }
638 
639 #ifndef __NO_PTHREADS__ // POSIX
640  // Static multithreading subroutine method - helper for the above method
641  void* SNN::helperDoSTPUpdateAndDecayCond_CPU(void* arguments) {
642  ThreadStruct* args = (ThreadStruct*) arguments;
643  //printf("\nThread ID: %lu and CPU: %d\n",pthread_self(), sched_getcpu());
644  ((SNN *)args->snn_pointer) -> doSTPUpdateAndDecayCond_CPU(args->netId);
645  pthread_exit(0);
646  }
647 #endif
648 
649 #ifdef __NO_PTHREADS__
650  void SNN::findFiring_CPU(int netId) {
651 #else // POSIX
652  void* SNN::findFiring_CPU(int netId) {
653 #endif
654  assert(runtimeData[netId].memType == CPU_MEM);
655  // ToDo: This can be further optimized using multiple threads allocated on mulitple CPU cores
656  for(int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) {
657  for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++) {
658  bool needToWrite = false;
659  // given group of neurons belong to the poisson group....
660  if (groupConfigs[netId][lGrpId].Type & POISSON_NEURON) {
661  if(groupConfigs[netId][lGrpId].isSpikeGenFunc) {
662  unsigned int offset = lNId - groupConfigs[netId][lGrpId].lStartN + groupConfigs[netId][lGrpId].Noffset;
663  needToWrite = getSpikeGenBit(offset, netId);
664  } else { // spikes generated by poission rate
665  needToWrite = getPoissonSpike(lNId, netId);
666  }
667  // Note: valid lastSpikeTime of spike gen neurons is required by userDefinedSpikeGenerator()
668  if (needToWrite)
669  runtimeData[netId].lastSpikeTime[lNId] = simTime;
670  } else { // Regular neuron
671  if (runtimeData[netId].curSpike[lNId]) {
672  runtimeData[netId].curSpike[lNId] = false;
673  needToWrite = true;
674  }
675 
676  // log v, u value if any active neuron monitor is presented
677  if (networkConfigs[netId].sim_with_nm && lNId - groupConfigs[netId][lGrpId].lStartN < MAX_NEURON_MON_GRP_SZIE) {
678  int idxBase = networkConfigs[netId].numGroups * MAX_NEURON_MON_GRP_SZIE * simTimeMs + lGrpId * MAX_NEURON_MON_GRP_SZIE;
679  runtimeData[netId].nVBuffer[idxBase + lNId - groupConfigs[netId][lGrpId].lStartN] = runtimeData[netId].voltage[lNId];
680  runtimeData[netId].nUBuffer[idxBase + lNId - groupConfigs[netId][lGrpId].lStartN] = runtimeData[netId].recovery[lNId];
681  //KERNEL_INFO("simTimeMs: %d --base:%d -- %f -- %f --%f --%f", simTimeMs, idxBase + lNId - groupConfigs[netId][lGrpId].lStartN, runtimeData[netId].voltage[lNId], runtimeData[netId].recovery[lNId], runtimeData[netId].nVBuffer[idxBase + lNId - groupConfigs[netId][lGrpId].lStartN], runtimeData[netId].nUBuffer[idxBase + lNId - groupConfigs[netId][lGrpId].lStartN]);
682  }
683  }
684 
685  // this flag is set if with_stdp is set and also grpType is set to have GROUP_SYN_FIXED
686  if (needToWrite) {
687  bool hasSpace = false;
688  int fireId = -1;
689 
690  // update spike count: spikeCountD2Sec(W), spikeCountD1Sec(W), spikeCountLastSecLeftD2(R)
691  if (groupConfigs[netId][lGrpId].MaxDelay == 1)
692  {
693  if (runtimeData[netId].spikeCountD1Sec + 1 < networkConfigs[netId].maxSpikesD1) {
694  fireId = runtimeData[netId].spikeCountD1Sec;
695  runtimeData[netId].spikeCountD1Sec++;
696  }
697  } else { // MaxDelay > 1
698  if (runtimeData[netId].spikeCountD2Sec + runtimeData[netId].spikeCountLastSecLeftD2 + 1 < networkConfigs[netId].maxSpikesD2) {
699  fireId = runtimeData[netId].spikeCountD2Sec + runtimeData[netId].spikeCountLastSecLeftD2;
700  runtimeData[netId].spikeCountD2Sec++;
701  }
702  }
703 
704  if (fireId == -1) // no space availabe in firing table, drop the spike
705  continue;
706  // update firing table: firingTableD1(W), firingTableD2(W)
707  if (groupConfigs[netId][lGrpId].MaxDelay == 1) {
708  runtimeData[netId].firingTableD1[fireId] = lNId;
709  } else { // MaxDelay > 1
710  runtimeData[netId].firingTableD2[fireId] = lNId;
711 #ifdef LN_AXON_PLAST
712  runtimeData[netId].firingTimesD2[fireId] = simTime;
713 #endif
714  }
715 
716  // update external firing table: extFiringTableEndIdxD1(W), extFiringTableEndIdxD2(W), extFiringTableD1(W), extFiringTableD2(W)
717  if (groupConfigs[netId][lGrpId].hasExternalConnect) {
718  int extFireId = -1;
719  if (groupConfigs[netId][lGrpId].MaxDelay == 1) {
720  extFireId = runtimeData[netId].extFiringTableEndIdxD1[lGrpId]++;
721  runtimeData[netId].extFiringTableD1[lGrpId][extFireId] = lNId + groupConfigs[netId][lGrpId].LtoGOffset;
722  } else { // MaxDelay > 1
723  extFireId = runtimeData[netId].extFiringTableEndIdxD2[lGrpId]++;
724  runtimeData[netId].extFiringTableD2[lGrpId][extFireId] = lNId + groupConfigs[netId][lGrpId].LtoGOffset;
725  }
726  assert(extFireId != -1);
727  }
728 
729  // update STP for neurons that fire
730  if (groupConfigs[netId][lGrpId].WithSTP) {
731  firingUpdateSTP(lNId, lGrpId, netId);
732  }
733 
734  // keep track of number spikes per neuron
735  runtimeData[netId].nSpikeCnt[lNId]++;
736 
737  if (IS_REGULAR_NEURON(lNId, networkConfigs[netId].numNReg, networkConfigs[netId].numNPois))
738  resetFiredNeuron(lNId, lGrpId, netId);
739 
740  // STDP calculation: the post-synaptic neuron fires after the arrival of a pre-synaptic spike
741  if (!sim_in_testing && groupConfigs[netId][lGrpId].WithSTDP) {
742  updateLTP(lNId, lGrpId, netId);
743  }
744  }
745  }
746  }
747 }
748 
749 #ifndef __NO_PTHREADS__ // POSIX
750  // Static multithreading subroutine method - helper for the above method
751  void* SNN::helperFindFiring_CPU(void* arguments) {
752  ThreadStruct* args = (ThreadStruct*) arguments;
753  //printf("\nThread ID: %lu and CPU: %d\n",pthread_self(), sched_getcpu());
754  ((SNN *)args->snn_pointer) -> findFiring_CPU(args->netId);
755  pthread_exit(0);
756  }
757 #endif
758 
759 
760 #ifdef LN_AXON_PLAST
761 
762 #define LN_ELIGIBILITY_INSEARCH
763 #define LN_MOST_RECENT
764 
765 
766 //#ifdef __NO_PTHREADS__
	// Reconstructs a spike-propagation path backwards from goalNId towards
	// startNId inside a single group, by walking the delay-2+ firing table
	// (firingTableD2 / firingTimesD2) from the most recent entry towards older
	// ones. Along the way it assigns each visited neuron an eligibility value
	// that decays with the firing time (decay base 1 - 1/AxonPlast_TAU).
	//
	// \param[out] path        local neuron ids of the reconstructed path, goal first
	// \param[out] eligibility per-neuron eligibility, indexed by (lNId - lStartN);
	//                         assumed pre-sized by the caller — TODO confirm
	// \param[in]  netId       local network id; runtime data must be CPU memory
	// \param[in]  grpId       local group id; must have WithAxonPlast enabled
	// \param[in]  startNId    local neuron id where the wave originated
	// \param[in]  goalNId     local neuron id the wave reached
	void SNN::findWavefrontPath_CPU(std::vector<int>& path, std::vector<float>& eligibility, int netId, int grpId, int startNId, int goalNId) {


		assert(runtimeData[netId].memType == CPU_MEM);
		assert(groupConfigs[netId][grpId].WithAxonPlast); // eligibility trace, runtimeData[netId].lastSpikeTime

		KERNEL_INFO("findWavefrontPath_CPU from %d to %d in group %d of net %d", startNId, goalNId, grpId, netId);

		int numN = groupConfigs[netId][grpId].numN; // neurons in group -> max iterations of the search loop
		int tau = groupConfigs[netId][grpId].AxonPlast_TAU; // eligibility-trace decay time constant

		//Restrict the search in the group boundaries
		int lEndN = groupConfigs[netId][grpId].lEndN;
		int lStartN = groupConfigs[netId][grpId].lStartN;

		// last spike
		auto &rtD = runtimeData[netId];

		auto &firing = rtD.firingTableD2; // unsigned int *
		auto &times = rtD.firingTimesD2;

		// hold the valid neuron IDs, that can be searched.
		// initially all neurons of the group are valid
		// if a neuron was found and added to the path, the neuron no longer is eligible to be searched.
		// to avoid circles, the indices are relative neuron Ids
		// rId e: [lStartN .. lEndN] - lStartN;
		std::vector<bool> valid_ids(numN, true);

		std::vector<unsigned int> path_times; // hold the firing times of the path

		// search previous, most recent firing of the neuron identified by fired
		// fired: lNId which to search for
		// last: index in firingTableD2 at which to start the backward scan
		// returns the table index of the firing, or -1 / MAXINT when none exists
		auto searchPrevFiring = [&](int fired, int last) {
			KERNEL_DEBUG("searching previous firing for Nid %d starting on %d\n", fired, last);
			int index = last;
			while (index >= 0) {
				KERNEL_DEBUG("firingTableD2[%d]=%d\n", index, rtD.firingTableD2[index]);
				if (rtD.firingTableD2[index] == fired) {
					KERNEL_DEBUG("NId %d found at index %d (%d)\n", fired, index, rtD.firingTimesD2[index]);
					return index; // found
				}
				else
				if (rtD.firingTableD2[index] == startNId) { // hot fix for nId 3 issue: stop once the scan passes the start neuron
					KERNEL_DEBUG("Nid %d has no firing that lead to startNId %d\n", fired, startNId);
#ifdef LN_MOST_RECENT
					return -1; // most recent
#else
					return MAXINT;
#endif
				}
				else
					index--;
			}
#ifdef LN_MOST_RECENT
			return -1; // most recent
#else
			return MAXINT;
#endif
		};

		// append lNId (with its firing time) to the path and mark it visited
		auto appendToPath = [&](int lNId, unsigned int time) {
			if (lNId < lStartN || lNId > lEndN ) {
				printf("reject append to path: %d\n", lNId);
				return;
			}
			KERNEL_DEBUG("append to path: %d\n\n", lNId);
			path.push_back(lNId);
			valid_ids[lNId - lStartN] = false; // exclude from further searches (avoids cycles)
			path_times.push_back(time);
		};

		// store eligibility e_i for neuron lNId (relative index), rejecting
		// out-of-range values and neurons outside the group
		auto assignEligibility = [&](int lNId, float e_i) {
#ifdef LN_ELIGIBILITY_INSEARCH // do nothing for new search
			KERNEL_DEBUG("e[%d] = %f\n", lNId, e_i);
			if (e_i > 1.0f) {
				KERNEL_DEBUG("WARNING: Rejected (e_i > 1.0)");
				return;
			}
			if (lNId < lStartN || lNId > lEndN) {
				KERNEL_DEBUG("WARNING: Reject as NId is not part of the group %d", lNId);
				return;
			}
			auto e_i_prev = eligibility[lNId - lStartN];
			// NOTE(review): both branches assign the same value, so the
			// condition has no effect; the first branch looks like it was
			// meant to behave differently (e.g. keep the previous value) —
			// confirm the intended wave-front semantics.
			if (e_i_prev > 0.0f && e_i_prev > e_i)
				eligibility[lNId - lStartN] = e_i; // fix wave front
			else
				eligibility[lNId - lStartN] = e_i; // assign at relative NId
#endif
		};

#ifndef LN_ELIGIBILITY_INSEARCH
		// NOTE(review): this variant is compiled out — LN_ELIGIBILITY_INSEARCH
		// is defined above. Unlike assignEligibility, the first wave hit wins.
		auto assignEligibility2 = [&](int lNId, float e_i) {
			KERNEL_DEBUG("e[%d] = %f\n", lNId, e_i);
			if (e_i > 1.0f) {
				KERNEL_DEBUG("WARNING: Rejected (e_i > 1.0)\n");
				return;
			}
			//wavefront the first hit of the wave define the eligibility
			if (eligibility[lNId - lStartN] > 0.0f)
				return;
			eligibility[lNId - lStartN] = e_i; // assign at relative NId
		};
#endif

		// backward search starts at the goal neuron
		auto current = goalNId;


		// initial iteration
		auto sCD2 = rtD.spikeCountD2Sec;
		auto fTD2 = rtD.firingTableD2[sCD2 > 0 ? sCD2 - 1 : sCD2];
		auto tTD2 = rtD.firingTimesD2[sCD2 > 0 ? sCD2 - 1 : sCD2]; // seems to be empty all the time
		KERNEL_DEBUG("Firings D2: spikeCountD2Sec: %d firingTableD2[spikeCountD2Sec-1]: %d timeTableD2[spikeCountD2Sec-1] %d\n", sCD2, fTD2, tTD2);


		// locate the goal neuron's most recent firing; abort if it never fired
		int last = searchPrevFiring(current, rtD.spikeCountD2Sec - 1);
#ifdef LN_MOST_RECENT
		if (last == -1)
#else
		if (last == MAXINT)
#endif
			return;

		unsigned int current_time = rtD.firingTimesD2[last];
		appendToPath(current, current_time);


		// reference time: eligibility is 1 at t_0 and decays for older firings
		int t_0 = current_time;

		float base = 1.0f - 1.0f / float(tau);
		// eligibility of a firing at time t: base^(t_0 - t), rejecting
		// "not found" markers and values that would exceed 1
		auto eligibility_i = [&](int t) {
			float e_i = std::pow(base, t_0 - t);

			if (t == -1) { // most recent
				KERNEL_WARN("WARNING: Rejected (t = MAXINT)\n");
				return 0.0f;
			}
			if (e_i > 1.0f) {
				KERNEL_WARN("WARNING: Rejected (e_i > 1.0)\n");
				return 0.0f;
			}
			KERNEL_DEBUG("%dms=%f\n", t, e_i);
			return e_i;
		};

		assignEligibility(current, eligibility_i(current_time));

		// iteration: walk backwards over the pre-synaptic neurons of `current`
		// until the start neuron is reached or the path cannot be continued
		for (int iteration = 1; iteration < numN && current != startNId; iteration++) {

			// skip all firing-table entries with the same time stamp as `current`
			for (auto t_last = rtD.firingTimesD2[last];
				rtD.firingTimesD2[last] == t_last;
				last--)
				KERNEL_DEBUG("skipping firings at same time of current: %d\n", last);

			//printf("search pre with minimal delay of %d, iteration %d\n", current, iteration);
#ifdef LN_MOST_RECENT
			int j_max = -1; // index with invalid delay
#else
			int j_min = MAXINT;
#endif
			if (current < 0) {
				KERNEL_DEBUG("skipping iteration current id is negative %d\n", current);
#ifdef LN_MOST_RECENT
				last = -1;
#else
				last = MAXINT;
#endif
				break;
			}
			auto npre = rtD.Npre[current] ;
			auto cumPre = rtD.cumulativePre[current];
			// examine every pre-synaptic neuron of `current`
			for (int j = 0; j < npre; j++) {
				// pre
				auto preSynId = rtD.preSynapticIds[cumPre + j];
				auto preNId = preSynId.nId;

				if (preNId < lStartN || preNId > lEndN) {
					KERNEL_DEBUG("skipping neuron outside the group: %d\n", preNId);
					continue;
				}
				if (!valid_ids[preNId - lStartN]) {
					KERNEL_DEBUG("skipping circular neuron: %d\n", preNId);
					continue;
				}

				auto last_pre = last;

				{
					// determine the axonal delay d(preNId -> current) from the
					// postDelayInfo structure of preNId
					int lNId = preNId;
					unsigned int offset = rtD.cumulativePost[lNId];

					// search each synapse starting from from neuron lNId
					auto found = false;
					auto delay = -1;
					for (int t = 0; !found && t < glbNetworkConfig.maxDelay; t++) {
						DelayInfo dPar = rtD.postDelayInfo[lNId * (glbNetworkConfig.maxDelay + 1) + t];
						for (int idx_d = dPar.delay_index_start; !found && idx_d < (dPar.delay_index_start + dPar.delay_length); idx_d++) {
							SynInfo post_info = rtD.postSynapticIds[offset + idx_d];
							int lNIdPost = GET_CONN_NEURON_ID(post_info);
							if (lNIdPost == current) {
								found = true;
								delay = t + 1; // delay slot t corresponds to t+1 ms
								KERNEL_DEBUG("d(%d, %d) = %d\n", lNId, lNIdPost, delay);
							}
						}
					}
					// skip firings too recent to have caused current's firing
					// (they still get an eligibility value)
					while (last_pre > 0 && rtD.firingTimesD2[last_pre] > current_time - delay) {
						KERNEL_DEBUG("skipping firings at frame %d of currnet_time %d\n", last_pre, rtD.firingTimesD2[last]);
						assignEligibility(rtD.firingTableD2[last_pre], eligibility_i(rtD.firingTimesD2[last_pre])); // Fix assign eligibility for skipped tho
						last_pre--;
					}

				}

				// search for pos in firing
				int j_fired = searchPrevFiring(preNId, last_pre);
#ifdef LN_MOST_RECENT
				if (j_fired == -1) // Most recent
#else
				if (j_fired == MAXINT) // || j_fired == 2147483646)
#endif
					continue;
				assignEligibility(preNId, eligibility_i(rtD.firingTimesD2[j_fired]));
				// track the most recent (LN_MOST_RECENT) or earliest firing
				// index among all pre-synaptic candidates
				if (j == 0)
#ifdef LN_MOST_RECENT
					j_max = j_fired;
#else
					j_min = j_fired;
#endif
				else {


#ifdef LN_MOST_RECENT
					j_max = std::max(j_max, j_fired);
#else
					j_min = std::min(j_min, j_fired);
#endif
				}
			}
#ifdef LN_MOST_RECENT
			if (j_max < 0) {
#else
			if (j_min == MAXINT) {
#endif
				KERNEL_INFO("None of the pre was found. Path was incomplete. Break");
				break;
			}
			// continue from the selected pre-synaptic firing
#ifdef LN_MOST_RECENT
			last = j_max;
#else
			last = j_min;
#endif
			current = rtD.firingTableD2[last];
			current_time = rtD.firingTimesD2[last];
			appendToPath(current, current_time);
		}

		// log the path in chronological order (it was collected goal-first)
		std::ostringstream string_stream;
		for (auto iter = path.rbegin(); iter < path.rend(); iter++) {
			if (iter != path.rbegin())
				string_stream << ",";
			string_stream << *iter;
		}

		KERNEL_INFO("path: %s", string_stream.str().c_str());


#ifndef LN_ELIGIBILITY_INSEARCH
		// NOTE(review): dead code — LN_ELIGIBILITY_INSEARCH is defined above,
		// so this alternative "wave front" eligibility pass never compiles.
		//wave front
		{
			// initial iteration
			unsigned i_goal = 0, t_goal = 0, i_start = 0, t_start = 0;

			KERNEL_DEBUG("Firings D2: spikeCountD2Sec: %d firingTableD2[spikeCountD2Sec-1]: %d timeTableD2[spikeCountD2Sec-1] %d\n", sCD2, fTD2, tTD2);

			// seek last firing of goalNId
			for (unsigned last = rtD.spikeCountD2Sec - 1; last >= 0; last--) {
				if (rtD.firingTableD2[last] == goalNId) {
					i_goal = last;
					t_goal = rtD.firingTimesD2[last];
					KERNEL_INFO("Goal firings: firingTableD2[%d]=%d, timeTableD2[%d]=%d\n", i_goal, goalNId, i_goal, t_goal);
					break;
				}
				if (last == 0) // unsigned
					break;
			}

			// seek first firing of startNId
			auto start_t = 1; //
			// seek first firing before start_t
			for (unsigned last = rtD.spikeCountD2Sec - 1;
				last >= 0 && rtD.firingTimesD2[last] >= start_t;
				last--)
				if(last==0) // unsigned
					break;
			// search first firing
			for (unsigned first = std::max(0, last); first < i_goal; first++) {
				if (rtD.firingTableD2[first] == startNId) {
					i_start = first;
					t_start = rtD.firingTimesD2[first];
					KERNEL_INFO("Start firings: firingTableD2[%d]=%d, firingTimesD2[%d]=%d\n", i_start, startNId, i_start, t_start);
					break;
				}
				if (last == 0) // unsigned
					break;
			}

			// assign eligibility to every group neuron that fired between start and goal
			for (unsigned i = i_start; rtD.firingTimesD2[i] <= t_goal; i++) {
				auto firedNId = rtD.firingTableD2[i];
				//ensure firedNId belongs to current group
				if (firedNId >= lStartN && firedNId <= lEndN) {
					assignEligibility2(firedNId, eligibility_i(rtD.firingTimesD2[i]));
				}
			}

		}

#endif

	}
1088 
1089 #define PATCH_updateDelays_PostNId
1090 #define PATCH_updateDelays_PostNId_Break
1091 
1092 
1093 /*
1094  * snn_manager.cpp SNN::generateConnectionRuntime
1095  *
1096  */
1097 bool SNN::updateDelays_CPU(int netId, int gGrpIdPre, int gGrpIdPost, std::vector<std::tuple<int, int, uint8_t>> connDelays) {
1098 
1099  // snatch old delays
1100  int netIdPost = groupConfigMDMap[gGrpIdPost].netId;
1101  int lGrpIdPost = groupConfigMDMap[gGrpIdPost].lGrpId;
1102  int lGrpIdPre = -1;
1103 
1104  for (int lGrpId = 0; lGrpId < networkConfigs[netIdPost].numGroupsAssigned; lGrpId++)
1105  if (groupConfigs[netIdPost][lGrpId].gGrpId == gGrpIdPre) {
1106  lGrpIdPre = lGrpId;
1107  break;
1108  }
1109  assert(lGrpIdPre != -1);
1110 
1111  int numPreN = groupConfigMap[gGrpIdPre].numN;
1112  int numPostN = groupConfigMap[gGrpIdPost].numN;
1113 
1114  //Cache group boundaries
1115  int lStartNIdPre = groupConfigs[netIdPost][lGrpIdPre].lStartN;
1116  int lEndNIdPre = groupConfigs[netIdPost][lGrpIdPre].lEndN;
1117  int lStartNIdPost = groupConfigs[netIdPost][lGrpIdPost].lStartN;
1118  int lEndNIdPost = groupConfigs[netIdPost][lGrpIdPost].lEndN;
1119 
1120  uint8_t* delays = new uint8_t[(numPreN + 1) * (numPostN + 1)];
1121  memset(delays, 0, numPreN * numPostN);
1122  for (int lNIdPre = groupConfigs[netIdPost][lGrpIdPre].lStartN; lNIdPre <= groupConfigs[netIdPost][lGrpIdPre].lEndN; lNIdPre++) {
1123  unsigned int offset = managerRuntimeData.cumulativePost[lNIdPre];
1124  for (int t = 0; t < glbNetworkConfig.maxDelay; t++) {
1125  DelayInfo dPar = managerRuntimeData.postDelayInfo[lNIdPre * (glbNetworkConfig.maxDelay + 1) + t];
1126  for (int idx_d = dPar.delay_index_start; idx_d < (dPar.delay_index_start + dPar.delay_length); idx_d++) {
1127  // get synaptic info
1128  SynInfo postSynInfo = managerRuntimeData.postSynapticIds[offset + idx_d];
1129  // get local post neuron id
1130  int lNIdPost = GET_CONN_NEURON_ID(postSynInfo);
1131  assert(lNIdPost < glbNetworkConfig.numN);
1132  if (lNIdPost >= groupConfigs[netIdPost][lGrpIdPost].lStartN && lNIdPost <= groupConfigs[netIdPost][lGrpIdPost].lEndN) {
1133  delays[(lNIdPre - groupConfigs[netIdPost][lGrpIdPre].lStartN) + numPreN * (lNIdPost - groupConfigs[netIdPost][lGrpIdPost].lStartN)] = t + 1;
1134  }
1135  }
1136  }
1137  }
1138 
1139  // access to serialized storage, grouped by pre
1140  auto getDelay = [&](int pre, int post) { return delays[post * numPostN + pre]; };
1141  auto setDelay = [&](int pre, int post, int8_t delay) { delays[post * numPostN + pre] = delay; };
1142 
1143 #ifdef DEBUG_updateDelays_CPU
1144  // iterate over pre and print the delay of each post connection
1145  auto printDelays = [&]() {
1146  for (int i = 0; i < numPreN; i++) {
1147  for (int j = 0; j < numPostN; j++) {
1148  int d = getDelay(i, j);
1149  if (d > 0)
1150  printf("pre:%d post:%d delay:%d\n", i, j, d);
1151  }
1152  }
1153  };
1154 
1155  const int buff_len = 30000;
1156  char buffer[buff_len];
1157 #endif
1158 
1159  //auto iter = connDelays.begin();
1160  for (auto iter = connDelays.begin(); iter != connDelays.end(); iter++)
1161  {
1162 #ifdef DEBUG_updateDelays_CPU
1163  printDelays();
1164 #endif
1165  std::map<int, int> GLoffset; // global nId to local nId offset
1166  std::map<int, int> GLgrpId; // global grpId to local grpId offset
1167 
1168  // load offset between global neuron id and local neuron id
1169  for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
1170  GLoffset[grpIt->gGrpId] = grpIt->GtoLOffset;
1171  GLgrpId[grpIt->gGrpId] = grpIt->lGrpId;
1172  }
1173 
1174  ConnectionInfo connInfo;
1175  connInfo.grpSrc = lGrpIdPre;
1176  connInfo.grpDest = lGrpIdPost;
1177  connInfo.initWt = -1.0f;
1178  connInfo.maxWt = -1.0f;
1179  connInfo.connId = -1;
1180  connInfo.preSynId = -1;
1181 
1182  int post_pos, pre_pos;
1183  enum { left, right, none } direction;
1184 
1185  {
1186  // new delay
1187  connInfo.nSrc = std::get<0>(*iter);
1188  connInfo.nDest = std::get<1>(*iter);
1189  connInfo.delay = std::get<2>(*iter);
1190 #ifdef DEBUG_updateDelays_CPU
1191  printEntrails(buffer, buff_len, 8, gGrpIdPre, gGrpIdPost);
1192  printf("before pre=%d, post=%d delay=%d\n%s\n", connInfo.nSrc, connInfo.nDest, connInfo.delay, buffer);
1193 #endif
1194 
1195  // old delay
1196  int old_delay = getDelay(connInfo.nSrc, connInfo.nDest);
1197 
1198  // direction
1199  if (connInfo.delay < old_delay)
1200  direction = left;
1201  else if (connInfo.delay > old_delay)
1202  direction = right;
1203  else
1204  direction = none;
1205 
1206 
1207  // translate relative indices to local
1208  connInfo.nSrc += lStartNIdPre;
1209  connInfo.nDest += lStartNIdPost;
1210 
1211  connInfo.srcGLoffset = GLoffset[connInfo.nSrc];
1212  int lNIdPre = connInfo.nSrc + GLoffset[connInfo.grpSrc];
1213  unsigned int offset = runtimeData[netId].cumulativePre[lNIdPre];
1214 
1215  int nId_left = -1;
1216  int delay_left = -1;
1217 
1218  int post_pos = -1;
1219  int pre_pos = -1;
1220  int start = runtimeData[netId].cumulativePost[connInfo.nSrc]; // start index
1221  int n = runtimeData[netId].Npost[connInfo.nSrc]; // neuron has n synapses
1222 
1223  if (direction == left) { // down
1224  // search post_pos of syn to nDest from end
1225  for (int pos = start + n - 1; pos >= start; pos--) // up
1226  if (runtimeData[netId].postSynapticIds[pos].nId == connInfo.nDest) {
1227  post_pos = pos;
1228  break;
1229  }
1230  // move left (up) while higher delay
1231 
1232  //int moved = 0;
1233  while (post_pos > 0)
1234  {
1235  post_pos--;
1236 #ifdef PATCH_updateDelays_ConnGroup
1237  if (runtimeData[netId].postSynapticIds[post_pos].gsId != 0)
1238  continue;
1239 #endif
1240  nId_left = runtimeData[netId].postSynapticIds[post_pos].nId;
1241 #ifdef PATCH_updateDelays_PostNId
1242  if (nId_left < lStartNIdPost || nId_left > lEndNIdPost)
1243 #ifndef PATCH_updateDelays_PostNId_Break
1244  continue;
1245 #else
1246  break;
1247 #endif
1248 #endif
1249  delay_left = getDelay(connInfo.nSrc - lStartNIdPre, nId_left - lStartNIdPost); // rel id
1250  if (connInfo.delay < delay_left) {
1251  auto postSynapticIds_synId = runtimeData[netId].postSynapticIds[post_pos];
1252  runtimeData[netId].postSynapticIds[post_pos] = runtimeData[netId].postSynapticIds[post_pos + 1];
1253  runtimeData[netId].postSynapticIds[post_pos + 1] = postSynapticIds_synId;
1254  }
1255  else
1256  break;
1257  }
1258  }
1259  else if (direction == right) { // down
1260 
1261  // search position of syn to nDest from start
1262  for (int pos = start; pos < start + n; pos++) // down
1263  if (runtimeData[netId].postSynapticIds[pos].nId == connInfo.nDest) {
1264  post_pos = pos;
1265  break;
1266  }
1267 
1268  // move right (down) while lower delay
1269  while (post_pos < start + n - 1) {
1270  post_pos++;
1271 #ifdef PATCH_updateDelays_ConnGroup
1272  if (runtimeData[netId].postSynapticIds[post_pos].gsId != 0)
1273  continue;
1274 #endif
1275  nId_left = runtimeData[netId].postSynapticIds[post_pos].nId;
1276 #ifdef PATCH_updateDelays_PostNId
1277  if (nId_left < lStartNIdPost || nId_left > lEndNIdPost)
1278 #ifndef PATCH_updateDelays_PostNId_Break
1279  continue;
1280 #else
1281  break;
1282 #endif
1283 #endif
1284  delay_left = getDelay(connInfo.nSrc - lStartNIdPre, nId_left - lStartNIdPost); // rel id
1285  if (connInfo.delay > delay_left) {
1286  auto postSynapticIds_synId = runtimeData[netId].postSynapticIds[post_pos];
1287 
1288  runtimeData[netId].postSynapticIds[post_pos] = runtimeData[netId].postSynapticIds[post_pos - 1];
1289  runtimeData[netId].postSynapticIds[post_pos - 1] = postSynapticIds_synId;
1290  }
1291  else
1292  break;
1293  }
1294  }
1295  else
1296  continue; // skip nop
1297 
1298  for (int synId = runtimeData[netId].cumulativePost[connInfo.nSrc]; synId < runtimeData[netId].Npost[connInfo.nSrc] + runtimeData[netId].cumulativePost[connInfo.nSrc]; synId++) {
1299  SynInfo& postSynInfo = runtimeData[netId].postSynapticIds[synId];
1300  int nId = postSynInfo.nId;
1301  int preSynId = GET_CONN_SYN_ID(postSynInfo);
1302  int pre_pos = runtimeData[netId].cumulativePre[nId] + preSynId;
1303  SynInfo& preSynInfo = runtimeData[netId].preSynapticIds[pre_pos];
1304 #ifdef PATCH_updateDelays_ConnGroup
1305  if (preSynInfo.gsId != 0)
1306  continue;
1307 #endif
1308  preSynInfo.gsId = synId - runtimeData[netId].cumulativePost[connInfo.nSrc];
1309 #define SET_CONN_GRP_ID(val, grpId) ((grpId << NUM_SYNAPSE_BITS) | GET_CONN_SYN_ID(val))
1310  preSynInfo.gsId = SET_CONN_GRP_ID(preSynInfo, lGrpIdPre);
1311  }
1312 
1313 #ifdef DEBUG_updateDelays_CPU
1314  printf("%d\n", post_pos);
1315 #endif
1316 
1317  // old postDelayInfo
1318  int t_old = old_delay - 1; // transform delay in ms to offset: 1 ms becomes 0, 2 ms becomes 1, ..
1319  DelayInfo& dPar_old = runtimeData[netId].postDelayInfo[lNIdPre * (glbNetworkConfig.maxDelay + 1) + t_old];
1320 
1321  if (dPar_old.delay_length >= 0) {
1322  dPar_old.delay_length--; // decrement
1323  dPar_old.delay_index_start = 0; // reset entry
1324  }
1325  else {
1326  KERNEL_ERROR("Post-synaptic delay was not sorted correctly pre_id=%d, offset=%d", lNIdPre, offset);
1327  }
1328 
1329  int t_new = connInfo.delay - 1; // transform delay in ms to offset: 1 ms becomes 0, 2 ms becomes 1, ..
1330  DelayInfo& dPar_new = runtimeData[netId].postDelayInfo[lNIdPre * (glbNetworkConfig.maxDelay + 1) + t_new];
1331 
1332  if (dPar_new.delay_length >= 0) {
1333  dPar_new.delay_length++; // decrement
1334  dPar_old.delay_index_start = 0; // reset entry
1335  }
1336  else {
1337  KERNEL_ERROR("Post-synaptic delay was not sorted correctly pre_id=%d, offset=%d", lNIdPre, offset);
1338  }
1339 
1340  offset = 0;
1341  for (int t = 0; t < glbNetworkConfig.maxDelay + 1; t++) {
1342  DelayInfo& dPar = runtimeData[netId].postDelayInfo[lNIdPre * (glbNetworkConfig.maxDelay + 1) + t];
1343  if (dPar.delay_length > 0) {
1344  dPar.delay_index_start = offset;
1345  offset += dPar.delay_length;
1346  }
1347  }
1348 #ifdef DEBUG_updateDelays_CPU
1349  printEntrails(buffer, buff_len, 8, gGrpIdPre, gGrpIdPost);
1350  printf("after pre=%d, post=%d delay=%d\n%s\n", connInfo.nSrc, connInfo.nDest, connInfo.delay, buffer);
1351 #endif
1352 
1353  if (offset == n) {
1354  setDelay(connInfo.nSrc, connInfo.nDest, connInfo.delay); // update delay cache
1355  }
1356  else
1357  {
1358  printf("WARNING: skipping setDelay (offset!=n): pre=%d, post=%d delay=%d\n",
1359  connInfo.nSrc, connInfo.nDest, connInfo.delay);
1360  }
1361 
1362  }
1363  }
1364 
1365  delete[] delays;
1366 
1367  return false;
1368 }
1369 
1370 
1371 
// Dumps the connectivity "entrails" of a pre/post group pair into `buffer`:
// per-neuron Npost/Npre/cumulativePost/cumulativePre counters, the decoded
// postSynapticIds and preSynapticIds tables, and the postDelayInfo matrix
// (delay_index_start, delay_length per delay slot).
//
// \param buffer     caller-provided output buffer; overwritten from the start
// \param length     capacity of `buffer` in bytes
// \param netId      local network id whose runtime data is dumped
// \param lGrpIdPre  local id of the pre-synaptic group
// \param lGrpIdPost local id of the post-synaptic group
//
// NOTE(review): relies on strcpy_s/strcat_s/sprintf_s (MSVC / C11 Annex K) —
// presumably provided by a project shim on non-Windows builds; confirm.
void SNN::printEntrails_CPU(char* buffer, unsigned length, int netId, int lGrpIdPre, int lGrpIdPost) {

	const int lineBufferLength = 1024;
	char lineBuffer[lineBufferLength]; // one formatted line at a time, appended to `buffer`

	//int netIdPost = groupConfigMDMap[gGrpIdPost].netId;
	//int lGrpIdPost = groupConfigMDMap[gGrpIdPost].lGrpId;
	//int lGrpIdPre = -1;

	//for (int lGrpId = 0; lGrpId < networkConfigs[netIdPost].numGroupsAssigned; lGrpId++)
	//	if (groupConfigs[netIdPost][lGrpId].gGrpId == gGrpIdPre) {
	//		lGrpIdPre = lGrpId;
	//		break;
	//	}
	//assert(lGrpIdPre != -1);

	// Manager asserts netIdPre == netIdPost;


	//int numPreN = groupConfigMap[gGrpIdPre].numN; // already used below
	//int numPostN = groupConfigMap[gGrpIdPost].numN;
	int numPostN = groupConfigs[netId][lGrpIdPost].numN;

	//Cache group boundaries
	int lStartNIdPre = groupConfigs[netId][lGrpIdPre].lStartN;
	int lEndNIdPre = groupConfigs[netId][lGrpIdPre].lEndN;
	int lStartNIdPost = groupConfigs[netId][lGrpIdPost].lStartN;
	int lEndNIdPost = groupConfigs[netId][lGrpIdPost].lEndN;



	std::map<int, int> GLoffset; // global nId to local nId offset
	std::map<int, int> GLgrpId; // global grpId to local grpId offset

	// load offset between global neuron id and local neuron id
	for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
		GLoffset[grpIt->gGrpId] = grpIt->GtoLOffset;
		GLgrpId[grpIt->gGrpId] = grpIt->lGrpId;
	}

	int numN = glbNetworkConfig.numN;
	int maxDelay = glbNetworkConfig.maxDelay;

	strcpy_s(buffer, length, ""); // init buffer
	// append the current lineBuffer content to the output buffer
	auto append = [&]() {strcat_s(buffer, length, lineBuffer); };

	// section 1: per-neuron synapse counters of the pre group
	sprintf_s(lineBuffer, lineBufferLength, "%-9s %-9s %-9s %-9s\n", "Npost", "Npre", "cumPost", "cumPre");
	append();
	for (int lNId = lStartNIdPre; lNId <= lEndNIdPre; lNId++) {
		int _Npost = runtimeData[netId].Npost[lNId];
		int _Npre = runtimeData[netId].Npre[lNId];
		int _cumulativePost = runtimeData[netId].cumulativePost[lNId];
		int _cumulativePre = runtimeData[netId].cumulativePre[lNId];
		sprintf_s(lineBuffer, lineBufferLength, "[%3d] %3d [%3d] %3d [%3d] %3d [%3d] %3d\n", lNId, _Npost, lNId, _Npre, lNId, _cumulativePost, lNId, _cumulativePre);
		append();
	}

	int start = 0;

	// Manager asserts netIdPre == netIdPost;
	int numPostSynapses = groupConfigs[netId][lGrpIdPre].numPostSynapses;
	int numPreSynapses = groupConfigs[netId][lGrpIdPost].numPreSynapses;


	// section 2: decoded postSynapticIds entries
	// NOTE(review): iterates [0, numPostSynapses) of the flat table — assumes
	// the group's synapses start at index 0; confirm for multi-group networks.
	sprintf_s(lineBuffer, lineBufferLength, "\n%s (%s, %s, %s)\n", "postSynapticIds", "connectionGroupId", "synapseId", "postNId");
	append();
	for (int pos = start; pos < numPostSynapses; pos++) { // maybe some kind of max, to to pos_post, pos_pre
		auto postSynInfo = runtimeData[netId].postSynapticIds[pos]; // post_pos
		int lNId = GET_CONN_NEURON_ID(postSynInfo);
		int grpId = GET_CONN_GRP_ID(postSynInfo);
		int synId = GET_CONN_SYN_ID(postSynInfo);
		sprintf_s(lineBuffer, lineBufferLength, "[%3d] {%3d %3d} %3d \n", pos, grpId, synId, lNId);
		append();
	}

	// section 3: decoded preSynapticIds entries (same indexing caveat as above)
	sprintf_s(lineBuffer, lineBufferLength, "\n%s (%s, %s, %s)\n", "preSynapticIds", "connectionGroupId", "synapseId", "preNId");
	append();
	for (int pos = start; pos < numPreSynapses; pos++) { // maybe some kind of max, to to pos_post, pos_pre
		auto preSynInfo = runtimeData[netId].preSynapticIds[pos]; // post_pos
		int lNId = GET_CONN_NEURON_ID(preSynInfo);
		int grpId = GET_CONN_GRP_ID(preSynInfo);
		int synId = GET_CONN_SYN_ID(preSynInfo);
		sprintf_s(lineBuffer, lineBufferLength, "[%3d] {%3d %3d} %3d\n", pos, grpId, synId, lNId);
		append();
	}

	//int numPreN = groupConfigMap[gGrpIdPre].numN;
	int numPreN = groupConfigs[netId][lGrpIdPre].numN;

	// section 4: postDelayInfo matrix — one row per pre neuron, one
	// [delay_index_start, delay_length] cell per delay slot (1..maxDelay+1 ms)
	sprintf_s(lineBuffer, lineBufferLength, "\npostDelayInfo (pre x d)\n "); append();
	for (int t = 0; t < maxDelay + 1; t++) {
		sprintf_s(lineBuffer, lineBufferLength, "%4d ", t+1); append();
	}
	sprintf_s(lineBuffer, lineBufferLength, "\n"); append();
	for (int lNId = lStartNIdPre; lNId <= lEndNIdPre; lNId++)
	{
		sprintf_s(lineBuffer, lineBufferLength, "%2d ", lNId); append();
		for (int t = 0; t < maxDelay + 1; t++) {
			sprintf_s(lineBuffer, lineBufferLength, "[%d,%d]",
				runtimeData[netId].postDelayInfo[lNId * (maxDelay + 1) + t].delay_index_start,
				runtimeData[netId].postDelayInfo[lNId * (maxDelay + 1) + t].delay_length); append();
		}
		sprintf_s(lineBuffer, lineBufferLength, "\n"); append();
	}

}
1479 
1480 
1481 #endif
1482 
1483 
1484 
1485 
/*!
 * \brief Applies the LTP (pre-before-post) half of STDP to all plastic synapses onto a neuron that just fired (CPU backend).
 *
 * Iterates over the Npre_plastic incoming synapses of neuron \p lNId and, based on the time elapsed
 * since each synapse's last pre-synaptic spike (stdp_tDiff), accumulates a weight change into wtChange:
 *  - excitatory synapses (maxSynWt >= 0) with E-STDP enabled: EXP_CURVE or TIMING_BASED_CURVE
 *  - inhibitory synapses (maxSynWt < 0) with I-STDP enabled: EXP_CURVE or PULSE_CURVE
 *
 * \param lNId   local ID of the post-synaptic neuron that fired at simTime
 * \param lGrpId local group ID of that neuron (used to read the group's neuromodulator levels)
 * \param netId  network (partition) ID whose runtime data is read and updated
 */
void SNN::updateLTP(int lNId, int lGrpId, int netId) {
	unsigned int pos_ij = runtimeData[netId].cumulativePre[lNId]; // the index of pre-synaptic neuron
	short connId;
	for(int j = 0; j < runtimeData[netId].Npre_plastic[lNId]; pos_ij++, j++) {
		// time since the last pre-synaptic spike on this synapse; positive => pre fired before post (LTP window)
		int stdp_tDiff = (simTime - runtimeData[netId].synSpikeTime[pos_ij]);
		// a negative difference is only legal for the "never spiked" sentinel MAX_SIMULATION_TIME
		assert(!((stdp_tDiff < 0) && (runtimeData[netId].synSpikeTime[pos_ij] != MAX_SIMULATION_TIME)));

		// scales nm in place by the group's current concentration of the selected neuromodulator;
		// any other index leaves nm unchanged
		// NOTE(review): the lambda is redefined on every loop iteration; hoisting it above the loop would be equivalent
		auto weight_nm = [&](float& nm, int i_nm) {
			switch (i_nm) { // index
			case NM_DA: nm *= runtimeData[netId].grpDA[lGrpId];
				break;
			case NM_5HT: nm *= runtimeData[netId].grp5HT[lGrpId];
				break;
			case NM_ACh: nm *= runtimeData[netId].grpACh[lGrpId];
				break;
			case NM_NE: nm *= runtimeData[netId].grpNE[lGrpId];
				break;
			};
		};

		if (stdp_tDiff > 0) {
			// identify connection ID
			connId = runtimeData[netId].connIdsPreIdx[pos_ij];
			// check this is an excitatory or inhibitory synapse
			if (connectConfigMap[connId].stdpConfig.WithESTDP && runtimeData[netId].maxSynWt[pos_ij] >= 0) { // excitatory synapse
				// Handle E-STDP curve
				switch (connectConfigMap[connId].stdpConfig.WithESTDPcurve) {
				case EXP_CURVE: // exponential curve
#ifdef LN_I_CALC_TYPES
					// only apply within ~25 time constants; beyond that the exponential is negligible
					if (stdp_tDiff * connectConfigMap[connId].stdpConfig.TAU_PLUS_INV_EXC < 25) {
						if (connectConfigMap[connId].stdpConfig.WithESTDPtype == PKA_PLC_MOD) {
							// PKA/PLC neuromodulated STDP: two pathway weights, each scaled by its configured modulator level

							float nm_pka = connectConfigMap[connId].stdpConfig.W_PKA;
							weight_nm(nm_pka, connectConfigMap[connId].stdpConfig.NM_PKA);

							float nm_plc = connectConfigMap[connId].stdpConfig.W_PLC;
							weight_nm(nm_plc, connectConfigMap[connId].stdpConfig.NM_PLC);

							//printf("LTP nm_pka=%f nm_plc=%f\n", nm_pka, nm_plc);

							float a_p = connectConfigMap[connId].stdpConfig.ALPHA_PLUS_EXC;
							float tau_p_inv = connectConfigMap[connId].stdpConfig.TAU_PLUS_INV_EXC;

							// f_pka = ne * 2 * (a_p * exp(-t * tau_p_inv))
							float pka = nm_pka * 2 * STDP(stdp_tDiff, a_p, tau_p_inv);

							//f_plc = ach * (-a_p * exp(-t * tau_p_inv))
							float plc = nm_plc * STDP(stdp_tDiff, -a_p, tau_p_inv);

							runtimeData[netId].wtChange[pos_ij] += pka + plc;
						}
						else {
							// plain exponential LTP
							runtimeData[netId].wtChange[pos_ij] += STDP(stdp_tDiff, connectConfigMap[connId].stdpConfig.ALPHA_PLUS_EXC, connectConfigMap[connId].stdpConfig.TAU_PLUS_INV_EXC);
						}
					}
					break;
#else
					if (stdp_tDiff * connectConfigMap[connId].stdpConfig.TAU_PLUS_INV_EXC < 25) {
						runtimeData[netId].wtChange[pos_ij] += STDP(stdp_tDiff, connectConfigMap[connId].stdpConfig.ALPHA_PLUS_EXC, connectConfigMap[connId].stdpConfig.TAU_PLUS_INV_EXC);
					}
					break;
#endif
				case TIMING_BASED_CURVE: // sc curve
					if (stdp_tDiff * connectConfigMap[connId].stdpConfig.TAU_PLUS_INV_EXC < 25) {
						// within GAMMA: potentiate (offset OMEGA plus scaled exponential); beyond GAMMA: depress
						if (stdp_tDiff <= connectConfigMap[connId].stdpConfig.GAMMA)
							runtimeData[netId].wtChange[pos_ij] += connectConfigMap[connId].stdpConfig.OMEGA + connectConfigMap[connId].stdpConfig.KAPPA * STDP(stdp_tDiff, connectConfigMap[connId].stdpConfig.ALPHA_PLUS_EXC, connectConfigMap[connId].stdpConfig.TAU_PLUS_INV_EXC);
						else // stdp_tDiff > GAMMA
							runtimeData[netId].wtChange[pos_ij] -= STDP(stdp_tDiff, connectConfigMap[connId].stdpConfig.ALPHA_PLUS_EXC, connectConfigMap[connId].stdpConfig.TAU_PLUS_INV_EXC);
					}
					break;
				default:
					KERNEL_ERROR("Invalid E-STDP curve!");
					break;
				}
			} else if (connectConfigMap[connId].stdpConfig.WithISTDP && runtimeData[netId].maxSynWt[pos_ij] < 0) { // inhibitory synapse
				// Handle I-STDP curve
				switch (connectConfigMap[connId].stdpConfig.WithISTDPcurve) {
				case EXP_CURVE: // exponential curve
#ifdef LN_I_CALC_TYPES
					if (stdp_tDiff * connectConfigMap[connId].stdpConfig.TAU_PLUS_INV_INB < 25) { // LTP of inhibitory synapse, which decreases synapse weight
						// NOTE(review): this inhibitory branch tests WithESTDPtype (not an I-STDP-specific type)
						// before the unimplemented PKA/PLC path — confirm this is intended
						if (connectConfigMap[connId].stdpConfig.WithESTDPtype == PKA_PLC_MOD) {

							float nm_pka = connectConfigMap[connId].stdpConfig.W_PKA;
							weight_nm(nm_pka, connectConfigMap[connId].stdpConfig.NM_PKA);

							float nm_plc = connectConfigMap[connId].stdpConfig.W_PLC;
							weight_nm(nm_plc, connectConfigMap[connId].stdpConfig.NM_PLC);

							assert(0); // not implemented
						}
						else
						{
							runtimeData[netId].wtChange[pos_ij] -= STDP(stdp_tDiff, connectConfigMap[connId].stdpConfig.ALPHA_PLUS_INB, connectConfigMap[connId].stdpConfig.TAU_PLUS_INV_INB);
						}
					}
					break;
#else
					if (stdp_tDiff * connectConfigMap[connId].stdpConfig.TAU_PLUS_INV_INB < 25) { // LTP of inhibitory synapse, which decreases synapse weight
						runtimeData[netId].wtChange[pos_ij] -= STDP(stdp_tDiff, connectConfigMap[connId].stdpConfig.ALPHA_PLUS_INB, connectConfigMap[connId].stdpConfig.TAU_PLUS_INV_INB);
					}
					break;
#endif
				case PULSE_CURVE: // pulse curve
					if (stdp_tDiff <= connectConfigMap[connId].stdpConfig.LAMBDA) { // LTP of inhibitory synapse, which decreases synapse weight
						runtimeData[netId].wtChange[pos_ij] -= connectConfigMap[connId].stdpConfig.BETA_LTP;
						//printf("I-STDP LTP\n");
					} else if (stdp_tDiff <= connectConfigMap[connId].stdpConfig.DELTA) { // LTD of inhibitory synapse, which increases synapse weight
						runtimeData[netId].wtChange[pos_ij] -= connectConfigMap[connId].stdpConfig.BETA_LTD;
						//printf("I-STDP LTD\n");
					} else { /*do nothing*/}
					break;
				default:
					KERNEL_ERROR("Invalid I-STDP curve!");
					break;
				}
			}
		}
	}
}
1605 
1606 void SNN::firingUpdateSTP(int lNId, int lGrpId, int netId) {
1607  // update the spike-dependent part of du/dt and dx/dt
1608  // we need to retrieve the STP values from the right buffer position (right before vs. right after the spike)
1609  int ind_plus = STP_BUF_POS(lNId, simTime, networkConfigs[netId].maxDelay); // index of right after the spike, such as in u^+
1610  int ind_minus = STP_BUF_POS(lNId, (simTime - 1), networkConfigs[netId].maxDelay); // index of right before the spike, such as in u^-
1611 
1612  // du/dt = -u/tau_F + U * (1-u^-) * \delta(t-t_{spk})
1613 
1614 #ifdef LN_I_CALC_TYPES
1615  auto& config = groupConfigs[netId][lGrpId];
1616  float stp_u = config.STP_U;
1617  if (config.WithNM4STP) {
1618  std::vector<float> nm;
1619  nm.push_back(groupConfigs[netId][lGrpId].activeDP ? runtimeData[netId].grpDA[lGrpId] : 0.f); // baseDP is 0 at t=0 ???
1620  nm.push_back(groupConfigs[netId][lGrpId].active5HT ? runtimeData[netId].grp5HT[lGrpId] : 0.f);
1621  nm.push_back(groupConfigs[netId][lGrpId].activeACh ? runtimeData[netId].grpACh[lGrpId] : 0.f);
1622  nm.push_back(groupConfigs[netId][lGrpId].activeNE ? runtimeData[netId].grpNE[lGrpId] : 0.f);
1623  auto& config = groupConfigs[netId][lGrpId]; // LN2021 \todo refact duplicate
1624  float w_stp_u = 0.0f;
1625  for (int i = 0; i < NM_NE + 1; i++) {
1626  w_stp_u += nm[i] * config.wstpu[i];
1627  }
1628  w_stp_u *= config.wstpu[NM_NE + 1];
1629  stp_u *= w_stp_u + config.wstpu[NM_NE + 2];
1630  //KERNEL_DEBUG("grp[%d] stp_u %f\n", lGrpId, stp_u);
1631  }
1632  runtimeData[netId].stpu[ind_plus] += stp_u * (1.0f - runtimeData[netId].stpu[ind_minus]);
1633 #else
1634  runtimeData[netId].stpu[ind_plus] += groupConfigs[netId][lGrpId].STP_U * (1.0f - runtimeData[netId].stpu[ind_minus]);
1635 #endif
1636 
1637  // dx/dt = (1-x)/tau_D - u^+ * x^- * \delta(t-t_{spk})
1638  runtimeData[netId].stpx[ind_plus] -= runtimeData[netId].stpu[ind_plus] * runtimeData[netId].stpx[ind_minus];
1639 }
1640 
1641 void SNN::resetFiredNeuron(int lNId, short int lGrpId, int netId) {
1642  if (groupConfigs[netId][lGrpId].WithSTDP
1643 #ifdef LN_AXON_PLAST
1644  || groupConfigs[netId][lGrpId].WithAxonPlast // E-Prop Eligibility Trace
1645 #endif
1646  )
1647  runtimeData[netId].lastSpikeTime[lNId] = simTime;
1648 
1649  if (networkConfigs[netId].sim_with_homeostasis) {
1650  // with homeostasis flag can be used here.
1651  runtimeData[netId].avgFiring[lNId] += 1000 / (groupConfigs[netId][lGrpId].avgTimeScale * 1000);
1652  }
1653 }
1654 
1655 bool SNN::getPoissonSpike(int lNId, int netId) {
1656  // Random number value is less than the poisson firing probability
1657  // if poisson firing probability is say 1.0 then the random poisson ptr
1658  // will always be less than 1.0 and hence it will continiously fire
1659  return runtimeData[netId].randNum[lNId - networkConfigs[netId].numNReg] * 1000.0f
1660  < runtimeData[netId].poissonFireRate[lNId - networkConfigs[netId].numNReg];
1661 }
1662 
1663 bool SNN::getSpikeGenBit(unsigned int nIdPos, int netId) {
1664  const int nIdBitPos = nIdPos % 32;
1665  const int nIdIndex = nIdPos / 32;
1666  return ((runtimeData[netId].spikeGenBits[nIdIndex] >> nIdBitPos) & 0x1);
1667 }
1668 
/*
* The sequence of handling a post-synaptic spike in CPU mode:
* P1. Load wt into change (temporary variable)
* P2. Modulate change by STP (if enabled)
* P3-1. Modulate change by d_mulSynSlow and d_mulSynFast
* P3-2. Accumulate g(AMPA,NMDA,GABAa,GABAb) or current
* P4. Update synSpikeTime
* P5. Update DA,5HT,ACh,NE accordingly
* P6. Update STDP wtChange
* P7. Update v(voltage), u(recovery)
* P8. Update homeostasis
* P9. Decay and log DA,5HT,ACh,NE
*/
/*!
 * \brief Delivers one pre-synaptic spike to synapse \p synId of post-synaptic neuron \p postNId (CPU backend).
 *
 * Loads the synaptic weight, optionally modulates it by the pre-group's short-term plasticity (STP),
 * accumulates it into the post-synaptic conductances (COBA-style) or current (CUBA-style), records the
 * spike time on the synapse, releases neuromodulators for DA/5HT/ACh/NE-targeting pre-neurons, and
 * applies the LTD (post-before-pre) half of STDP.
 *
 * \param preNId  local ID of the pre-synaptic neuron
 * \param postNId local ID of the post-synaptic (regular) neuron
 * \param synId   index of this synapse within postNId's pre-synaptic list
 * \param tD      conduction delay of this synapse (0 for RangeDelay(1) connections, see note below)
 * \param netId   network (partition) ID whose runtime data is read and updated
 */
void SNN::generatePostSynapticSpike(int preNId, int postNId, int synId, int tD, int netId) {
	// get the cumulative position for quick access
	unsigned int pos = runtimeData[netId].cumulativePre[postNId] + synId;
	assert(postNId < networkConfigs[netId].numNReg); // \FIXME is this assert supposed to be for pos?

	// get group id of pre- / post-neuron
	short int post_grpId = runtimeData[netId].grpIds[postNId];
	short int pre_grpId = runtimeData[netId].grpIds[preNId];

	unsigned int pre_type = groupConfigs[netId][pre_grpId].Type;

	// get connect info from the cumulative synapse index for mulSynFast/mulSynSlow (requires less memory than storing
	// mulSynFast/Slow per synapse or storing a pointer to grpConnectInfo_s)
	// mulSynFast will be applied to fast currents (either AMPA or GABAa)
	// mulSynSlow will be applied to slow currents (either NMDA or GABAb)
	short int mulIndex = runtimeData[netId].connIdsPreIdx[pos];
	assert(mulIndex >= 0 && mulIndex < numConnections);

	// P1
	// for each presynaptic spike, postsynaptic (synaptic) current is going to increase by some amplitude (change)
	// generally speaking, this amplitude is the weight; but it can be modulated by STP
	float change = runtimeData[netId].wt[pos];

	// P2
	if (groupConfigs[netId][pre_grpId].WithSTP) {
		// if pre-group has STP enabled, we need to modulate the weight
		// NOTE: Order is important! (Tsodyks & Markram, 1998; Mongillo, Barak, & Tsodyks, 2008)
		// use u^+ (value right after spike-update) but x^- (value right before spike-update)

		// dI/dt = -I/tau_S + A * u^+ * x^- * \delta(t-t_{spk})
		// I noticed that for connect(.., RangeDelay(1), ..) tD will be 0
		int ind_minus = STP_BUF_POS(preNId, (simTime-tD-1), networkConfigs[netId].maxDelay);
		int ind_plus = STP_BUF_POS(preNId, (simTime-tD), networkConfigs[netId].maxDelay);
#ifdef LN_I_CALC_TYPES
		auto& config = groupConfigs[netId][pre_grpId];
		float stp_a = config.STP_A;
		float stp_u = config.STP_U;
		if (config.WithNM4STP) {
			// weight STP_U (and derive STP_A) from the pre-group's current neuromodulator levels
			std::vector<float> nm;
			nm.push_back(groupConfigs[netId][pre_grpId].activeDP ? runtimeData[netId].grpDA[pre_grpId] : 0.f); // baseDP is 0 at t=0 ???
			nm.push_back(groupConfigs[netId][pre_grpId].active5HT ? runtimeData[netId].grp5HT[pre_grpId] : 0.f);
			nm.push_back(groupConfigs[netId][pre_grpId].activeACh ? runtimeData[netId].grpACh[pre_grpId] : 0.f);
			nm.push_back(groupConfigs[netId][pre_grpId].activeNE ? runtimeData[netId].grpNE[pre_grpId] : 0.f);
			auto& config = groupConfigs[netId][pre_grpId]; // NOTE(review): shadows the outer 'config' reference to the same element — redundant
			float w_stp_u = 0.0f;
			for (int i = 0; i < NM_NE + 1; i++) {
				w_stp_u += nm[i] * config.wstpu[i];
			}
			w_stp_u *= config.wstpu[NM_NE + 1];
			stp_u *= w_stp_u + config.wstpu[NM_NE + 2];
			stp_a = (stp_u > 0.0f) ? 1.0 / stp_u : 1.0f; // scaling factor weighted
			//KERNEL_DEBUG("grp[%d] stp_u = %f stp_a = %f\n", pre_grpId, stp_u, stp_a);
		}
		change *= stp_a * runtimeData[netId].stpu[ind_plus] * runtimeData[netId].stpx[ind_minus];
#else
		change *= groupConfigs[netId][pre_grpId].STP_A * runtimeData[netId].stpu[ind_plus] * runtimeData[netId].stpx[ind_minus];
#endif
		//printf("%d: %d[%d], numN=%d, td=%d, maxDelay_=%d, ind-=%d, ind+=%d, stpu=[%f,%f], stpx=[%f,%f], change=%f, wt=%f\n",
		//	simTime, pre_grpId, preNId,
		//	groupConfigs[netId][pre_grpId].numN, tD, networkConfigs[netId].maxDelay, ind_minus, ind_plus,
		//	runtimeData[netId].stpu[ind_minus], runtimeData[netId].stpu[ind_plus],
		//	runtimeData[netId].stpx[ind_minus], runtimeData[netId].stpx[ind_plus],
		//	change, runtimeData[netId].wt[pos]);
	}

	// P3-1, P3-2
	// update currents
	// NOTE: it's faster to += 0.0 rather than checking for zero and not updating
#ifdef LN_I_CALC_TYPES
// \todo this needs to be resolved the argumentation for the post_grpId is that the local neuronIndex is also of the post neuron !!! -_> therefore it must be the post group
	auto &groupConfig = groupConfigs[netId][post_grpId];
//	auto& groupConfig = groupConfigs[netId][pre_grpId]; // Fix LN20210912 did not solve the issue with -inf check data types float double
	switch (groupConfig.icalcType) { // \todo LN issue double check post_
	case COBA:
	case alpha1_ADK13:
		// conductance-based: route the weighted change into the receptor conductances the pre-type targets
		if (pre_type & TARGET_AMPA) // if postNId expresses AMPAR
			runtimeData[netId].gAMPA[postNId] += change * mulSynFast[mulIndex]; // scale by some factor
//if (mulSynFast[mulIndex] != 1.0f)
//	printf("mulSynFast[%d]=%f\n", mulIndex, mulSynFast[mulIndex]);
		if (pre_type & TARGET_NMDA) {
			if (groupConfig.with_NMDA_rise) {
				runtimeData[netId].gNMDA_r[postNId] += change * groupConfig.sNMDA * mulSynSlow[mulIndex];
				runtimeData[netId].gNMDA_d[postNId] += change * groupConfig.sNMDA * mulSynSlow[mulIndex];
			}
			else {
				runtimeData[netId].gNMDA[postNId] += change * mulSynSlow[mulIndex];
//if(mulSynSlow[mulIndex] != 1.0f)
//	printf("mulSynSlow[%d]=%f\n", mulIndex, mulSynSlow[mulIndex]);
			}
		}
		if (pre_type & TARGET_GABAa)
			runtimeData[netId].gGABAa[postNId] -= change * mulSynFast[mulIndex]; // wt should be negative for GABAa and GABAb
		if (pre_type & TARGET_GABAb) {
			if (groupConfig.with_GABAb_rise) {
				runtimeData[netId].gGABAb_r[postNId] -= change * groupConfig.sGABAb * mulSynSlow[mulIndex];
				runtimeData[netId].gGABAb_d[postNId] -= change * groupConfig.sGABAb * mulSynSlow[mulIndex];
			}
			else {
				runtimeData[netId].gGABAb[postNId] -= change * mulSynSlow[mulIndex];
			}
		}
		break;
	case CUBA:
		// current-based: add the weighted change directly
		runtimeData[netId].current[postNId] += change;
		break;
	case NM4W_LN21:
		runtimeData[netId].current[postNId] += change; // LN2021 \todo refact see above COBA
		break;

	//if (groupConfig.Type & EXCITATORY_NEURON) {
	//	assert(NM_UNKNOWN == NM_NE + 1);
	//	float nm = .0f; // nm weighted
	//	nm += runtimeData[netId].grpDA[post_grpId] * groupConfig.nm4w[NM_DA];
	//	nm += runtimeData[netId].grp5HT[post_grpId] * groupConfig.nm4w[NM_5HT];
	//	nm += runtimeData[netId].grpACh[post_grpId] * groupConfig.nm4w[NM_ACh];
	//	nm += runtimeData[netId].grpNE[post_grpId] * groupConfig.nm4w[NM_NE];
	//	nm *= groupConfig.nm4w[NM_UNKNOWN]; // normalize/boost
	//	runtimeData[netId].current[postNId] += change * nm;
	//};
	//break;

	default:
		; // do nothing;
	}
#else
	if (sim_with_conductances) {
		if (pre_type & TARGET_AMPA) // if postNId expresses AMPAR
			runtimeData[netId].gAMPA [postNId] += change * mulSynFast[mulIndex]; // scale by some factor
		if (pre_type & TARGET_NMDA) {
			if (sim_with_NMDA_rise) {
				runtimeData[netId].gNMDA_r[postNId] += change * sNMDA * mulSynSlow[mulIndex];
				runtimeData[netId].gNMDA_d[postNId] += change * sNMDA * mulSynSlow[mulIndex];
			} else {
				runtimeData[netId].gNMDA [postNId] += change * mulSynSlow[mulIndex];
			}
		}
		if (pre_type & TARGET_GABAa)
			runtimeData[netId].gGABAa[postNId] -= change * mulSynFast[mulIndex]; // wt should be negative for GABAa and GABAb
		if (pre_type & TARGET_GABAb) {
			if (sim_with_GABAb_rise) {
				runtimeData[netId].gGABAb_r[postNId] -= change * sGABAb * mulSynSlow[mulIndex];
				runtimeData[netId].gGABAb_d[postNId] -= change * sGABAb * mulSynSlow[mulIndex];
			} else {
				runtimeData[netId].gGABAb[postNId] -= change * mulSynSlow[mulIndex];
			}
		}
	} else {
		runtimeData[netId].current[postNId] += change;
	}
#endif

	// P4
	// remember when this synapse last carried a spike (consumed by the LTP half of STDP)
	runtimeData[netId].synSpikeTime[pos] = simTime;

	// P5
	// Got one spike from dopaminergic neuron, increase dopamine concentration in the target area
	if (pre_type & TARGET_DA) {
		//runtimeData[netId].grpDA[post_grpId] += 0.04; // LN20201002 this is bug, fix: 0.04f to avoid double conversion
		//runtimeData[netId].grpDA[post_grpId] += 0.04f; // LN20201002 this is bug, fix: 0.04f to avoid double conversion
		// \todo LN2021 increase must be at least be parameter, see base, tau
		runtimeData[netId].grpDA[post_grpId] += groupConfigs[netId][post_grpId].releaseDP;
	}
	//\todo LN2021: // see abvove: *P5.Update DA, 5HT, ACh, NE accordingly
	if (pre_type & TARGET_5HT) {
		//runtimeData[netId].grp5HT[post_grpId] += 0.04f; // \todo param
		runtimeData[netId].grp5HT[post_grpId] += groupConfigs[netId][post_grpId].release5HT;
	}
	if (pre_type & TARGET_ACH) {
		//runtimeData[netId].grpACh[post_grpId] += 0.04f; // \todo param
		runtimeData[netId].grpACh[post_grpId] += groupConfigs[netId][post_grpId].releaseACh;
	}
	if (pre_type & TARGET_NE) {
		//runtimeData[netId].grpNE[post_grpId] += 0.04f; // \todo param
		runtimeData[netId].grpNE[post_grpId] += groupConfigs[netId][post_grpId].releaseNE;
	}


	// P6
	// STDP calculation: the post-synaptic neuron fires before the arrival of a pre-synaptic spike
	if (!sim_in_testing && connectConfigMap[mulIndex].stdpConfig.WithSTDP) {
		// time since the post-neuron last fired (positive => post fired before this pre spike arrived: LTD window)
		int stdp_tDiff = (simTime - runtimeData[netId].lastSpikeTime[postNId]);

		if (stdp_tDiff >= 0) {
			if (connectConfigMap[mulIndex].stdpConfig.WithISTDP && ((pre_type & TARGET_GABAa) || (pre_type & TARGET_GABAb))) { // inhibitory syanpse
				// Handle I-STDP curve
				switch (connectConfigMap[mulIndex].stdpConfig.WithISTDPcurve) {
				case EXP_CURVE: // exponential curve
//LN2021 \todo see E-STDP
					if (stdp_tDiff * connectConfigMap[mulIndex].stdpConfig.TAU_MINUS_INV_INB < 25) { // LTD of inhibitory syanpse, which increase synapse weight
						runtimeData[netId].wtChange[pos] -= STDP(stdp_tDiff, connectConfigMap[mulIndex].stdpConfig.ALPHA_MINUS_INB, connectConfigMap[mulIndex].stdpConfig.TAU_MINUS_INV_INB);
					}
					break;
				case PULSE_CURVE: // pulse curve
					if (stdp_tDiff <= connectConfigMap[mulIndex].stdpConfig.LAMBDA) { // LTP of inhibitory synapse, which decreases synapse weight
						runtimeData[netId].wtChange[pos] -= connectConfigMap[mulIndex].stdpConfig.BETA_LTP;
					} else if (stdp_tDiff <= connectConfigMap[mulIndex].stdpConfig.DELTA) { // LTD of inhibitory syanpse, which increase synapse weight
						runtimeData[netId].wtChange[pos] -= connectConfigMap[mulIndex].stdpConfig.BETA_LTD;
					} else { /*do nothing*/ }
					break;
				default:
					KERNEL_ERROR("Invalid I-STDP curve");
					break;
				}
			} else if (connectConfigMap[mulIndex].stdpConfig.WithESTDP && ((pre_type & TARGET_AMPA) || (pre_type & TARGET_NMDA))) { // excitatory synapse
				// Handle E-STDP curve
				switch (connectConfigMap[mulIndex].stdpConfig.WithESTDPcurve) {
#ifdef LN_I_CALC_TYPES
				case EXP_CURVE: // exponential curve
					// braceless outer if: the entire if/else below is guarded by the 25-tau window
					if (stdp_tDiff * connectConfigMap[mulIndex].stdpConfig.TAU_MINUS_INV_EXC < 25)

						if (connectConfigMap[mulIndex].stdpConfig.WithESTDPtype == PKA_PLC_MOD) {
							// PKA/PLC neuromodulated LTD: pathway weights scaled by the post-group's modulator levels

							auto weight_nm = [&](float& nm, int i_nm) {
								switch (i_nm) { // index
								case NM_DA: nm *= runtimeData[netId].grpDA[post_grpId]; // LN2021 \issue
									break;
								case NM_5HT: nm *= runtimeData[netId].grp5HT[post_grpId];
									break;
								case NM_ACh: nm *= runtimeData[netId].grpACh[post_grpId];
									break;
								case NM_NE: nm *= runtimeData[netId].grpNE[post_grpId];
									break;
								};
							};

							float nm_pka = connectConfigMap[mulIndex].stdpConfig.W_PKA;
							weight_nm(nm_pka, connectConfigMap[mulIndex].stdpConfig.NM_PKA);

							float nm_plc = connectConfigMap[mulIndex].stdpConfig.W_PLC;
							weight_nm(nm_plc, connectConfigMap[mulIndex].stdpConfig.NM_PLC);

							// printf("LTD nm_pka=%f nm_plc=%f\n", nm_pka, nm_plc);

							float a_m = connectConfigMap[mulIndex].stdpConfig.ALPHA_MINUS_EXC;
							float tau_m_inv = connectConfigMap[mulIndex].stdpConfig.TAU_MINUS_INV_EXC;

							// f_pka_m = ne * (-a_m * exp(t_m * tau_p_inv))
							float pka_m = nm_pka * STDP(stdp_tDiff, -a_m, tau_m_inv); // no sign switch, see below: -= instead of +=

							//f_plc_m = ach * 2 * (a_m * exp(t_m * tau_p_inv))
							float plc_m = nm_plc * 2 * STDP(stdp_tDiff, a_m, tau_m_inv); // no sign spwtich, see below: -= instead of +=

							runtimeData[netId].wtChange[pos] += pka_m + plc_m;
						}
						else
						{
							runtimeData[netId].wtChange[pos] += STDP(stdp_tDiff, connectConfigMap[mulIndex].stdpConfig.ALPHA_MINUS_EXC, connectConfigMap[mulIndex].stdpConfig.TAU_MINUS_INV_EXC);
						}
					break;
#else
				case EXP_CURVE: // exponential curve
#endif
				case TIMING_BASED_CURVE: // sc curve
					if (stdp_tDiff * connectConfigMap[mulIndex].stdpConfig.TAU_MINUS_INV_EXC < 25)
						runtimeData[netId].wtChange[pos] += STDP(stdp_tDiff, connectConfigMap[mulIndex].stdpConfig.ALPHA_MINUS_EXC, connectConfigMap[mulIndex].stdpConfig.TAU_MINUS_INV_EXC);
					break;
				default:
					KERNEL_ERROR("Invalid E-STDP curve");
					break;
				}
			} else { /*do nothing*/ }
		}
		// a negative difference is only legal for the "never spiked" sentinel MAX_SIMULATION_TIME
		assert(!((stdp_tDiff < 0) && (runtimeData[netId].lastSpikeTime[postNId] != MAX_SIMULATION_TIME)));
	}
}
1947 
// single integration step for voltage equation of 4-param Izhikevich:
// dv = (0.04*v^2 + 5*v + 140 - u + I) * dt
inline
float dvdtIzhikevich4(float volt, float recov, float totalCurrent, float timeStep = 1.0f) {
	const float quadratic = (0.04f * volt + 5.0f) * volt + 140.0f;
	return (quadratic - recov + totalCurrent) * timeStep;
}
1953 
// single integration step for recovery equation of 4-param Izhikevich:
// du = a * (b*v - u) * dt
inline
float dudtIzhikevich4(float volt, float recov, float izhA, float izhB, float timeStep = 1.0f) {
	const float drive = izhB * volt - recov;
	return izhA * drive * timeStep;
}
1959 
// single integration step for voltage equation of 9-param Izhikevich:
// dv = (k*(v - vr)*(v - vt) - u + I) / C * dt, with invCapac = 1/C
inline
float dvdtIzhikevich9(float volt, float recov, float invCapac, float izhK, float voltRest,
	float voltInst, float totalCurrent, float timeStep = 1.0f)
{
	const float membraneTerm = izhK * (volt - voltRest) * (volt - voltInst);
	const float netDrive = membraneTerm - recov + totalCurrent;
	return netDrive * invCapac * timeStep;
}
1967 
// single integration step for recovery equation of 9-param Izhikevich:
// du = a * (b*(v - vr) - u) * dt
inline
float dudtIzhikevich9(float volt, float recov, float voltRest, float izhA, float izhB, float timeStep = 1.0f) {
	const float drive = izhB * (volt - voltRest) - recov;
	return izhA * drive * timeStep;
}
1973 
// single integration step for voltage equation of LIF neurons:
// dv = (vReset - v + gain*I + bias) / tau_m * dt
inline
float dvdtLIF(float volt, float lif_vReset, float lif_gain, float lif_bias, int lif_tau_m, float totalCurrent, float timeStep = 1.0f) {
	const float effectiveInput = (totalCurrent * lif_gain) + lif_bias;
	const float leakDrive = lif_vReset - volt + effectiveInput;
	return leakDrive / (float) lif_tau_m * timeStep;
}
1979 
1980 float SNN::getCompCurrent(int netid, int lGrpId, int lneurId, float const0, float const1) {
1981  float compCurrent = 0.0f;
1982  for (int k = 0; k < groupConfigs[netid][lGrpId].numCompNeighbors; k++) {
1983  // compartment connections are always one-to-one, which means that the i-th neuron in grpId connects
1984  // to the i-th neuron in grpIdOther
1985  int lGrpIdOther = groupConfigs[netid][lGrpId].compNeighbors[k];
1986  int lneurIdOther = lneurId - groupConfigs[netid][lGrpId].lStartN + groupConfigs[netid][lGrpIdOther].lStartN;
1987  compCurrent += groupConfigs[netid][lGrpId].compCoupling[k] * ((runtimeData[netid].voltage[lneurIdOther] + const1)
1988  - (runtimeData[netid].voltage[lneurId] + const0));
1989  }
1990 
1991  return compCurrent;
1992 }
1993 
1994 #ifdef __NO_PTHREADS__
1995  void SNN::globalStateUpdate_CPU(int netId) {
1996 #else // POSIX
1997  void* SNN::globalStateUpdate_CPU(int netId) {
1998 #endif
1999  assert(runtimeData[netId].memType == CPU_MEM);
2000 
2001  float timeStep = networkConfigs[netId].timeStep;
2002 
2003  // loop that allows smaller integration time step for v's and u's
2004  for (int j = 1; j <= networkConfigs[netId].simNumStepsPerMs; j++) {
2005  bool lastIter = (j == networkConfigs[netId].simNumStepsPerMs);
2006  for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) {
2007  if (groupConfigs[netId][lGrpId].Type & POISSON_NEURON) {
2008  if (groupConfigs[netId][lGrpId].WithHomeostasis & (lastIter)) {
2009  for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++)
2010  runtimeData[netId].avgFiring[lNId] *= groupConfigs[netId][lGrpId].avgTimeScale_decay;
2011  }
2012  continue;
2013  }
2014 
2015  for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++) {
2016  assert(lNId < networkConfigs[netId].numNReg);
2017 
2018  // P7
2019  // update conductances
2020  float v = runtimeData[netId].voltage[lNId];
2021  float v_next = runtimeData[netId].nextVoltage[lNId];
2022  float u = runtimeData[netId].recovery[lNId];
2023  float I_sum, NMDAtmp;
2024  float gNMDA, gGABAb;
2025  float gAMPA, gGABAa;
2026 
2027  // pre-load izhikevich variables to avoid unnecessary memory accesses & unclutter the code.
2028  float k = runtimeData[netId].Izh_k[lNId];
2029  float vr = runtimeData[netId].Izh_vr[lNId];
2030  float vt = runtimeData[netId].Izh_vt[lNId];
2031  float inverse_C = 1.0f / runtimeData[netId].Izh_C[lNId];
2032  float vpeak = runtimeData[netId].Izh_vpeak[lNId];
2033  float a = runtimeData[netId].Izh_a[lNId];
2034  float b = runtimeData[netId].Izh_b[lNId];
2035 
2036  // pre-load LIF parameters
2037  int lif_tau_m = runtimeData[netId].lif_tau_m[lNId];
2038  int lif_tau_ref = runtimeData[netId].lif_tau_ref[lNId];
2039  int lif_tau_ref_c = runtimeData[netId].lif_tau_ref_c[lNId];
2040  float lif_vTh = runtimeData[netId].lif_vTh[lNId];
2041  float lif_vReset = runtimeData[netId].lif_vReset[lNId];
2042  float lif_gain = runtimeData[netId].lif_gain[lNId];
2043  float lif_bias = runtimeData[netId].lif_bias[lNId];
2044 
2045  float totalCurrent = runtimeData[netId].extCurrent[lNId];
2046 
2047 #ifdef LN_I_CALC_TYPES
2048  auto& config = groupConfigs[netId][lGrpId];
2049  switch(config.icalcType) {
2050  case COBA:
2051  case alpha1_ADK13:
2052  NMDAtmp = (v + 80.0f) * (v + 80.0f) / 60.0f / 60.0f;
2053  gNMDA = (config.with_NMDA_rise) ? (runtimeData[netId].gNMDA_d[lNId] - runtimeData[netId].gNMDA_r[lNId]) : runtimeData[netId].gNMDA[lNId];
2054  gGABAb = (config.with_GABAb_rise) ? (runtimeData[netId].gGABAb_d[lNId] - runtimeData[netId].gGABAb_r[lNId]) : runtimeData[netId].gGABAb[lNId];
2055  gAMPA = runtimeData[netId].gAMPA[lNId];
2056  gGABAa = runtimeData[netId].gGABAa[lNId];
2057  I_sum = -(gAMPA * (v - 0.0f)
2058  + gNMDA * NMDAtmp / (1.0f + NMDAtmp) * (v - 0.0f)
2059  + gGABAa * (v + 70.0f)
2060  + gGABAb * (v + 90.0f));
2061  if (config.icalcType == alpha1_ADK13) {
2062  float ne = runtimeData[netId].grpNE[lGrpId] * config.nm4w[NM_DA] / config.nm4w[NM_UNKNOWN]; // normalize
2063  float da = runtimeData[netId].grpDA[lGrpId] * config.nm4w[NM_NE] / config.nm4w[NM_UNKNOWN]; // normalize
2064  float lambda = config.nm4w[NM_UNKNOWN + 1]; // nm base = lambda
2065  assert(lambda > 0.0f);
2066  float mu = 1.0f - 0.5f * (exp((ne - 1.0f) / lambda) + exp((da - 1.0f) / lambda));
2067  //if(I_sum > 0.0f)
2068  // printf("alpha1 mu=%f Isum=%f totalCurrent=%f (lGrpId=%d)\n", mu, I_sum, totalCurrent, lGrpId);
2069  I_sum *= mu;
2070  }
2071  totalCurrent += I_sum;
2072  break;
2073  case CUBA:
2074  totalCurrent += runtimeData[netId].current[lNId];
2075  break;
2076  case NM4W_LN21:
2077  totalCurrent += runtimeData[netId].current[lNId];
2078  {
2079  float nm = .0f;
2080  nm += runtimeData[netId].grpDA[lGrpId] * config.nm4w[NM_DA];
2081  nm += runtimeData[netId].grp5HT[lGrpId] * config.nm4w[NM_5HT];
2082  nm += runtimeData[netId].grpACh[lGrpId] * config.nm4w[NM_ACh];
2083  nm += runtimeData[netId].grpNE[lGrpId] * config.nm4w[NM_NE];
2084  nm *= config.nm4w[NM_UNKNOWN]; // normalize/boost
2085  nm += config.nm4w[NM_UNKNOWN + 1]; // nm base
2086  totalCurrent *= nm;
2087  }
2088  break;
2089  default:
2090  ; // do nothing
2091  }
2092 #else
2093  if (networkConfigs[netId].sim_with_conductances) {
2094  NMDAtmp = (v + 80.0f) * (v + 80.0f) / 60.0f / 60.0f;
2095  gNMDA = (networkConfigs[netId].sim_with_NMDA_rise) ? (runtimeData[netId].gNMDA_d[lNId] - runtimeData[netId].gNMDA_r[lNId]) : runtimeData[netId].gNMDA[lNId];
2096  gGABAb = (networkConfigs[netId].sim_with_GABAb_rise) ? (runtimeData[netId].gGABAb_d[lNId] - runtimeData[netId].gGABAb_r[lNId]) : runtimeData[netId].gGABAb[lNId];
2097 
2098  I_sum = -(runtimeData[netId].gAMPA[lNId] * (v - 0.0f)
2099  + gNMDA * NMDAtmp / (1.0f + NMDAtmp) * (v - 0.0f)
2100  + runtimeData[netId].gGABAa[lNId] * (v + 70.0f)
2101  + gGABAb * (v + 90.0f));
2102 
2103  totalCurrent += I_sum;
2104  }
2105  else {
2106  totalCurrent += runtimeData[netId].current[lNId];
2107  }
2108 #endif
2109  if (groupConfigs[netId][lGrpId].withCompartments) {
2110  totalCurrent += getCompCurrent(netId, lGrpId, lNId);
2111  }
2112 
2113  switch (networkConfigs[netId].simIntegrationMethod) {
2114  case FORWARD_EULER:
2115  if (!groupConfigs[netId][lGrpId].withParamModel_9 && !groupConfigs[netId][lGrpId].isLIF)
2116  {
2117  // update vpos and upos for the current neuron
2118  v_next = v + dvdtIzhikevich4(v, u, totalCurrent, timeStep);
2119  if (v_next > 30.0f) {
2120  v_next = 30.0f; // break the loop but evaluate u[i]
2121  runtimeData[netId].curSpike[lNId] = true;
2122  v_next = runtimeData[netId].Izh_c[lNId];
2123  u += runtimeData[netId].Izh_d[lNId];
2124  }
2125  }
2126  else if (!groupConfigs[netId][lGrpId].isLIF)
2127  {
2128  // update vpos and upos for the current neuron
2129  v_next = v + dvdtIzhikevich9(v, u, inverse_C, k, vr, vt, totalCurrent, timeStep);
2130  if (v_next > vpeak) {
2131  v_next = vpeak; // break the loop but evaluate u[i]
2132  runtimeData[netId].curSpike[lNId] = true;
2133  v_next = runtimeData[netId].Izh_c[lNId];
2134  u += runtimeData[netId].Izh_d[lNId];
2135  }
2136  }
2137 
2138  else{
2139  if (lif_tau_ref_c > 0){
2140  if(lastIter){
2141  runtimeData[netId].lif_tau_ref_c[lNId] -= 1;
2142  v_next = lif_vReset;
2143  }
2144  }
2145  else{
2146  if (v_next > lif_vTh) {
2147  runtimeData[netId].curSpike[lNId] = true;
2148  v_next = lif_vReset;
2149 
2150  if(lastIter){
2151  runtimeData[netId].lif_tau_ref_c[lNId] = lif_tau_ref;
2152  }
2153  else{
2154  runtimeData[netId].lif_tau_ref_c[lNId] = lif_tau_ref + 1;
2155  }
2156  }
2157  else{
2158  v_next = v + dvdtLIF(v, lif_vReset, lif_gain, lif_bias, lif_tau_m, totalCurrent, timeStep);
2159  }
2160  }
2161  }
2162 
2163  if (groupConfigs[netId][lGrpId].isLIF){
2164  if (v_next < lif_vReset) v_next = lif_vReset;
2165  }
2166  else{
2167  if (v_next < -90.0f) v_next = -90.0f;
2168 
2169  if (!groupConfigs[netId][lGrpId].withParamModel_9)
2170  {
2171  u += dudtIzhikevich4(v_next, u, a, b, timeStep);
2172  }
2173  else
2174  {
2175  u += dudtIzhikevich9(v_next, u, vr, a, b, timeStep);
2176  }
2177  }
2178  break;
2179 
2180  case RUNGE_KUTTA4:
2181 
2182  if (!groupConfigs[netId][lGrpId].withParamModel_9 && !groupConfigs[netId][lGrpId].isLIF) {
2183  // 4-param Izhikevich
2184  float k1 = dvdtIzhikevich4(v, u, totalCurrent, timeStep);
2185  float l1 = dudtIzhikevich4(v, u, a, b, timeStep);
2186 
2187  float k2 = dvdtIzhikevich4(v + k1 / 2.0f, u + l1 / 2.0f, totalCurrent,
2188  timeStep);
2189  float l2 = dudtIzhikevich4(v + k1 / 2.0f, u + l1 / 2.0f, a, b, timeStep);
2190 
2191  float k3 = dvdtIzhikevich4(v + k2 / 2.0f, u + l2 / 2.0f, totalCurrent,
2192  timeStep);
2193  float l3 = dudtIzhikevich4(v + k2 / 2.0f, u + l2 / 2.0f, a, b, timeStep);
2194 
2195  float k4 = dvdtIzhikevich4(v + k3, u + l3, totalCurrent, timeStep);
2196  float l4 = dudtIzhikevich4(v + k3, u + l3, a, b, timeStep);
2197  v_next = v + (1.0f / 6.0f) * (k1 + 2.0f * k2 + 2.0f * k3 + k4);
2198  if (v_next > 30.0f) {
2199  v_next = 30.0f;
2200  runtimeData[netId].curSpike[lNId] = true;
2201  v_next = runtimeData[netId].Izh_c[lNId];
2202  u += runtimeData[netId].Izh_d[lNId];
2203  }
2204  if (v_next < -90.0f) v_next = -90.0f;
2205 
2206  u += (1.0f / 6.0f) * (l1 + 2.0f * l2 + 2.0f * l3 + l4);
2207  }
2208  else if(!groupConfigs[netId][lGrpId].isLIF){
2209  // 9-param Izhikevich
2210  float k1 = dvdtIzhikevich9(v, u, inverse_C, k, vr, vt, totalCurrent,
2211  timeStep);
2212  float l1 = dudtIzhikevich9(v, u, vr, a, b, timeStep);
2213 
2214  float k2 = dvdtIzhikevich9(v + k1 / 2.0f, u + l1 / 2.0f, inverse_C, k, vr, vt,
2215  totalCurrent, timeStep);
2216  float l2 = dudtIzhikevich9(v + k1 / 2.0f, u + l1 / 2.0f, vr, a, b, timeStep);
2217 
2218  float k3 = dvdtIzhikevich9(v + k2 / 2.0f, u + l2 / 2.0f, inverse_C, k, vr, vt,
2219  totalCurrent, timeStep);
2220  float l3 = dudtIzhikevich9(v + k2 / 2.0f, u + l2 / 2.0f, vr, a, b, timeStep);
2221 
2222  float k4 = dvdtIzhikevich9(v + k3, u + l3, inverse_C, k, vr, vt,
2223  totalCurrent, timeStep);
2224  float l4 = dudtIzhikevich9(v + k3, u + l3, vr, a, b, timeStep);
2225 
2226  v_next = v + (1.0f / 6.0f) * (k1 + 2.0f * k2 + 2.0f * k3 + k4);
2227 
2228  if (v_next > vpeak) {
2229  v_next = vpeak; // break the loop but evaluate u[i]
2230  runtimeData[netId].curSpike[lNId] = true;
2231  v_next = runtimeData[netId].Izh_c[lNId];
2232  u += runtimeData[netId].Izh_d[lNId];
2233  }
2234 
2235  if (v_next < -90.0f) v_next = -90.0f;
2236 
2237  u += (1.0f / 6.0f) * (l1 + 2.0f * l2 + 2.0f * l3 + l4);
2238  }
2239  else{
2240  //LIF integration is always FORWARD_EULER
2241  if (lif_tau_ref_c > 0){
2242  if(lastIter){
2243  runtimeData[netId].lif_tau_ref_c[lNId] -= 1;
2244  v_next = lif_vReset;
2245  }
2246  }
2247  else{
2248  if (v_next > lif_vTh) {
2249  runtimeData[netId].curSpike[lNId] = true;
2250  v_next = lif_vReset;
2251 
2252  if(lastIter){
2253  runtimeData[netId].lif_tau_ref_c[lNId] = lif_tau_ref;
2254  }
2255  else{
2256  runtimeData[netId].lif_tau_ref_c[lNId] = lif_tau_ref + 1;
2257  }
2258  }
2259  else{
2260  v_next = v + dvdtLIF(v, lif_vReset, lif_gain, lif_bias, lif_tau_m, totalCurrent, timeStep);
2261  }
2262  }
2263  if (v_next < lif_vReset) v_next = lif_vReset;
2264  }
2265  break;
2266  case UNKNOWN_INTEGRATION:
2267  default:
2269  }
2270 
2271  runtimeData[netId].nextVoltage[lNId] = v_next;
2272  runtimeData[netId].recovery[lNId] = u;
2273 
2274  // update current & average firing rate for homeostasis once per globalStateUpdate_CPU call
2275  if (lastIter)
2276  {
2277 #ifdef LN_I_CALC_TYPES
2278  switch (groupConfigs[netId][lGrpId].icalcType) {
2279  case COBA:
2280  case alpha1_ADK13:
2281  runtimeData[netId].current[lNId] = I_sum;
2282  break;
2283  case CUBA:
2284  case NM4W_LN21:
2285  // current must be reset here for CUBA and not STPUpdateAndDecayConductances
2286  runtimeData[netId].current[lNId] = 0.0f;
2287  break;
2288  default:
2289  ; // do nothing
2290  }
2291 #else
2292  if (networkConfigs[netId].sim_with_conductances) {
2293  runtimeData[netId].current[lNId] = I_sum;
2294  }
2295  else {
2296  // current must be reset here for CUBA and not STPUpdateAndDecayConductances
2297  runtimeData[netId].current[lNId] = 0.0f;
2298  }
2299 #endif
2300  // P8
2301  // update average firing rate for homeostasis
2302  if (groupConfigs[netId][lGrpId].WithHomeostasis)
2303  runtimeData[netId].avgFiring[lNId] *= groupConfigs[netId][lGrpId].avgTimeScale_decay;
2304 
2305  // log i value if any active neuron monitor is presented
2306  if (networkConfigs[netId].sim_with_nm && lNId - groupConfigs[netId][lGrpId].lStartN < MAX_NEURON_MON_GRP_SZIE) {
2307  int idxBase = networkConfigs[netId].numGroups * MAX_NEURON_MON_GRP_SZIE * simTimeMs + lGrpId * MAX_NEURON_MON_GRP_SZIE;
2308  runtimeData[netId].nIBuffer[idxBase + lNId - groupConfigs[netId][lGrpId].lStartN] = totalCurrent;
2309  }
2310  }
2311  } // end StartN...EndN
2312 
2313  // decay dopamine concentration once per globalStateUpdate_CPU call
2314  if (lastIter)
2315  {
2316  // P9
2317  // decay dopamine concentration
2318 #ifndef LN_FIX_ALL_DECAY_CPU
2319 #ifndef LN_FIX_DA_DECAY_CPU
2320  if ((groupConfigs[netId][lGrpId].WithESTDPtype == DA_MOD || groupConfigs[netId][lGrpId].WithISTDP == DA_MOD) && runtimeData[netId].grpDA[lGrpId] > groupConfigs[netId][lGrpId].baseDP) {
2321  runtimeData[netId].grpDA[lGrpId] *= groupConfigs[netId][lGrpId].decayDP;
2322  }
2323 #else
2324  if (groupConfigs[netId][lGrpId].WithESTDPtype == DA_MOD || groupConfigs[netId][lGrpId].WithISTDP == DA_MOD) {
2325  float baseDP_ = groupConfigs[netId][lGrpId].baseDP;
2326  if(runtimeData[netId].grpDA[lGrpId] > baseDP_) {
2327  runtimeData[netId].grpDA[lGrpId] *= groupConfigs[netId][lGrpId].decayDP;
2328  if(runtimeData[netId].grpDA[lGrpId] < baseDP_)
2329  runtimeData[netId].grpDA[lGrpId] = baseDP_;
2330  }
2331  }
2332 
2333  runtimeData[netId].grpDABuffer[lGrpId * 1000 + simTimeMs] = runtimeData[netId].grpDA[lGrpId];
2334 
2335  // LN2021
2336 
2337  if (groupConfigs[netId][lGrpId].WithESTDPtype == MOD_NE_A1 || groupConfigs[netId][lGrpId].WithISTDP == MOD_NE_A1) {
2338  float baseNE_ = groupConfigs[netId][lGrpId].baseNE;
2339  if (runtimeData[netId].grpNE[lGrpId] > baseNE_) {
2340  runtimeData[netId].grpNE[lGrpId] *= groupConfigs[netId][lGrpId].decayNE;
2341  if (runtimeData[netId].grpNE[lGrpId] < baseNE_)
2342  runtimeData[netId].grpNE[lGrpId] = baseNE_;
2343  }
2344  }
2345 #endif
2346 #else
2347 
2348  // LN2021 \todo refactor if design holds
2349 
2350  if (groupConfigs[netId][lGrpId].activeDP) {
2351  float baseDP_ = groupConfigs[netId][lGrpId].baseDP;
2352  if (runtimeData[netId].grpDA[lGrpId] > baseDP_) {
2353  runtimeData[netId].grpDA[lGrpId] *= groupConfigs[netId][lGrpId].decayDP;
2354  if (runtimeData[netId].grpDA[lGrpId] < baseDP_)
2355  runtimeData[netId].grpDA[lGrpId] = baseDP_;
2356  }
2357  runtimeData[netId].grpDABuffer[lGrpId * 1000 + simTimeMs] = runtimeData[netId].grpDA[lGrpId];
2358  }
2359 
2360  if (groupConfigs[netId][lGrpId].active5HT) {
2361  float base5HT_ = groupConfigs[netId][lGrpId].base5HT;
2362  if (runtimeData[netId].grp5HT[lGrpId] > base5HT_) {
2363  runtimeData[netId].grp5HT[lGrpId] *= groupConfigs[netId][lGrpId].decay5HT;
2364  if (runtimeData[netId].grp5HT[lGrpId] < base5HT_)
2365  runtimeData[netId].grp5HT[lGrpId] = base5HT_;
2366  }
2367  runtimeData[netId].grp5HTBuffer[lGrpId * 1000 + simTimeMs] = runtimeData[netId].grp5HT[lGrpId];
2368  }
2369 
2370  if (groupConfigs[netId][lGrpId].activeACh) {
2371  float baseACh_ = groupConfigs[netId][lGrpId].baseACh;
2372  if (runtimeData[netId].grpACh[lGrpId] > baseACh_) {
2373  runtimeData[netId].grpACh[lGrpId] *= groupConfigs[netId][lGrpId].decayACh;
2374  if (runtimeData[netId].grpACh[lGrpId] < baseACh_)
2375  runtimeData[netId].grpACh[lGrpId] = baseACh_;
2376  }
2377  runtimeData[netId].grpAChBuffer[lGrpId * 1000 + simTimeMs] = runtimeData[netId].grpACh[lGrpId];
2378  }
2379 
2380  if (groupConfigs[netId][lGrpId].activeNE) {
2381  float baseNE_ = groupConfigs[netId][lGrpId].baseNE;
2382  if (runtimeData[netId].grpNE[lGrpId] > baseNE_) {
2383  runtimeData[netId].grpNE[lGrpId] *= groupConfigs[netId][lGrpId].decayNE;
2384  if (runtimeData[netId].grpNE[lGrpId] < baseNE_)
2385  runtimeData[netId].grpNE[lGrpId] = baseNE_;
2386  }
2387  runtimeData[netId].grpNEBuffer[lGrpId * 1000 + simTimeMs] = runtimeData[netId].grpNE[lGrpId];
2388  }
2389 
2390 #endif
2391 #ifdef LN_FIX_ALL_DECAY_CPU
2392 #endif
2393  }
2394  } // end numGroups
2395 
2396  // Only after we are done computing nextVoltage for all neurons do we copy the new values to the voltage array.
2397  // This is crucial for GPU (asynchronous kernel launch) and in the future for a multi-threaded CARLsim version.
2398 
2399  memcpy(runtimeData[netId].voltage, runtimeData[netId].nextVoltage, sizeof(float)*networkConfigs[netId].numNReg);
2400 
2401  } // end simNumStepsPerMs loop
2402 }
2403 
2404 #ifndef __NO_PTHREADS__ // POSIX
2405  // Static multithreading subroutine method - helper for the above method
2406  void* SNN::helperGlobalStateUpdate_CPU(void* arguments) {
2407  ThreadStruct* args = (ThreadStruct*) arguments;
2408  //printf("\nThread ID: %lu and CPU: %d\n",pthread_self(), sched_getcpu());
2409  ((SNN *)args->snn_pointer) -> globalStateUpdate_CPU(args->netId);
2410  pthread_exit(0);
2411  }
2412 #endif
2413 
2414 // This function updates the synaptic weights from its derivatives..
2415 #ifdef __NO_PTHREADS__
2416  void SNN::updateWeights_CPU(int netId) {
2417 #else // POSIX
2418  void* SNN::updateWeights_CPU(int netId) {
2419 #endif
2420  // at this point we have already checked for sim_in_testing and sim_with_fixedwts
2421  assert(sim_in_testing==false);
2422  assert(sim_with_fixedwts==false);
2423  assert(runtimeData[netId].memType == CPU_MEM);
2424 
2425  // update synaptic weights here for all the neurons..
2426  for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) {
2427  // no changable weights so continue without changing..
2428  if (groupConfigs[netId][lGrpId].FixedInputWts || !(groupConfigs[netId][lGrpId].WithSTDP))
2429  continue;
2430 
2431  short int connId;
2432  for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++) {
2433  assert(lNId < networkConfigs[netId].numNReg);
2434  unsigned int offset = runtimeData[netId].cumulativePre[lNId];
2435  float diff_firing = 0.0;
2436  float homeostasisScale = 1.0;
2437 
2438  if (groupConfigs[netId][lGrpId].WithHomeostasis) {
2439  assert(runtimeData[netId].baseFiring[lNId] > 0);
2440  diff_firing = 1 - runtimeData[netId].avgFiring[lNId] / runtimeData[netId].baseFiring[lNId];
2441  homeostasisScale = groupConfigs[netId][lGrpId].homeostasisScale;
2442  }
2443 
2444  if (lNId == groupConfigs[netId][lGrpId].lStartN)
2445  KERNEL_DEBUG("Weights, Change at %d (diff_firing: %f)", simTimeSec, diff_firing);
2446 
2447  for (int j = 0; j < runtimeData[netId].Npre_plastic[lNId]; j++) {
2448  connId = runtimeData[netId].connIdsPreIdx[offset + j];
2449 
2450  // if (i==groupConfigs[0][g].StartN)
2451  // KERNEL_DEBUG("%1.2f %1.2f \t", wt[offset+j]*10, wtChange[offset+j]*10);
2452  float effectiveWtChange = stdpScaleFactor_ * runtimeData[netId].wtChange[offset + j];
2453  // if (wtChange[offset+j])
2454  // printf("connId=%d, wtChange[%d]=%f\n",connIdsPreIdx[offset+j],offset+j,wtChange[offset+j]);
2455 
2456  // homeostatic weight update
2457  // FIXME: check WithESTDPtype and WithISTDPtype first and then do weight change update
2458  switch (connectConfigMap[connId].stdpConfig.WithESTDPtype) {
2459  case STANDARD:
2460 #ifdef LN_I_CALC_TYPES
2461  case PKA_PLC_MOD:
2462 #endif
2463  if (groupConfigs[netId][lGrpId].WithHomeostasis) {
2464  runtimeData[netId].wt[offset + j] += (diff_firing*runtimeData[netId].wt[offset + j] * homeostasisScale + runtimeData[netId].wtChange[offset + j])*runtimeData[netId].baseFiring[lNId] / groupConfigs[netId][lGrpId].avgTimeScale / (1 + fabs(diff_firing) * 50);
2465  } else {
2466  // just STDP weight update
2467  runtimeData[netId].wt[offset + j] += effectiveWtChange;
2468  }
2469  break;
2470  case DA_MOD:
2471  case SE_MOD:
2472  case AC_MOD:
2473  case NE_MOD:
2474  if (groupConfigs[netId][lGrpId].WithHomeostasis) {
2475  //effectiveWtChange = runtimeData[netId].grpDA[lGrpId] * effectiveWtChange;
2476  effectiveWtChange = (runtimeData[netId].grpDA + connectConfigMap[connId].stdpConfig.WithESTDPtype - DA_MOD)[lGrpId];
2477  runtimeData[netId].wt[offset + j] += (diff_firing*runtimeData[netId].wt[offset + j] * homeostasisScale + effectiveWtChange)*runtimeData[netId].baseFiring[lNId] / groupConfigs[netId][lGrpId].avgTimeScale / (1 + fabs(diff_firing) * 50);
2478  } else {
2479  //runtimeData[netId].wt[offset + j] += runtimeData[netId].grpDA[lGrpId] * effectiveWtChange;
2480  runtimeData[netId].wt[offset + j] += (runtimeData[netId].grpDA + connectConfigMap[connId].stdpConfig.WithESTDPtype - DA_MOD)[lGrpId] * effectiveWtChange;
2481  }
2482  break;
2483  case UNKNOWN_STDP:
2484  default:
2485  // we shouldn't even be in here if !WithSTDP
2486  break;
2487  }
2488 
2489  switch (connectConfigMap[connId].stdpConfig.WithISTDPtype) {
2490  case STANDARD:
2491  if (groupConfigs[netId][lGrpId].WithHomeostasis) {
2492  runtimeData[netId].wt[offset + j] += (diff_firing*runtimeData[netId].wt[offset + j] * homeostasisScale + runtimeData[netId].wtChange[offset + j])*runtimeData[netId].baseFiring[lNId] / groupConfigs[netId][lGrpId].avgTimeScale / (1 + fabs(diff_firing) * 50);
2493  } else {
2494  // just STDP weight update
2495  runtimeData[netId].wt[offset + j] += effectiveWtChange;
2496  }
2497  break;
2498  case DA_MOD:
2499  case SE_MOD:
2500  case AC_MOD:
2501  case NE_MOD:
2502  if (groupConfigs[netId][lGrpId].WithHomeostasis) {
2503  //effectiveWtChange = runtimeData[netId].grpDA[lGrpId] * effectiveWtChange;
2504  effectiveWtChange = (runtimeData[netId].grpDA + connectConfigMap[connId].stdpConfig.WithISTDPtype - DA_MOD)[lGrpId] * effectiveWtChange;
2505  runtimeData[netId].wt[offset + j] += (diff_firing*runtimeData[netId].wt[offset + j] * homeostasisScale + effectiveWtChange)*runtimeData[netId].baseFiring[lNId] / groupConfigs[netId][lGrpId].avgTimeScale / (1 + fabs(diff_firing) * 50);
2506  } else {
2507  //runtimeData[netId].wt[offset + j] += runtimeData[netId].grpDA[lGrpId] * effectiveWtChange;
2508  runtimeData[netId].wt[offset + j] += (runtimeData[netId].grpDA + connectConfigMap[connId].stdpConfig.WithISTDPtype - DA_MOD)[lGrpId] * effectiveWtChange;
2509  }
2510  break;
2511  case UNKNOWN_STDP:
2512  default:
2513  // we shouldn't even be in here if !WithSTDP
2514  break;
2515  }
2516 
2517  // It is users' choice to decay weight change or not
2518  // see setWeightAndWeightChangeUpdate()
2519  runtimeData[netId].wtChange[offset + j] *= wtChangeDecay_;
2520 
2521  // if this is an excitatory or inhibitory synapse
2522  if (runtimeData[netId].maxSynWt[offset + j] >= 0) {
2523  if (runtimeData[netId].wt[offset + j] >= runtimeData[netId].maxSynWt[offset + j])
2524  runtimeData[netId].wt[offset + j] = runtimeData[netId].maxSynWt[offset + j];
2525  if (runtimeData[netId].wt[offset + j] < 0)
2526  runtimeData[netId].wt[offset + j] = 0.0;
2527  }
2528  else {
2529  if (runtimeData[netId].wt[offset + j] <= runtimeData[netId].maxSynWt[offset + j])
2530  runtimeData[netId].wt[offset + j] = runtimeData[netId].maxSynWt[offset + j];
2531  if (runtimeData[netId].wt[offset + j] > 0)
2532  runtimeData[netId].wt[offset + j] = 0.0;
2533  }
2534  }
2535  }
2536  }
2537 }
2538 
2539 #ifndef __NO_PTHREADS__ // POSIX
2540  // Static multithreading subroutine method - helper for the above method
2541  void* SNN::helperUpdateWeights_CPU(void* arguments) {
2542  ThreadStruct* args = (ThreadStruct*) arguments;
2543  //printf("\nThread ID: %lu and CPU: %d\n",pthread_self(), sched_getcpu());
2544  ((SNN *)args->snn_pointer) -> updateWeights_CPU(args->netId);
2545  pthread_exit(0);
2546  }
2547 #endif
2548 
2553  #ifdef __NO_PTHREADS__
2554  void SNN::shiftSpikeTables_CPU(int netId) {
2555 #else // POSIX
2556  void* SNN::shiftSpikeTables_CPU(int netId) {
2557 #endif
2558  assert(runtimeData[netId].memType == CPU_MEM);
2559  // Read the neuron ids that fired in the last glbNetworkConfig.maxDelay seconds
2560  // and put it to the beginning of the firing table...
2561  for(int p = runtimeData[netId].timeTableD2[999], k = 0; p < runtimeData[netId].timeTableD2[999 + networkConfigs[netId].maxDelay + 1]; p++, k++) {
2562  runtimeData[netId].firingTableD2[k] = runtimeData[netId].firingTableD2[p];
2563 #ifdef LN_AXON_PLAST
2564  runtimeData[netId].firingTimesD2[k] = runtimeData[netId].firingTimesD2[p];
2565 #endif
2566  }
2567 
2568  for(int i = 0; i < networkConfigs[netId].maxDelay; i++) {
2569  runtimeData[netId].timeTableD2[i + 1] = runtimeData[netId].timeTableD2[1000 + i + 1] - runtimeData[netId].timeTableD2[1000];
2570  runtimeData[netId].timeTableD1[i + 1] = runtimeData[netId].timeTableD1[1000 + i + 1] - runtimeData[netId].timeTableD1[1000];
2571  }
2572 
2573  runtimeData[netId].timeTableD1[networkConfigs[netId].maxDelay] = 0;
2574  runtimeData[netId].spikeCountD2 += runtimeData[netId].spikeCountD2Sec;
2575  runtimeData[netId].spikeCountD1 += runtimeData[netId].spikeCountD1Sec;
2576 
2577  runtimeData[netId].spikeCountD2Sec = 0;
2578  runtimeData[netId].spikeCountD1Sec = 0;
2579 
2580  runtimeData[netId].spikeCountExtRxD2Sec = 0;
2581  runtimeData[netId].spikeCountExtRxD1Sec = 0;
2582 
2583  runtimeData[netId].spikeCountLastSecLeftD2 = runtimeData[netId].timeTableD2[networkConfigs[netId].maxDelay];
2584 }
2585 
2586 #ifndef __NO_PTHREADS__ // POSIX
2587  // Static multithreading subroutine method - helper for the above method
2588  void* SNN::helperShiftSpikeTables_CPU(void* arguments) {
2589  ThreadStruct* args = (ThreadStruct*) arguments;
2590  //printf("\nThread ID: %lu and CPU: %d\n",pthread_self(), sched_getcpu());
2591  ((SNN *)args->snn_pointer) -> shiftSpikeTables_CPU(args->netId);
2592  pthread_exit(0);
2593  }
2594 #endif
2595 
/*!
 * \brief Allocates and initializes the CPU runtime data for network \p netId.
 *
 * Copies connection topology, synapse state, neuron state, optional STP state, group
 * (neuromodulator) state, and auxiliary data from the manager runtime into
 * runtimeData[netId], then logs the group settings and marks the runtime as allocated.
 * The copy* calls below are invoked with allocateMem=true, so each one both allocates
 * its destination arrays and fills them. Call order matters: later steps index into
 * structures (e.g. cumulativePre/Post) set up by earlier ones.
 */
void SNN::allocateSNN_CPU(int netId) {
	// setup memory type of CPU runtime data
	runtimeData[netId].memType = CPU_MEM;

	// display some memory management info
	// (disabled diagnostic scaffolding, kept for reference)
	//size_t avail, total, previous;
	//float toMB = std::pow(1024.0f, 2);
	//KERNEL_INFO("CPU Memory Management: (Total %2.3f MB)",(float)(total/toMB));
	//KERNEL_INFO("Data\t\t\tSize\t\tTotal Used\tTotal Available");
	//KERNEL_INFO("Init:\t\t\t%2.3f MB\t%2.3f MB\t%2.3f MB",(float)(total)/toMB,(float)((total-avail)/toMB), (float)(avail/toMB));
	//previous=avail;

	// allocate SNN::runtimeData[0].randNum for random number generators
	// (one slot per Poisson neuron)
	runtimeData[netId].randNum = new float[networkConfigs[netId].numNPois];
	//KERNEL_INFO("Random Gen:\t\t%2.3f MB\t%2.3f MB\t%2.3f MB",(float)(previous-avail)/toMB, (float)((total-avail)/toMB),(float)(avail/toMB));
	//previous=avail;


	// initialize (copy from SNN) runtimeData[0].Npre, runtimeData[0].Npre_plastic, runtimeData[0].Npre_plasticInv, runtimeData[0].cumulativePre
	// initialize (copy from SNN) runtimeData[0].cumulativePost, runtimeData[0].Npost, runtimeData[0].postDelayInfo
	// initialize (copy from SNN) runtimeData[0].postSynapticIds, runtimeData[0].preSynapticIds
	copyPreConnectionInfo(netId, ALL, &runtimeData[netId], &managerRuntimeData, true);
	copyPostConnectionInfo(netId, ALL, &runtimeData[netId], &managerRuntimeData, true);
	//KERNEL_INFO("Conn Info:\t\t%2.3f MB\t%2.3f MB\t%2.3f MB",(float)(previous-avail)/toMB,(float)((total-avail)/toMB), (float)(avail/toMB));
	//previous=avail;

	// initialize (copy from SNN) runtimeData[0].wt, runtimeData[0].wtChange, runtimeData[0].maxSynWt
	copySynapseState(netId, &runtimeData[netId], &managerRuntimeData, true);
	//KERNEL_INFO("Syn State:\t\t%2.3f MB\t%2.3f MB\t%2.3f MB",(float)(previous-avail)/toMB,(float)((total-avail)/toMB), (float)(avail/toMB));
	//previous=avail;

	// copy the neuron state information to the CPU runtime
	// initialize (copy from managerRuntimeData) runtimeData[0].recovery, runtimeData[0].voltage, runtimeData[0].current
	// initialize (copy from managerRuntimeData) runtimeData[0].gGABAa, runtimeData[0].gGABAb, runtimeData[0].gAMPA, runtimeData[0].gNMDA
	// initialize (copy from SNN) runtimeData[0].Izh_a, runtimeData[0].Izh_b, runtimeData[0].Izh_c, runtimeData[0].Izh_d
	// initialize (copy form SNN) runtimeData[0].baseFiring, runtimeData[0].baseFiringInv
	// initialize (copy from SNN) runtimeData[0].n(V,U,I)Buffer[]
	copyNeuronState(netId, ALL, &runtimeData[netId], true);

	// copy STP state, considered as neuron state
	if (sim_with_stp) {
		// initialize (copy from SNN) stpu, stpx
		copySTPState(netId, ALL, &runtimeData[netId], &managerRuntimeData, true);
	}
	//KERNEL_INFO("Neuron State:\t\t%2.3f MB\t%2.3f MB\t%2.3f MB",(float)(previous-avail)/toMB,(float)((total-avail)/toMB), (float)(avail/toMB));
	//previous=avail;

	// initialize (copy from SNN) runtimeData[0].grpDA(5HT,ACh,NE)
	// initialize (copy from SNN) runtimeData[0].grpDA(5HT,ACh,NE)Buffer[]
	copyGroupState(netId, ALL, &runtimeData[netId], &managerRuntimeData, true);
	//KERNEL_INFO("Group State:\t\t%2.3f MB\t%2.3f MB\t%2.3f MB",(float)(previous-avail)/toMB,(float)((total-avail)/toMB), (float)(avail/toMB));
	//previous=avail;

	// initialize (cudaMemset) runtimeData[0].I_set, runtimeData[0].poissonFireRate
	// initialize (copy from SNN) runtimeData[0].firingTableD1, runtimeData[0].firingTableD2
	// initialize (cudaMalloc) runtimeData[0].spikeGenBits
	// initialize (copy from managerRuntimeData) runtimeData[0].nSpikeCnt,
	// initialize (copy from SNN) runtimeData[0].synSpikeTime, runtimeData[0].lastSpikeTime
	copyAuxiliaryData(netId, ALL, &runtimeData[netId], true);
	//KERNEL_INFO("Auxiliary Data:\t\t%2.3f MB\t%2.3f MB\t%2.3f MB\n\n",(float)(previous-avail)/toMB,(float)((total-avail)/toMB), (float)(avail/toMB));
	//previous=avail;

	// TODO: move mulSynFast, mulSynSlow to ConnectConfig structure
	// copy connection configs
	//CUDA_CHECK_ERRORS(cudaMemcpyToSymbol(d_mulSynFast, mulSynFast, sizeof(float) * networkConfigs[netId].numConnections, 0, cudaMemcpyHostToDevice));
	//CUDA_CHECK_ERRORS(cudaMemcpyToSymbol(d_mulSynSlow, mulSynSlow, sizeof(float) * networkConfigs[netId].numConnections, 0, cudaMemcpyHostToDevice));

#ifdef LN_I_CALC_TYPES
	// \todo LN 2021 group config extensions ?? in same functions?
#endif

	// debug dump of the per-group configuration as transferred to this CPU runtime
	KERNEL_DEBUG("Transfering group settings to CPU:");
	for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroupsAssigned; lGrpId++) {
		KERNEL_DEBUG("Settings for Group %s:", groupConfigMap[groupConfigs[netId][lGrpId].gGrpId].grpName.c_str());

		KERNEL_DEBUG("\tType: %d",(int)groupConfigs[netId][lGrpId].Type);
		KERNEL_DEBUG("\tNumN: %d",groupConfigs[netId][lGrpId].numN);
		KERNEL_DEBUG("\tM: %d",groupConfigs[netId][lGrpId].numPostSynapses);
		KERNEL_DEBUG("\tPreM: %d",groupConfigs[netId][lGrpId].numPreSynapses);
		KERNEL_DEBUG("\tspikeGenerator: %d",(int)groupConfigs[netId][lGrpId].isSpikeGenerator);
		KERNEL_DEBUG("\tFixedInputWts: %d",(int)groupConfigs[netId][lGrpId].FixedInputWts);
		KERNEL_DEBUG("\tMaxDelay: %d",(int)groupConfigs[netId][lGrpId].MaxDelay);
// LN2021 \todo Kexin port to Connect
	// KERNEL_DEBUG("\tWithSTDP: %d",(int)groupConfigs[netId][lGrpId].WithSTDP);
	// if (groupConfigs[netId][lGrpId].WithSTDP) {
	//	KERNEL_DEBUG("\t\tE-STDP type: %s",stdpType_string[groupConfigs[netId][lGrpId].WithESTDPtype]);
	//	KERNEL_DEBUG("\t\tTAU_PLUS_INV_EXC: %f",groupConfigs[netId][lGrpId].TAU_PLUS_INV_EXC);
	//	KERNEL_DEBUG("\t\tTAU_MINUS_INV_EXC: %f",groupConfigs[netId][lGrpId].TAU_MINUS_INV_EXC);
	//	KERNEL_DEBUG("\t\tALPHA_PLUS_EXC: %f",groupConfigs[netId][lGrpId].ALPHA_PLUS_EXC);
	//	KERNEL_DEBUG("\t\tALPHA_MINUS_EXC: %f",groupConfigs[netId][lGrpId].ALPHA_MINUS_EXC);
	//	KERNEL_DEBUG("\t\tI-STDP type: %s",stdpType_string[groupConfigs[netId][lGrpId].WithISTDPtype]);
	//	KERNEL_DEBUG("\t\tTAU_PLUS_INV_INB: %f",groupConfigs[netId][lGrpId].TAU_PLUS_INV_INB);
	//	KERNEL_DEBUG("\t\tTAU_MINUS_INV_INB: %f",groupConfigs[netId][lGrpId].TAU_MINUS_INV_INB);
	//	KERNEL_DEBUG("\t\tALPHA_PLUS_INB: %f",groupConfigs[netId][lGrpId].ALPHA_PLUS_INB);
	//	KERNEL_DEBUG("\t\tALPHA_MINUS_INB: %f",groupConfigs[netId][lGrpId].ALPHA_MINUS_INB);
	//	KERNEL_DEBUG("\t\tLAMBDA: %f",groupConfigs[netId][lGrpId].LAMBDA);
	//	KERNEL_DEBUG("\t\tDELTA: %f",groupConfigs[netId][lGrpId].DELTA);
	//	KERNEL_DEBUG("\t\tBETA_LTP: %f",groupConfigs[netId][lGrpId].BETA_LTP);
	//	KERNEL_DEBUG("\t\tBETA_LTD: %f",groupConfigs[netId][lGrpId].BETA_LTD);
	// }
		KERNEL_DEBUG("\tWithSTP: %d",(int)groupConfigs[netId][lGrpId].WithSTP);
		if (groupConfigs[netId][lGrpId].WithSTP) {
			KERNEL_DEBUG("\t\tSTP_U: %f", groupConfigs[netId][lGrpId].STP_U);
//			KERNEL_DEBUG("\t\tSTP_tD: %f",groupConfigs[netId][lGrpId].STP_tD);
//			KERNEL_DEBUG("\t\tSTP_tF: %f",groupConfigs[netId][lGrpId].STP_tF);
		}
		KERNEL_DEBUG("\tspikeGen: %s", groupConfigs[netId][lGrpId].isSpikeGenFunc? "is Set" : "is not set ");
	}

	// allocation of CPU runtime data is done
	runtimeData[netId].allocated = true;
}
2708 
2727 void SNN::copyPreConnectionInfo(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, bool allocateMem) {
2728  int lengthN, lengthSyn, posN, posSyn;
2729 
2730  if (lGrpId == ALL) {
2731  lengthN = networkConfigs[netId].numNAssigned;
2732  posN = 0;
2733  } else {
2734  lengthN = groupConfigs[netId][lGrpId].numN;
2735  posN = groupConfigs[netId][lGrpId].lStartN;
2736  }
2737 
2738  // connection synaptic lengths and cumulative lengths...
2739  if(allocateMem)
2740  dest->Npre = new unsigned short[networkConfigs[netId].numNAssigned];
2741  memcpy(&dest->Npre[posN], &src->Npre[posN], sizeof(short) * lengthN);
2742 
2743  // we don't need these data structures if the network doesn't have any plastic synapses at all
2744  if (!sim_with_fixedwts) {
2745  // presyn excitatory connections
2746  if(allocateMem)
2747  dest->Npre_plastic = new unsigned short[networkConfigs[netId].numNAssigned];
2748  memcpy(&dest->Npre_plastic[posN], &src->Npre_plastic[posN], sizeof(short) * lengthN);
2749 
2750  // Npre_plasticInv is only used on GPUs, only allocate and copy it during initialization
2751  if(allocateMem) {
2752  float* Npre_plasticInv = new float[networkConfigs[netId].numNAssigned];
2753 
2754  for (int i = 0; i < networkConfigs[netId].numNAssigned; i++)
2755  Npre_plasticInv[i] = 1.0f / managerRuntimeData.Npre_plastic[i];
2756 
2757  dest->Npre_plasticInv = new float[networkConfigs[netId].numNAssigned];
2758  memcpy(dest->Npre_plasticInv, Npre_plasticInv, sizeof(float) * networkConfigs[netId].numNAssigned);
2759 
2760  delete[] Npre_plasticInv;
2761  }
2762  }
2763 
2764  // beginning position for the pre-synaptic information
2765  if(allocateMem)
2766  dest->cumulativePre = new unsigned int[networkConfigs[netId].numNAssigned];
2767  memcpy(&dest->cumulativePre[posN], &src->cumulativePre[posN], sizeof(int) * lengthN);
2768 
2769  // Npre, cumulativePre has been copied to destination
2770  if (lGrpId == ALL) {
2771  lengthSyn = networkConfigs[netId].numPreSynNet;
2772  posSyn = 0;
2773  } else {
2774  lengthSyn = 0;
2775  for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++)
2776  lengthSyn += dest->Npre[lNId];
2777 
2778  posSyn = dest->cumulativePre[groupConfigs[netId][lGrpId].lStartN];
2779  }
2780 
2781  if(allocateMem)
2782  dest->preSynapticIds = new SynInfo[networkConfigs[netId].numPreSynNet];
2783  memcpy(&dest->preSynapticIds[posSyn], &src->preSynapticIds[posSyn], sizeof(SynInfo) * lengthSyn);
2784 }
2785 
2802 void SNN::copyPostConnectionInfo(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, bool allocateMem) {
2803  int lengthN, lengthSyn, posN, posSyn;
2804 
2805  if (lGrpId == ALL) {
2806  lengthN = networkConfigs[netId].numNAssigned;
2807  posN = 0;
2808  } else {
2809  lengthN = groupConfigs[netId][lGrpId].numN;
2810  posN = groupConfigs[netId][lGrpId].lStartN;
2811  }
2812 
2813  // number of postsynaptic connections
2814  if(allocateMem)
2815  dest->Npost = new unsigned short[networkConfigs[netId].numNAssigned];
2816  memcpy(&dest->Npost[posN], &src->Npost[posN], sizeof(short) * lengthN);
2817 
2818  // beginning position for the post-synaptic information
2819  if(allocateMem)
2820  dest->cumulativePost = new unsigned int[networkConfigs[netId].numNAssigned];
2821  memcpy(&dest->cumulativePost[posN], &src->cumulativePost[posN], sizeof(int) * lengthN);
2822 
2823 
2824  // Npost, cumulativePost has been copied to destination
2825  if (lGrpId == ALL) {
2826  lengthSyn = networkConfigs[netId].numPostSynNet;
2827  posSyn = 0;
2828  } else {
2829  lengthSyn = 0;
2830  for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++)
2831  lengthSyn += dest->Npost[lNId];
2832 
2833  posSyn = dest->cumulativePost[groupConfigs[netId][lGrpId].lStartN];
2834  }
2835 
2836  // actual post synaptic connection information...
2837  if(allocateMem)
2838  dest->postSynapticIds = new SynInfo[networkConfigs[netId].numPostSynNet];
2839  memcpy(&dest->postSynapticIds[posSyn], &src->postSynapticIds[posSyn], sizeof(SynInfo) * lengthSyn);
2840 
2841  // static specific mapping and actual post-synaptic delay metric
2842  if(allocateMem)
2843  dest->postDelayInfo = new DelayInfo[networkConfigs[netId].numNAssigned * (glbNetworkConfig.maxDelay + 1)];
2844  memcpy(&dest->postDelayInfo[posN * (glbNetworkConfig.maxDelay + 1)], &src->postDelayInfo[posN * (glbNetworkConfig.maxDelay + 1)], sizeof(DelayInfo) * lengthN * (glbNetworkConfig.maxDelay + 1));
2845 }
2846 
2862 void SNN::copySynapseState(int netId, RuntimeData* dest, RuntimeData* src, bool allocateMem) {
2863  assert(networkConfigs[netId].numPreSynNet > 0);
2864 
2865  // synaptic information based
2866  if(allocateMem)
2867  dest->wt = new float[networkConfigs[netId].numPreSynNet];
2868  memcpy(dest->wt, src->wt, sizeof(float) * networkConfigs[netId].numPreSynNet);
2869 
2870  // we don't need these data structures if the network doesn't have any plastic synapses at all
2871  // they show up in updateLTP() and updateSynapticWeights(), two functions that do not get called if
2872  // sim_with_fixedwts is set
2873  if (!sim_with_fixedwts) {
2874  // synaptic weight derivative
2875  if(allocateMem)
2876  dest->wtChange = new float[networkConfigs[netId].numPreSynNet];
2877  memcpy(dest->wtChange, src->wtChange, sizeof(float) * networkConfigs[netId].numPreSynNet);
2878 
2879  // synaptic weight maximum value
2880  if(allocateMem)
2881  dest->maxSynWt = new float[networkConfigs[netId].numPreSynNet];
2882  memcpy(dest->maxSynWt, src->maxSynWt, sizeof(float) * networkConfigs[netId].numPreSynNet);
2883  }
2884 }
2885 
2902 void SNN::copyNeuronState(int netId, int lGrpId, RuntimeData* dest, bool allocateMem) {
2903  int ptrPos, length;
2904 
2905  if(lGrpId == ALL) {
2906  ptrPos = 0;
2907  length = networkConfigs[netId].numNReg;
2908  }
2909  else {
2910  ptrPos = groupConfigs[netId][lGrpId].lStartN;
2911  length = groupConfigs[netId][lGrpId].numN;
2912  }
2913 
2914  assert(length <= networkConfigs[netId].numNReg);
2915 
2916  if (length == 0)
2917  return;
2918 
2919  if(!allocateMem && groupConfigs[netId][lGrpId].Type & POISSON_NEURON)
2920  return;
2921 
2922  if(allocateMem)
2923  dest->recovery = new float[length];
2924  memcpy(&dest->recovery[ptrPos], &managerRuntimeData.recovery[ptrPos], sizeof(float) * length);
2925 
2926  if(allocateMem)
2927  dest->voltage = new float[length];
2928  memcpy(&dest->voltage[ptrPos], &managerRuntimeData.voltage[ptrPos], sizeof(float) * length);
2929 
2930  if (allocateMem)
2931  dest->nextVoltage = new float[length];
2932  memcpy(&dest->nextVoltage[ptrPos], &managerRuntimeData.nextVoltage[ptrPos], sizeof(float) * length);
2933 
2934  //neuron input current...
2935  if(allocateMem)
2936  dest->current = new float[length];
2937  memcpy(&dest->current[ptrPos], &managerRuntimeData.current[ptrPos], sizeof(float) * length);
2938 
2939 #ifdef LN_I_CALC_TYPES
2940  if (lGrpId == ALL) {
2941  // LN20210913 if ommitted crash at #491: runtimeData[netId].gAMPA[lNId] *= groupConfig.dAMPA;
2942  // Exception thrown at 0x00007FFD1C9F807D (carlsimd.dll) in carlsim-tests.exe: 0xC0000005: Access violation reading location 0x0000000000000000.
2943  // \todo LN 20210913 smell
2944  //
2945  // see void SNN::copyConductanceAMPA(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, cudaMemcpyKind kind, bool allocateMem, int destOffset) {
2946  if(sim_with_conductances) {
2947  //conductance information
2948  copyConductanceAMPA(netId, lGrpId, dest, &managerRuntimeData, allocateMem, 0);
2949  copyConductanceNMDA(netId, lGrpId, dest, &managerRuntimeData, allocateMem, 0);
2950  copyConductanceGABAa(netId, lGrpId, dest, &managerRuntimeData, allocateMem, 0);
2951  copyConductanceGABAb(netId, lGrpId, dest, &managerRuntimeData, allocateMem, 0);
2952  }
2953  } else {
2954  switch (groupConfigs[netId][lGrpId].icalcType) { // \todo LN verify that only the part of the group is copied
2955  case COBA:
2956  case alpha1_ADK13:
2957  //conductance information
2958  copyConductanceAMPA(netId, lGrpId, dest, &managerRuntimeData, allocateMem, 0);
2959  copyConductanceNMDA(netId, lGrpId, dest, &managerRuntimeData, allocateMem, 0);
2960  copyConductanceGABAa(netId, lGrpId, dest, &managerRuntimeData, allocateMem, 0);
2961  copyConductanceGABAb(netId, lGrpId, dest, &managerRuntimeData, allocateMem, 0);
2962  break;
2963  case CUBA:
2964  case NM4W_LN21:
2965  ; // do nothing
2966  default:
2967  ; // do nothing
2968  }
2969  }
2970 #else
2971  if (sim_with_conductances) {
2972  //conductance information
2973  copyConductanceAMPA(netId, lGrpId, dest, &managerRuntimeData, allocateMem, 0);
2974  copyConductanceNMDA(netId, lGrpId, dest, &managerRuntimeData, allocateMem, 0);
2975  copyConductanceGABAa(netId, lGrpId, dest, &managerRuntimeData, allocateMem, 0);
2976  copyConductanceGABAb(netId, lGrpId, dest, &managerRuntimeData, allocateMem, 0);
2977  }
2978 #endif
2979 
2980  // copying external current needs to be done separately because setExternalCurrent needs to call it, too
2981  // do it only from host to device
2982  copyExternalCurrent(netId, lGrpId, dest, allocateMem);
2983 
2984  if (allocateMem)
2985  dest->curSpike = new bool[length];
2986  memcpy(&dest->curSpike[ptrPos], &managerRuntimeData.curSpike[ptrPos], sizeof(bool) * length);
2987 
2988  copyNeuronParameters(netId, lGrpId, dest, allocateMem);
2989 
2990  if (networkConfigs[netId].sim_with_nm)
2991  copyNeuronStateBuffer(netId, lGrpId, dest, &managerRuntimeData, allocateMem);
2992 
2993  if (sim_with_homeostasis) {
2994  //Included to enable homeostasis in CPU_MODE.
2995  // Avg. Firing...
2996  if(allocateMem)
2997  dest->avgFiring = new float[length];
2998  memcpy(&dest->avgFiring[ptrPos], &managerRuntimeData.avgFiring[ptrPos], sizeof(float) * length);
2999  }
3000 }
3001 
3020 void SNN::copyConductanceAMPA(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, bool allocateMem, int destOffset) {
3021  assert(isSimulationWithCOBA());
3022 
3023  int ptrPos, length;
3024 
3025  if(lGrpId == ALL) {
3026  ptrPos = 0;
3027  length = networkConfigs[netId].numNReg;
3028  } else {
3029  ptrPos = groupConfigs[netId][lGrpId].lStartN;
3030  length = groupConfigs[netId][lGrpId].numN;
3031  }
3032  assert(length <= networkConfigs[netId].numNReg);
3033  assert(length > 0);
3034 
3035  //conductance information
3036  assert(src->gAMPA != NULL);
3037  if(allocateMem)
3038  dest->gAMPA = new float[length];
3039  memcpy(&dest->gAMPA[ptrPos + destOffset], &src->gAMPA[ptrPos], sizeof(float) * length);
3040 }
3041 
/*!
 * \brief Copies NMDA conductance state from \p src to \p dest on the CPU backend.
 *
 * Copies either the whole regular-neuron range (lGrpId == ALL) or a single local group.
 * With NMDA rise/decay enabled, the separate rise (gNMDA_r) and decay (gNMDA_d) buffers
 * are copied; otherwise the combined gNMDA buffer is copied. destOffset shifts only the
 * destination index of the combined buffer.
 */
void SNN::copyConductanceNMDA(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, bool allocateMem, int destOffset) {
	assert(isSimulationWithCOBA());

	int ptrPos, length;

	// resolve the neuron range: whole regular population or a single local group
	if(lGrpId == ALL) {
		ptrPos = 0;
		length = networkConfigs[netId].numNReg;
	} else {
		ptrPos = groupConfigs[netId][lGrpId].lStartN;
		length = groupConfigs[netId][lGrpId].numN;
	}
	assert(length <= networkConfigs[netId].numNReg);
	assert(length > 0);

#ifdef LN_I_CALC_TYPES
	// LN_I_CALC_TYPES builds maintain rise, decay, AND combined buffers in parallel
	assert(src->gNMDA_r != NULL);
	if (allocateMem)
		dest->gNMDA_r = new float[length];
	memcpy(&dest->gNMDA_r[ptrPos], &src->gNMDA_r[ptrPos], sizeof(float) * length);

	assert(src->gNMDA_d != NULL);
	if (allocateMem)
		dest->gNMDA_d = new float[length];
	memcpy(&dest->gNMDA_d[ptrPos], &src->gNMDA_d[ptrPos], sizeof(float) * length);

	assert(src->gNMDA != NULL);
	if (allocateMem)
		dest->gNMDA = new float[length];
	memcpy(&dest->gNMDA[ptrPos + destOffset], &src->gNMDA[ptrPos], sizeof(float) * length);
#else
	if (isSimulationWithNMDARise()) {
		// rise and decay tracked separately; the combined gNMDA buffer is not used
		assert(src->gNMDA_r != NULL);
		if(allocateMem)
			dest->gNMDA_r = new float[length];
		memcpy(&dest->gNMDA_r[ptrPos], &src->gNMDA_r[ptrPos], sizeof(float) * length);

		assert(src->gNMDA_d != NULL);
		if(allocateMem)
			dest->gNMDA_d = new float[length];
		memcpy(&dest->gNMDA_d[ptrPos], &src->gNMDA_d[ptrPos], sizeof(float) * length);
	} else {
		// single combined NMDA conductance buffer
		assert(src->gNMDA != NULL);
		if(allocateMem)
			dest->gNMDA = new float[length];
		memcpy(&dest->gNMDA[ptrPos + destOffset], &src->gNMDA[ptrPos], sizeof(float) * length);
	}
#endif
}
3109 
3128 void SNN::copyConductanceGABAa(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, bool allocateMem, int destOffset) {
3129  assert(isSimulationWithCOBA());
3130 
3131  int ptrPos, length;
3132 
3133  if(lGrpId == ALL) {
3134  ptrPos = 0;
3135  length = networkConfigs[netId].numNReg;
3136  } else {
3137  ptrPos = groupConfigs[netId][lGrpId].lStartN;
3138  length = groupConfigs[netId][lGrpId].numN;
3139  }
3140  assert(length <= networkConfigs[netId].numNReg);
3141  assert(length > 0);
3142 
3143  assert(src->gGABAa != NULL);
3144  if(allocateMem)
3145  dest->gGABAa = new float[length];
3146  memcpy(&dest->gGABAa[ptrPos + destOffset], &src->gGABAa[ptrPos], sizeof(float) * length);
3147 }
3148 
/*!
 * \brief Copies GABAb conductance state from \p src to \p dest on the CPU backend.
 *
 * Copies either the whole regular-neuron range (lGrpId == ALL) or a single local group.
 * With GABAb rise/decay enabled, the separate rise (gGABAb_r) and decay (gGABAb_d) buffers
 * are copied; otherwise the combined gGABAb buffer is copied. destOffset shifts only the
 * destination index of the combined buffer.
 */
void SNN::copyConductanceGABAb(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, bool allocateMem, int destOffset) {
	assert(isSimulationWithCOBA());

	int ptrPos, length;

	// resolve the neuron range: whole regular population or a single local group
	if (lGrpId == ALL) {
		ptrPos = 0;
		length = networkConfigs[netId].numNReg;
	} else {
		ptrPos = groupConfigs[netId][lGrpId].lStartN;
		length = groupConfigs[netId][lGrpId].numN;
	}
	assert(length <= networkConfigs[netId].numNReg);
	assert(length > 0);

#ifdef LN_I_CALC_TYPES
	// LN_I_CALC_TYPES builds maintain rise, decay, AND combined buffers in parallel
	assert(src->gGABAb_r != NULL);
	if (allocateMem)
		dest->gGABAb_r = new float[length];
	memcpy(&dest->gGABAb_r[ptrPos], &src->gGABAb_r[ptrPos], sizeof(float) * length);

	assert(src->gGABAb_d != NULL);
	if (allocateMem)
		dest->gGABAb_d = new float[length];
	memcpy(&dest->gGABAb_d[ptrPos], &src->gGABAb_d[ptrPos], sizeof(float) * length);

	assert(src->gGABAb != NULL);
	if (allocateMem)
		dest->gGABAb = new float[length];
	memcpy(&dest->gGABAb[ptrPos + destOffset], &src->gGABAb[ptrPos], sizeof(float) * length);
#else
	if (isSimulationWithGABAbRise()) {
		// rise and decay tracked separately; the combined gGABAb buffer is not used
		assert(src->gGABAb_r != NULL);
		if(allocateMem)
			dest->gGABAb_r = new float[length];
		memcpy(&dest->gGABAb_r[ptrPos], &src->gGABAb_r[ptrPos], sizeof(float) * length);

		assert(src->gGABAb_d != NULL);
		if(allocateMem)
			dest->gGABAb_d = new float[length];
		memcpy(&dest->gGABAb_d[ptrPos], &src->gGABAb_d[ptrPos], sizeof(float) * length);
	} else {
		// single combined GABAb conductance buffer
		assert(src->gGABAb != NULL);
		if(allocateMem)
			dest->gGABAb = new float[length];
		memcpy(&dest->gGABAb[ptrPos + destOffset], &src->gGABAb[ptrPos], sizeof(float) * length);
	}
#endif
}
3216 
/*!
 * \brief Copies the neuron-monitor state buffers (voltage, recovery, total current) from
 *        \p src to \p dest on the CPU backend.
 *
 * Each buffer holds 1000 ms worth of samples, laid out as
 * [time t][group][neuron-slot] with MAX_NEURON_MON_GRP_SZIE slots per group
 * (macro name sic — project-wide spelling).
 * For lGrpId == ALL, the whole buffer is copied in one pass; for a single group,
 * that group's slot range is copied for each of the 1000 time steps.
 */
void SNN::copyNeuronStateBuffer(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, bool allocateMem) {
	int ptrPos, length;

	// full buffer size: numGroups * slots-per-group * 1000 ms
	assert(src->nVBuffer != NULL);
	if (allocateMem) dest->nVBuffer = new float[networkConfigs[netId].numGroups * MAX_NEURON_MON_GRP_SZIE * 1000];

	assert(src->nUBuffer != NULL);
	if (allocateMem) dest->nUBuffer = new float[networkConfigs[netId].numGroups * MAX_NEURON_MON_GRP_SZIE * 1000];

	assert(src->nIBuffer != NULL);
	if (allocateMem) dest->nIBuffer = new float[networkConfigs[netId].numGroups * MAX_NEURON_MON_GRP_SZIE * 1000];

	if (lGrpId == ALL) {
		ptrPos = 0;
		length = networkConfigs[netId].numGroups * MAX_NEURON_MON_GRP_SZIE * 1000;

		// copy neuron information for all groups in one contiguous block
		memcpy(&dest->nVBuffer[ptrPos], &src->nVBuffer[ptrPos], sizeof(float) * length);
		memcpy(&dest->nUBuffer[ptrPos], &src->nUBuffer[ptrPos], sizeof(float) * length);
		memcpy(&dest->nIBuffer[ptrPos], &src->nIBuffer[ptrPos], sizeof(float) * length);
	}
	else {
		// one strided copy per millisecond: this group's slot range within each time slice
		for (int t = 0; t < 1000; t++) {
			ptrPos = networkConfigs[netId].numGroups * MAX_NEURON_MON_GRP_SZIE * t + lGrpId * MAX_NEURON_MON_GRP_SZIE;
			length = MAX_NEURON_MON_GRP_SZIE;

			assert((ptrPos + length) <= networkConfigs[netId].numGroups * MAX_NEURON_MON_GRP_SZIE * 1000);
			assert(length > 0);

			// copy neuron information
			memcpy(&dest->nVBuffer[ptrPos], &src->nVBuffer[ptrPos], sizeof(float) * length);
			memcpy(&dest->nUBuffer[ptrPos], &src->nUBuffer[ptrPos], sizeof(float) * length);
			memcpy(&dest->nIBuffer[ptrPos], &src->nIBuffer[ptrPos], sizeof(float) * length);
		}
	}
}
3270 
3271 
3289 void SNN::copyExternalCurrent(int netId, int lGrpId, RuntimeData* dest, bool allocateMem) {
3290  int posN, lengthN;
3291 
3292  if(lGrpId == ALL) {
3293  posN = 0;
3294  lengthN = networkConfigs[netId].numNReg;
3295  } else {
3296  assert(lGrpId >= 0);
3297  posN = groupConfigs[netId][lGrpId].lStartN;
3298  lengthN = groupConfigs[netId][lGrpId].numN;
3299  }
3300  assert(lengthN >= 0 && lengthN <= networkConfigs[netId].numNReg); // assert NOT poisson neurons
3301 
3302  KERNEL_DEBUG("copyExternalCurrent: lGrpId=%d, ptrPos=%d, length=%d, allocate=%s", lGrpId, posN, lengthN, allocateMem?"y":"n");
3303 
3304  if(allocateMem)
3305  dest->extCurrent = new float[lengthN];
3306  memcpy(&(dest->extCurrent[posN]), &(managerRuntimeData.extCurrent[posN]), sizeof(float) * lengthN);
3307 }
3308 
3327 void SNN::copyNeuronParameters(int netId, int lGrpId, RuntimeData* dest, bool allocateMem) {
3328  int ptrPos, length;
3329 
3330  // when allocating we are allocating the memory.. we need to do it completely... to avoid memory fragmentation..
3331  if (allocateMem) {
3332  assert(lGrpId == ALL);
3333  assert(dest->Izh_a == NULL);
3334  assert(dest->Izh_b == NULL);
3335  assert(dest->Izh_c == NULL);
3336  assert(dest->Izh_d == NULL);
3337  assert(dest->Izh_C == NULL);
3338  assert(dest->Izh_k == NULL);
3339  assert(dest->Izh_vr == NULL);
3340  assert(dest->Izh_vt == NULL);
3341  assert(dest->Izh_vpeak == NULL);
3342  assert(dest->lif_tau_m == NULL);
3343  assert(dest->lif_tau_ref == NULL);
3344  assert(dest->lif_tau_ref_c == NULL);
3345  assert(dest->lif_vTh == NULL);
3346  assert(dest->lif_vReset == NULL);
3347  assert(dest->lif_gain == NULL);
3348  assert(dest->lif_bias == NULL);
3349  }
3350 
3351  if(lGrpId == ALL) {
3352  ptrPos = 0;
3353  length = networkConfigs[netId].numNReg;
3354  }
3355  else {
3356  ptrPos = groupConfigs[netId][lGrpId].lStartN;
3357  length = groupConfigs[netId][lGrpId].numN;
3358  }
3359 
3360  if(allocateMem)
3361  dest->Izh_a = new float[length];
3362  memcpy(&dest->Izh_a[ptrPos], &(managerRuntimeData.Izh_a[ptrPos]), sizeof(float) * length);
3363 
3364  if(allocateMem)
3365  dest->Izh_b = new float[length];
3366  memcpy(&dest->Izh_b[ptrPos], &(managerRuntimeData.Izh_b[ptrPos]), sizeof(float) * length);
3367 
3368  if(allocateMem)
3369  dest->Izh_c = new float[length];
3370  memcpy(&dest->Izh_c[ptrPos], &(managerRuntimeData.Izh_c[ptrPos]), sizeof(float) * length);
3371 
3372  if(allocateMem)
3373  dest->Izh_d = new float[length];
3374  memcpy(&dest->Izh_d[ptrPos], &(managerRuntimeData.Izh_d[ptrPos]), sizeof(float) * length);
3375 
3376  if (allocateMem)
3377  dest->Izh_C = new float[length];
3378  memcpy(&dest->Izh_C[ptrPos], &(managerRuntimeData.Izh_C[ptrPos]), sizeof(float) * length);
3379 
3380  if (allocateMem)
3381  dest->Izh_k = new float[length];
3382  memcpy(&dest->Izh_k[ptrPos], &(managerRuntimeData.Izh_k[ptrPos]), sizeof(float) * length);
3383 
3384  if (allocateMem)
3385  dest->Izh_vr = new float[length];
3386  memcpy(&dest->Izh_vr[ptrPos], &(managerRuntimeData.Izh_vr[ptrPos]), sizeof(float) * length);
3387 
3388  if (allocateMem)
3389  dest->Izh_vt = new float[length];
3390  memcpy(&dest->Izh_vt[ptrPos], &(managerRuntimeData.Izh_vt[ptrPos]), sizeof(float) * length);
3391 
3392  if (allocateMem)
3393  dest->Izh_vpeak = new float[length];
3394  memcpy(&dest->Izh_vpeak[ptrPos], &(managerRuntimeData.Izh_vpeak[ptrPos]), sizeof(float) * length);
3395 
3396  //LIF neuron
3397  if(allocateMem)
3398  dest->lif_tau_m = new int[length];
3399  memcpy(&dest->lif_tau_m[ptrPos], &(managerRuntimeData.lif_tau_m[ptrPos]), sizeof(int) * length);
3400 
3401  if(allocateMem)
3402  dest->lif_tau_ref = new int[length];
3403  memcpy(&dest->lif_tau_ref[ptrPos], &(managerRuntimeData.lif_tau_ref[ptrPos]), sizeof(int) * length);
3404 
3405  if(allocateMem)
3406  dest->lif_tau_ref_c = new int[length];
3407  memcpy(&dest->lif_tau_ref_c[ptrPos], &(managerRuntimeData.lif_tau_ref_c[ptrPos]), sizeof(int) * length);
3408 
3409  if(allocateMem)
3410  dest->lif_vTh = new float[length];
3411  memcpy(&dest->lif_vTh[ptrPos], &(managerRuntimeData.lif_vTh[ptrPos]), sizeof(float) * length);
3412 
3413  if(allocateMem)
3414  dest->lif_vReset = new float[length];
3415  memcpy(&dest->lif_vReset[ptrPos], &(managerRuntimeData.lif_vReset[ptrPos]), sizeof(float) * length);
3416 
3417  if(allocateMem)
3418  dest->lif_gain = new float[length];
3419  memcpy(&dest->lif_gain[ptrPos], &(managerRuntimeData.lif_gain[ptrPos]), sizeof(float) * length);
3420 
3421  if(allocateMem)
3422  dest->lif_bias = new float[length];
3423  memcpy(&dest->lif_bias[ptrPos], &(managerRuntimeData.lif_bias[ptrPos]), sizeof(float) * length);
3424 
3425  // pre-compute baseFiringInv for fast computation on CPU cores
3426  if (sim_with_homeostasis) {
3427  float* baseFiringInv = new float[length];
3428  for(int nid = 0; nid < length; nid++) {
3429  if (managerRuntimeData.baseFiring[nid] != 0.0f)
3430  baseFiringInv[nid] = 1.0f / managerRuntimeData.baseFiring[ptrPos + nid];
3431  else
3432  baseFiringInv[nid] = 0.0;
3433  }
3434 
3435  if(allocateMem)
3436  dest->baseFiringInv = new float[length];
3437  memcpy(&dest->baseFiringInv[ptrPos], baseFiringInv, sizeof(float) * length);
3438 
3439  if(allocateMem)
3440  dest->baseFiring = new float[length];
3441  memcpy(&dest->baseFiring[ptrPos], managerRuntimeData.baseFiring, sizeof(float) * length);
3442 
3443  delete [] baseFiringInv;
3444  }
3445 }
3446 
3465 void SNN::copySTPState(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, bool allocateMem) {
3466  // STP feature is optional, do addtional check for memory space
3467  if(allocateMem) {
3468  assert(dest->stpu == NULL);
3469  assert(dest->stpx == NULL);
3470  } else {
3471  assert(dest->stpu != NULL);
3472  assert(dest->stpx != NULL);
3473  }
3474  assert(src->stpu != NULL); assert(src->stpx != NULL);
3475 
3476  if(allocateMem)
3477  dest->stpu = new float[networkConfigs[netId].numN * (networkConfigs[netId].maxDelay + 1)];
3478  memcpy(dest->stpu, src->stpu, sizeof(float) * networkConfigs[netId].numN * (networkConfigs[netId].maxDelay + 1));
3479 
3480  if(allocateMem)
3481  dest->stpx = new float[networkConfigs[netId].numN * (networkConfigs[netId].maxDelay + 1)];
3482  memcpy(dest->stpx, src->stpx, sizeof(float) * networkConfigs[netId].numN * (networkConfigs[netId].maxDelay + 1));
3483 }
3484 
3485 // ToDo: move grpDA(5HT, ACh, NE)Buffer to copyAuxiliaryData
/*!
 * \brief Copies per-group neuromodulator state (DA, 5HT, ACh, NE) and their 1000 ms
 *        history buffers from \p src to \p dest on the CPU backend.
 *
 * Current concentrations (one float per group) are always copied in full. The history
 * buffers (1000 samples per group) are copied in full for lGrpId == ALL, or for a
 * single group's 1000-sample slice otherwise; allocation is only permitted with ALL.
 * \todo move grpDA(5HT, ACh, NE)Buffer to copyAuxiliaryData
 */
void SNN::copyGroupState(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, bool allocateMem) {
	if (allocateMem) {
		// allocation requires an unallocated CPU-side destination
		assert(dest->memType == CPU_MEM && !dest->allocated);
		dest->grpDA = new float[networkConfigs[netId].numGroups];
		dest->grp5HT = new float[networkConfigs[netId].numGroups];
		dest->grpACh = new float[networkConfigs[netId].numGroups];
		dest->grpNE = new float[networkConfigs[netId].numGroups];
	}
	// current neuromodulator concentrations, one value per group
	memcpy(dest->grpDA, src->grpDA, sizeof(float) * networkConfigs[netId].numGroups);
	memcpy(dest->grp5HT, src->grp5HT, sizeof(float) * networkConfigs[netId].numGroups);
	memcpy(dest->grpACh, src->grpACh, sizeof(float) * networkConfigs[netId].numGroups);
	memcpy(dest->grpNE, src->grpNE, sizeof(float) * networkConfigs[netId].numGroups);

	if (lGrpId == ALL) {
		if (allocateMem) {
			assert(dest->memType == CPU_MEM && !dest->allocated);
			// 1000 ms of history per group
			dest->grpDABuffer = new float[1000 * networkConfigs[netId].numGroups];
			dest->grp5HTBuffer = new float[1000 * networkConfigs[netId].numGroups];
			dest->grpAChBuffer = new float[1000 * networkConfigs[netId].numGroups];
			dest->grpNEBuffer = new float[1000 * networkConfigs[netId].numGroups];
		}
		memcpy(dest->grpDABuffer, src->grpDABuffer, sizeof(float) * 1000 * networkConfigs[netId].numGroups);
		memcpy(dest->grp5HTBuffer, src->grp5HTBuffer, sizeof(float) * 1000 * networkConfigs[netId].numGroups);
		memcpy(dest->grpAChBuffer, src->grpAChBuffer, sizeof(float) * 1000 * networkConfigs[netId].numGroups);
		memcpy(dest->grpNEBuffer, src->grpNEBuffer, sizeof(float) * 1000 * networkConfigs[netId].numGroups);
	} else {
		// single-group copy: buffers must already exist
		assert(!allocateMem);
		memcpy(&dest->grpDABuffer[lGrpId * 1000], &src->grpDABuffer[lGrpId * 1000], sizeof(float) * 1000);
		memcpy(&dest->grp5HTBuffer[lGrpId * 1000], &src->grp5HTBuffer[lGrpId * 1000], sizeof(float) * 1000);
		memcpy(&dest->grpAChBuffer[lGrpId * 1000], &src->grpAChBuffer[lGrpId * 1000], sizeof(float) * 1000);
		memcpy(&dest->grpNEBuffer[lGrpId * 1000], &src->grpNEBuffer[lGrpId * 1000], sizeof(float) * 1000);
	}
}
3536 
/*!
 * \brief Copies (and optionally allocates) auxiliary runtime data for the CPU backend:
 *        spike-generator bits, Poisson rates, synaptic spike bookkeeping, firing tables,
 *        lookup arrays, and per-second spike counters.
 *
 * \param netId       local network id
 * \param lGrpId      local group id (forwarded to copyNeuronSpikeCount)
 * \param dest        destination runtime struct (CPU memory)
 * \param allocateMem if true, all destination buffers are allocated first
 */
void SNN::copyAuxiliaryData(int netId, int lGrpId, RuntimeData* dest, bool allocateMem) {
	assert(networkConfigs[netId].numN > 0);

	// spike-generator bit vector: one bit per spike-generator-controlled neuron
	if(allocateMem)
		dest->spikeGenBits = new unsigned int[networkConfigs[netId].numNSpikeGen / 32 + 1];
	memset(dest->spikeGenBits, 0, sizeof(int) * (networkConfigs[netId].numNSpikeGen / 32 + 1));

	// allocate the poisson neuron poissonFireRate
	if(allocateMem)
		dest->poissonFireRate = new float[networkConfigs[netId].numNPois];
	memset(dest->poissonFireRate, 0, sizeof(float) * networkConfigs[netId].numNPois);

	// synaptic auxiliary data
	// I_set: a bit vector indicating which synapse got a spike; one row of
	// I_setLength words per regular neuron
	if(allocateMem) {
		networkConfigs[netId].I_setLength = ceil(((networkConfigs[netId].maxNumPreSynN) / 32.0f));
		dest->I_set = new int[networkConfigs[netId].numNReg * networkConfigs[netId].I_setLength];
	}
	assert(networkConfigs[netId].maxNumPreSynN >= 0);
	memset(dest->I_set, 0, sizeof(int) * networkConfigs[netId].numNReg * networkConfigs[netId].I_setLength);

	// synSpikeTime: an array indicating the last time each synapse got a spike
	if(allocateMem)
		dest->synSpikeTime = new int[networkConfigs[netId].numPreSynNet];
	memcpy(dest->synSpikeTime, managerRuntimeData.synSpikeTime, sizeof(int) * networkConfigs[netId].numPreSynNet);

	// neural auxiliary data
	// lastSpikeTime: last time each neuron (including externally assigned ones) emitted a spike
	if(allocateMem)
		dest->lastSpikeTime = new int[networkConfigs[netId].numNAssigned];
	memcpy(dest->lastSpikeTime, managerRuntimeData.lastSpikeTime, sizeof(int) * networkConfigs[netId].numNAssigned);

	// auxiliary data for recording spike count of each neuron
	// NOTE(review): allocate flag is hard-coded true here regardless of allocateMem — confirm
	// this routine is only ever called once per network with allocateMem set
	copyNeuronSpikeCount(netId, lGrpId, dest, &managerRuntimeData, true, 0);

	// quick lookup array for local group ids
	if(allocateMem)
		dest->grpIds = new short int[networkConfigs[netId].numNAssigned];
	memcpy(dest->grpIds, managerRuntimeData.grpIds, sizeof(short int) * networkConfigs[netId].numNAssigned);

	// quick lookup array for conn ids
	if(allocateMem)
		dest->connIdsPreIdx = new short int[networkConfigs[netId].numPreSynNet];
	memcpy(dest->connIdsPreIdx, managerRuntimeData.connIdsPreIdx, sizeof(short int) * networkConfigs[netId].numPreSynNet);

	// reset variables related to spike count
	// Note: the GPU counterpart is not required to do this
	dest->spikeCountSec = 0;
	dest->spikeCountD1Sec = 0;
	dest->spikeCountD2Sec = 0;
	dest->spikeCountExtRxD1Sec = 0;
	dest->spikeCountExtRxD2Sec = 0;
	dest->spikeCountLastSecLeftD2 = 0;
	dest->spikeCount = 0;
	dest->spikeCountD1 = 0;
	dest->spikeCountD2 = 0;
	dest->nPoissonSpikes = 0;
	dest->spikeCountExtRxD1 = 0;
	dest->spikeCountExtRxD2 = 0;

	// time tables (per-millisecond spike counts for delay-1 and delay-2+ spikes)
	// Note: the GPU counterpart is not required to do this
	if (allocateMem) {
		assert(dest->timeTableD1 == NULL);
		assert(dest->timeTableD2 == NULL);
	}

	if (allocateMem)
		dest->timeTableD1 = new unsigned int[TIMING_COUNT];
	memset(dest->timeTableD1, 0, sizeof(int) * TIMING_COUNT);

	if (allocateMem)
		dest->timeTableD2 = new unsigned int[TIMING_COUNT];
	memset(dest->timeTableD2, 0, sizeof(int) * TIMING_COUNT);

	// firing table
	if (allocateMem) {
		assert(dest->firingTableD1 == NULL);
		assert(dest->firingTableD2 == NULL);
#ifdef LN_AXON_PLAST
		assert(dest->firingTimesD2 == NULL);
#endif
	}

	// allocate 1ms firing table
	if (allocateMem)
		dest->firingTableD1 = new int[networkConfigs[netId].maxSpikesD1];
	if (networkConfigs[netId].maxSpikesD1 > 0)
		memcpy(dest->firingTableD1, managerRuntimeData.firingTableD1, sizeof(int) * networkConfigs[netId].maxSpikesD1);

	// allocate 2+ms firing table
	if(allocateMem)
		dest->firingTableD2 = new int[networkConfigs[netId].maxSpikesD2];
	if (networkConfigs[netId].maxSpikesD2 > 0)
		memcpy(dest->firingTableD2, managerRuntimeData.firingTableD2, sizeof(int) * networkConfigs[netId].maxSpikesD2);

	// allocate 2+ms firing times (axonal plasticity builds only)
#ifdef LN_AXON_PLAST
	if (allocateMem)
		dest->firingTimesD2 = new unsigned int[networkConfigs[netId].maxSpikesD2];
	if (networkConfigs[netId].maxSpikesD2 > 0)
		memcpy(dest->firingTimesD2, managerRuntimeData.firingTimesD2, sizeof(unsigned int) * networkConfigs[netId].maxSpikesD2);
#endif


	// allocate external 1ms firing table: one buffer per group with external connections
	// (the loop variable lGrpId deliberately shadows the parameter here)
	if (allocateMem) {
		dest->extFiringTableD1 = new int*[networkConfigs[netId].numGroups];
		memset(dest->extFiringTableD1, 0 /* NULL */, sizeof(int*) * networkConfigs[netId].numGroups);
		for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) {
			if (groupConfigs[netId][lGrpId].hasExternalConnect) {
				dest->extFiringTableD1[lGrpId] = new int[groupConfigs[netId][lGrpId].numN * NEURON_MAX_FIRING_RATE];
				memset(dest->extFiringTableD1[lGrpId], 0, sizeof(int) * groupConfigs[netId][lGrpId].numN * NEURON_MAX_FIRING_RATE);
			}
		}
	}

	// allocate external 2+ms firing table
	if (allocateMem) {
		dest->extFiringTableD2 = new int*[networkConfigs[netId].numGroups];
		memset(dest->extFiringTableD2, 0 /* NULL */, sizeof(int*) * networkConfigs[netId].numGroups);
		for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) {
			if (groupConfigs[netId][lGrpId].hasExternalConnect) {
				dest->extFiringTableD2[lGrpId] = new int[groupConfigs[netId][lGrpId].numN * NEURON_MAX_FIRING_RATE];
				memset(dest->extFiringTableD2[lGrpId], 0, sizeof(int) * groupConfigs[netId][lGrpId].numN * NEURON_MAX_FIRING_RATE);
			}
		}
	}

	// allocate external 1ms firing table index (current end position per group)
	if (allocateMem)
		dest->extFiringTableEndIdxD1 = new int[networkConfigs[netId].numGroups];
	memset(dest->extFiringTableEndIdxD1, 0, sizeof(int) * networkConfigs[netId].numGroups);


	// allocate external 2+ms firing table index
	if (allocateMem)
		dest->extFiringTableEndIdxD2 = new int[networkConfigs[netId].numGroups];
	memset(dest->extFiringTableEndIdxD2, 0, sizeof(int) * networkConfigs[netId].numGroups);
}
3698 
3717 void SNN::copyNeuronSpikeCount(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, bool allocateMem, int destOffset) {
3718  int posN, lengthN;
3719 
3720  if(lGrpId == ALL) {
3721  posN = 0;
3722  lengthN = networkConfigs[netId].numN;
3723  } else {
3724  posN = groupConfigs[netId][lGrpId].lStartN;
3725  lengthN = groupConfigs[netId][lGrpId].numN;
3726  }
3727  assert(lengthN > 0 && lengthN <= networkConfigs[netId].numN);
3728 
3729  // spike count information
3730  if(allocateMem)
3731  dest->nSpikeCnt = new int[lengthN];
3732  memcpy(&dest->nSpikeCnt[posN + destOffset], &src->nSpikeCnt[posN], sizeof(int) * lengthN);
3733 }
3734 
3735 
3736 #ifdef __NO_PTHREADS__
3737  void SNN::assignPoissonFiringRate_CPU(int netId) {
3738 #else // POSIX
3739  void* SNN::assignPoissonFiringRate_CPU(int netId) {
3740 #endif
3741  assert(runtimeData[netId].memType == CPU_MEM);
3742 
3743  for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) {
3744  // given group of neurons belong to the poisson group....
3745  if (groupConfigs[netId][lGrpId].isSpikeGenerator) {
3746  int lNId = groupConfigs[netId][lGrpId].lStartN;
3747  int gGrpId = groupConfigs[netId][lGrpId].gGrpId;
3748  PoissonRate* rate = groupConfigMDMap[gGrpId].ratePtr;
3749 
3750  // if spikeGenFunc group does not have a Poisson pointer, skip
3751  if (groupConfigMap[gGrpId].spikeGenFunc || rate == NULL)
3752  continue;
3753 
3754  assert(runtimeData[netId].poissonFireRate != NULL);
3755  assert(rate->isOnGPU() == false);
3756  // rates allocated on CPU
3757  memcpy(&runtimeData[netId].poissonFireRate[lNId - networkConfigs[netId].numNReg], rate->getRatePtrCPU(),
3758  sizeof(float) * rate->getNumNeurons());
3759  }
3760  }
3761 }
3762 
3763 #ifndef __NO_PTHREADS__ // POSIX
3764  // Static multithreading subroutine method - helper for the above method
3765  void* SNN::helperAssignPoissonFiringRate_CPU(void* arguments) {
3766  ThreadStruct* args = (ThreadStruct*) arguments;
3767  //printf("\nThread ID: %lu and CPU: %d\n",pthread_self(), sched_getcpu());
3768  ((SNN *)args->snn_pointer) -> assignPoissonFiringRate_CPU(args->netId);
3769  pthread_exit(0);
3770  }
3771 #endif
3772 
3787 void SNN::copyWeightState(int netId, int lGrpId) {
3788  int lengthSyn, posSyn;
3789 
3790  // first copy pre-connections info
3791  copyPreConnectionInfo(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false);
3792 
3793  if (lGrpId == ALL) {
3794  lengthSyn = networkConfigs[netId].numPreSynNet;
3795  posSyn = 0;
3796  }
3797  else {
3798  lengthSyn = 0;
3799  for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++)
3800  lengthSyn += managerRuntimeData.Npre[lNId];
3801 
3802  posSyn = managerRuntimeData.cumulativePre[groupConfigs[netId][lGrpId].lStartN];
3803  }
3804 
3805  assert(posSyn < networkConfigs[netId].numPreSynNet || networkConfigs[netId].numPreSynNet == 0);
3806  assert(lengthSyn <= networkConfigs[netId].numPreSynNet);
3807 
3808  memcpy(&managerRuntimeData.wt[posSyn], &runtimeData[netId].wt[posSyn], sizeof(float) * lengthSyn);
3809 
3810  // copy firing time for individual synapses
3811  //CUDA_CHECK_ERRORS(cudaMemcpy(&managerRuntimeData.synSpikeTime[cumPos_syn], &runtimeData[netId].synSpikeTime[cumPos_syn], sizeof(int) * length_wt, cudaMemcpyDeviceToHost));
3812 
3813  if ((!sim_with_fixedwts) || sim_with_stdp) {
3814  // copy synaptic weight derivative
3815  memcpy(&managerRuntimeData.wtChange[posSyn], &runtimeData[netId].wtChange[posSyn], sizeof(float) * lengthSyn);
3816  }
3817 }
3818 
/*!
 * \brief No-op on the CPU backend: the CPU computing backend accesses
 *        networkConfigs[] directly, so no copy is needed (unlike the GPU backend).
 */
void SNN::copyNetworkConfig(int netId) {
	// do nothing, CPU computing backend can access networkConfigs[] directly
}
3822 
/*!
 * \brief Copies the neuron-to-group-id lookup array from the CPU runtime data back
 *        into the manager runtime data (covers all assigned neurons, incl. external).
 */
void SNN::copyGrpIdsLookupArray(int netId) {
	memcpy(managerRuntimeData.grpIds, runtimeData[netId].grpIds, sizeof(short int) * networkConfigs[netId].numNAssigned);
}
3826 
/*!
 * \brief Copies the pre-synapse-to-connection-id lookup array from the CPU runtime
 *        data back into the manager runtime data.
 */
void SNN::copyConnIdsLookupArray(int netId) {
	memcpy(managerRuntimeData.connIdsPreIdx, runtimeData[netId].connIdsPreIdx, sizeof(short int) * networkConfigs[netId].numPreSynNet);
}
3830 
/*!
 * \brief Copies each neuron's last spike time from the CPU runtime data back into
 *        the manager runtime data.
 * NOTE(review): only numN entries are copied although lastSpikeTime is allocated for
 * numNAssigned neurons (\sa copyAuxiliaryData) — presumably externally assigned
 * neurons need no copy here; confirm.
 */
void SNN::copyLastSpikeTime(int netId) {
	memcpy(managerRuntimeData.lastSpikeTime, runtimeData[netId].lastSpikeTime, sizeof(int) * networkConfigs[netId].numN);
}
3834 
/*!
 * \brief Copies the per-neuron spike flags of the current time step from the CPU
 *        runtime data back into the manager runtime data.
 */
void SNN::copyCurSpikes(int netId) {
	// number of regular (Izhikevich/LIF) neurons in the network
	int length = networkConfigs[netId].numNReg;

	// .\carlsim\kernel\src\snn_cpu_module.cpp(1713): dest->curSpike = new bool[length];
	// manager-side memory is already allocated, \sa void SNN::allocateManagerRuntimeData()
	memcpy(managerRuntimeData.curSpike, runtimeData[netId].curSpike, sizeof(bool) * length );
}
3847 
3848 /* Poisson Neurons
3849 */
void SNN::copyRandNum(int netId) {

	// randNum holds one firing random number per Poisson neuron (\sa snn_datastructure.h);
	// it is consumed by SNN::getPoissonSpike, which indexes it by (lNId - numNReg):
	//bool SNN::getPoissonSpike(int lNId, int netId) {
	//return runtimeData[netId].randNum[lNId - networkConfigs[netId].numNReg] * 1000.0f
	// < runtimeData[netId].poissonFireRate[lNId - networkConfigs[netId].numNReg];
	//}

	// number of Poisson neurons in the network
	int length = networkConfigs[netId].numNPois; // - networkConfigs[netId].numNSpikeGen;

	memcpy(managerRuntimeData.randNum, runtimeData[netId].randNum, sizeof(float) * length );

	//memcpy(managerRuntimeData.poissonFireRate, runtimeData[netId].poissonFireRate, sizeof(float) * length );
}
3870 
3871 
3872 /* Poisson Neurons, fire Rate
3873 */
void SNN::copyPoissonFireRate(int netId) {

	// poissonFireRate holds one rate value per Poisson neuron (\sa snn_datastructure.h);
	// it is consumed by SNN::getPoissonSpike, which indexes it by (lNId - numNReg):
	//bool SNN::getPoissonSpike(int lNId, int netId) {
	//return runtimeData[netId].randNum[lNId - networkConfigs[netId].numNReg] * 1000.0f
	// < runtimeData[netId].poissonFireRate[lNId - networkConfigs[netId].numNReg];
	//}

	// number of Poisson neurons in the network
	//int length = networkConfigs[netId].numNRateGen; // should be equal to numNPois but is 0
	int length = networkConfigs[netId].numNPois; // - networkConfigs[netId].numNSpikeGen;

	memcpy(managerRuntimeData.poissonFireRate, runtimeData[netId].poissonFireRate, sizeof(float) * length );
}
3893 
3894 
3895 
3896 
3899 void SNN::copySpikeGenBits(int netId) {
3900 
3901  // snn_datastructure.h
3902  //unsigned int* spikeGenBits;
3903 
3904  //bool SNN::getSpikeGenBit(unsigned int nIdPos, int netId) {
3905  //const int nIdBitPos = nIdPos % 32;
3906  //const int nIdIndex = nIdPos / 32;
3907  //return ((runtimeData[netId].spikeGenBits[nIdIndex] >> nIdBitPos) & 0x1);
3908  //}
3909 
3910  // get a amount of neurons controlled by spike generator in the network
3911  int length = networkConfigs[netId].numNSpikeGen;
3912 
3913  assert(sizeof(int) == 4); // 4 bytes = 32 bits
3914 
3915  //assert(false); // test GPU ..
3916 
3918  //memset(managerRuntimeData.spikeGenBits, 0, sizeof(int) * (networkConfigs[netId].numNSpikeGen / 32 + 1));
3919 
3920 
3921  //\todo spikeGenBits seems alrady by filled CAUTION
3922  //\todo ISSUE: offset 1ms, the spikeGenBits are _before_ the step, however, but then they are not yet in the runtime data
3923  memcpy(managerRuntimeData.spikeGenBits, runtimeData[netId].spikeGenBits, length / 32 + 1 );
3924 }
3925 
3926 
3930 void SNN::copyNetworkSpikeCount(int netId,
3931  unsigned int* spikeCountD1, unsigned int* spikeCountD2,
3932  unsigned int* spikeCountExtD1, unsigned int* spikeCountExtD2) {
3933 
3934  *spikeCountExtD2 = runtimeData[netId].spikeCountExtRxD2;
3935  *spikeCountExtD1 = runtimeData[netId].spikeCountExtRxD1;
3936  *spikeCountD2 = runtimeData[netId].spikeCountD2;
3937  *spikeCountD1 = runtimeData[netId].spikeCountD1;
3938 }
3939 
3945 void SNN::copySpikeTables(int netId) {
3946  unsigned int spikeCountD1Sec, spikeCountD2Sec, spikeCountLastSecLeftD2;
3947 
3948  spikeCountLastSecLeftD2 = runtimeData[netId].spikeCountLastSecLeftD2;
3949  spikeCountD2Sec = runtimeData[netId].spikeCountD2Sec;
3950  spikeCountD1Sec = runtimeData[netId].spikeCountD1Sec;
3951  memcpy(managerRuntimeData.firingTableD2, runtimeData[netId].firingTableD2, sizeof(int) * (spikeCountD2Sec + spikeCountLastSecLeftD2));
3952  memcpy(managerRuntimeData.firingTableD1, runtimeData[netId].firingTableD1, sizeof(int) * spikeCountD1Sec);
3953 #ifdef LN_AXON_PLAST
3954  memcpy(managerRuntimeData.firingTimesD2, runtimeData[netId].firingTimesD2, sizeof(unsigned int) * (spikeCountD2Sec + spikeCountLastSecLeftD2));
3955 #endif
3956  memcpy(managerRuntimeData.timeTableD2, runtimeData[netId].timeTableD2, sizeof(int) * (1000 + networkConfigs[netId].maxDelay + 1));
3957  memcpy(managerRuntimeData.timeTableD1, runtimeData[netId].timeTableD1, sizeof(int) * (1000 + networkConfigs[netId].maxDelay + 1));
3958 }
3959 
3960 #ifdef __NO_PTHREADS__
3961  void SNN::deleteRuntimeData_CPU(int netId) {
3962 #else // POSIX
3963  void* SNN::deleteRuntimeData_CPU(int netId) {
3964 #endif
3965  assert(runtimeData[netId].memType == CPU_MEM);
3966  // free all pointers
3967  delete [] runtimeData[netId].voltage;
3968  delete [] runtimeData[netId].nextVoltage;
3969  delete [] runtimeData[netId].recovery;
3970  delete [] runtimeData[netId].current;
3971  delete [] runtimeData[netId].extCurrent;
3972  delete [] runtimeData[netId].curSpike;
3973  delete [] runtimeData[netId].Npre;
3974  delete [] runtimeData[netId].Npre_plastic;
3975  delete [] runtimeData[netId].Npre_plasticInv;
3976  delete [] runtimeData[netId].Npost;
3977  delete [] runtimeData[netId].cumulativePost;
3978  delete [] runtimeData[netId].cumulativePre;
3979  delete [] runtimeData[netId].synSpikeTime;
3980  delete [] runtimeData[netId].wt;
3981  delete [] runtimeData[netId].wtChange;
3982  delete [] runtimeData[netId].maxSynWt;
3983  delete [] runtimeData[netId].nSpikeCnt;
3984  delete [] runtimeData[netId].avgFiring;
3985  delete [] runtimeData[netId].baseFiring;
3986  delete [] runtimeData[netId].baseFiringInv;
3987 
3988  delete [] runtimeData[netId].grpDA;
3989  delete [] runtimeData[netId].grp5HT;
3990  delete [] runtimeData[netId].grpACh;
3991  delete [] runtimeData[netId].grpNE;
3992 
3993  delete [] runtimeData[netId].grpDABuffer;
3994  delete [] runtimeData[netId].grp5HTBuffer;
3995  delete [] runtimeData[netId].grpAChBuffer;
3996  delete [] runtimeData[netId].grpNEBuffer;
3997 
3998  if (networkConfigs[netId].sim_with_nm) {
3999  delete[] runtimeData[netId].nVBuffer;
4000  delete[] runtimeData[netId].nUBuffer;
4001  delete[] runtimeData[netId].nIBuffer;
4002  }
4003 
4004  delete [] runtimeData[netId].grpIds;
4005 
4006  delete [] runtimeData[netId].Izh_a;
4007  delete [] runtimeData[netId].Izh_b;
4008  delete [] runtimeData[netId].Izh_c;
4009  delete [] runtimeData[netId].Izh_d;
4010  delete [] runtimeData[netId].Izh_C;
4011  delete [] runtimeData[netId].Izh_k;
4012  delete [] runtimeData[netId].Izh_vr;
4013  delete [] runtimeData[netId].Izh_vt;
4014  delete [] runtimeData[netId].Izh_vpeak;
4015 
4016  delete [] runtimeData[netId].lif_tau_m;
4017  delete [] runtimeData[netId].lif_tau_ref;
4018  delete [] runtimeData[netId].lif_tau_ref_c;
4019  delete [] runtimeData[netId].lif_vTh;
4020  delete [] runtimeData[netId].lif_vReset;
4021  delete [] runtimeData[netId].lif_gain;
4022  delete [] runtimeData[netId].lif_bias;
4023 
4024  delete [] runtimeData[netId].gAMPA;
4025 #ifdef LN_I_CALC_TYPES
4026  // group -> sim is mixed
4027  // \todo check creation in cpu_module -> new, malloc
4028  delete[] runtimeData[netId].gNMDA_r;
4029  delete[] runtimeData[netId].gNMDA_d;
4030  delete[] runtimeData[netId].gNMDA;
4031  delete[] runtimeData[netId].gGABAa;
4032  delete[] runtimeData[netId].gGABAb_r;
4033  delete[] runtimeData[netId].gGABAb_d;
4034  delete[] runtimeData[netId].gGABAb;
4035 #else
4036  if (sim_with_NMDA_rise) {
4037  delete [] runtimeData[netId].gNMDA_r;
4038  delete [] runtimeData[netId].gNMDA_d;
4039  }
4040  else {
4041  delete [] runtimeData[netId].gNMDA;
4042  }
4043  delete [] runtimeData[netId].gGABAa;
4044  if (sim_with_GABAb_rise) {
4045  delete [] runtimeData[netId].gGABAb_r;
4046  delete [] runtimeData[netId].gGABAb_d;
4047  }
4048  else {
4049  delete [] runtimeData[netId].gGABAb;
4050  }
4051 #endif
4052 
4053  delete [] runtimeData[netId].stpu;
4054  delete [] runtimeData[netId].stpx;
4055 
4056  delete [] runtimeData[netId].connIdsPreIdx;
4057 
4058  delete [] runtimeData[netId].postDelayInfo;
4059  delete [] runtimeData[netId].postSynapticIds;
4060  delete [] runtimeData[netId].preSynapticIds;
4061  delete [] runtimeData[netId].I_set;
4062  delete [] runtimeData[netId].poissonFireRate;
4063  delete [] runtimeData[netId].lastSpikeTime;
4064  delete [] runtimeData[netId].spikeGenBits;
4065 
4066  delete [] runtimeData[netId].timeTableD1;
4067  delete [] runtimeData[netId].timeTableD2;
4068 
4069  delete [] runtimeData[netId].firingTableD2;
4070  delete [] runtimeData[netId].firingTableD1;
4071 
4072 #ifdef LN_AXON_PLAST
4073  delete [] runtimeData[netId].firingTimesD2;
4074 #endif
4075 
4076  int** tempPtrs;
4077  tempPtrs = new int*[networkConfigs[netId].numGroups];
4078 
4079  // fetch device memory address stored in extFiringTableD2
4080  memcpy(tempPtrs, runtimeData[netId].extFiringTableD2, sizeof(int*) * networkConfigs[netId].numGroups);
4081  for (int i = 0; i < networkConfigs[netId].numGroups; i++)
4082  delete [] tempPtrs[i];
4083  delete [] runtimeData[netId].extFiringTableD2;
4084 
4085  // fetch device memory address stored in extFiringTableD1
4086  memcpy(tempPtrs, runtimeData[netId].extFiringTableD1, sizeof(int*) * networkConfigs[netId].numGroups);
4087  for (int i = 0; i < networkConfigs[netId].numGroups; i++)
4088  delete [] tempPtrs[i];
4089  delete [] runtimeData[netId].extFiringTableD1;
4090 
4091  delete [] tempPtrs;
4092 
4093  delete [] runtimeData[netId].extFiringTableEndIdxD2;
4094  delete [] runtimeData[netId].extFiringTableEndIdxD1;
4095 
4096  if (runtimeData[netId].randNum != NULL) delete [] runtimeData[netId].randNum;
4097  runtimeData[netId].randNum = NULL;
4098 
4099 #ifdef LN_I_CALC_TYPES
4100  //if(runtimeData[netId].!= NULL)
4101  // \todo LN2021
4102 #endif
4103 
4104 }
4105 
4106 #ifndef __NO_PTHREADS__ // POSIX
4107  // Static multithreading subroutine method - helper for the above method
4108  void* SNN::helperDeleteRuntimeData_CPU(void* arguments) {
4109  ThreadStruct* args = (ThreadStruct*) arguments;
4110  //printf("\nThread ID: %lu and CPU: %d\n",pthread_self(), sched_getcpu());
4111  ((SNN *)args->snn_pointer) -> deleteRuntimeData_CPU(args->netId);
4112  pthread_exit(0);
4113  }
4114 #endif
float * maxSynWt
maximum synaptic weight for a connection
float * voltage
membrane potential for each regular neuron
Class for generating Poisson spike trains.
Definition: poisson_rate.h:88
float * randNum
firing random number. max value is 10,000
int * synSpikeTime
stores the last spike time of a synapse
#define IS_REGULAR_NEURON(nid, numNReg, numNPois)
float decayNE
decay rate for Noradrenaline, published by GroupConfig
timing-based curve
bool isSimulationWithGABAbRise()
Definition: snn.h:766
float dvdtIzhikevich9(float volt, float recov, float invCapac, float izhK, float voltRest, float voltInst, float totalCurrent, float timeStep=1.0f)
4 NM weighted (and normalized,boosted,damped), Niedermeier (2021)
#define ALL
CARLsim common definitions.
float homeostasisScale
published by GroupConfig
unsigned int * timeTableD1
firing table, only used in CPU_MODE currently
int * nSpikeCnt
homeostatic plasticity variables
float * baseFiringInv
only used on GPU
serotonin-modulated STDP, nearest-neighbor
unsigned int spikeCountD1Sec
the total number of spikes with axonal delay == 1 in 1 second, used in CPU_MODE currently ...
unsigned int spikeCountD1
the total number of spikes with axonal delay == 1 in a simulation, used in CPU_MODE currently ...
#define TIMING_COUNT
#define KERNEL_ERROR_UNKNOWN_INTEG
Definition: error_code.h:188
unsigned int spikeCountExtRxD2
the number of external spikes with axonal delay > 1 in a simulation, used in CPU_MODE currently ...
#define TARGET_NE
bool allocated
true if all data has been allocated
float decayDP
decay rate for Dopamine, published by GroupConfig
NE alpha2 receptor (connection), Avery, Dutt, Krichmar (2013)
runtime data is allocated on CPU (main) memory
float * gAMPA
conductance of gAMPA
#define KERNEL_WARN(formatc,...)
#define MAX_NEURON_MON_GRP_SZIE
#define STDP(t, a, b)
#define NEURON_MAX_FIRING_RATE
int * lif_tau_m
parameters for a LIF spiking group
unsigned int * cumulativePre
unsigned * firingTimesD2
stores the actual firing time
float * Npre_plasticInv
stores the 1/number of plastic input connections, only used on GPU
float dvdtIzhikevich4(float volt, float recov, float totalCurrent, float timeStep=1.0f)
float * getRatePtrCPU()
Returns pointer to CPU-allocated firing rate array (deprecated)
unsigned int spikeCountD2
the total number of spikes with axonal delay > 1 in a simulation, used in CPU_MODE currently ...
void exitSimulation(int val=1)
deallocates all dynamical structures and exits
#define CPU_RUNTIME_BASE
float wstptauu[NM_NE+3]
Array size = last index + 1 + additional elementsnorm + base.
#define KERNEL_ERROR(formatc,...)
acetylcholine-modulated STDP, nearest-neighbor
int numN
published by GroupConfig
float decay5HT
decay rate for Serotonin, published by GroupConfig
float * wt
stores the weight of a synaptic connection
void printEntrails(char *buffer, unsigned length, int gGrpIdPre, int gGrpIdPost)
float baseNE
baseline concentration of Noradrenaline, published by GroupConfig
int numPreSynapses
the total number of pre-connections of a group, published by GroupConfigMD
symmetric pulse curve
int ** extFiringTableD1
external firing table, only used on GPU
standard STDP of Bi & Poo (2001), nearest-neighbor
float releaseDP
release per spike for Dopamine
unsigned short * Npre_plastic
stores the number of plastic input connections to a neuron
int gGrpId
published by GroupConfigMD
int numPostSynapses
the total number of post-connections of a group, published by GroupConfigMD
unsigned int spikeCountLastSecLeftD2
the number of spikes left in the last second, used in CPU_MODE currently
#define KERNEL_INFO(formatc,...)
bool isSimulationWithCOBA()
Definition: snn.h:763
float baseDP
baseline concentration of Dopamine, published by GroupConfig
float decayACh
decay rate for Acetylcholine, published by GroupConfig
int maxDelay
maximum axonal delay in the global network
short int * connIdsPreIdx
connectId, per synapse, presynaptic cumulative indexing
unsigned int spikeCount
the total number of spikes in a simulation, used in CPU_MODE currently
protein kinase/phospholiphase controlled LTP/LPD adopted from Nadim & Bucher (2014) ...
int * I_set
an array of bits indicating which synapse got a spike
dopamine-modulated STDP, nearest-neighbor
#define TARGET_GABAb
bool isSimulationWithNMDARise()
Definition: snn.h:765
unsigned short * Npost
stores the number of output connections from a neuron.
unsigned int spikeCountExtRxD1Sec
#define TARGET_GABAa
int getNumNeurons()
Returns the number of neurons for which to generate Poisson spike trains.
unsigned int Type
published by GroupConfig
unsigned int spikeCountExtRxD2Sec
float base5HT
baseline concentration of Serotonin, published by GroupConfig
DA D2 receptor (connection), Avery, Krichmar (2015)
conductance
int * lastSpikeTime
stores the last spike time of a neuron
float * gGABAa
conductance of gGABAa
SynInfo * preSynapticIds
unsigned int spikeCountD2Sec
the total number of spikes with axonal delay > 1 in 1 second, used in CPU_MODE currently ...
float releaseACh
release per spike for Acetylcholine
unsigned short * Npre
stores the number of input connections to a neuron
int Noffset
the offset of spike generator (poisson) neurons [0, numNPois), published by GroupConfigMD ...
#define TARGET_DA
#define GET_CONN_NEURON_ID(val)
unsigned int spikeCountExtRxD1
the number of external spikes with axonal delay == 1 in a simulation, used in CPU_MODE currently ...
int gsId
group id and synapse id
float dudtIzhikevich9(float volt, float recov, float voltRest, float izhA, float izhB, float timeStep=1.0f)
int nId
neuron id
unsigned int * timeTableD2
firing table, only used in CPU_MODE currently
#define MAX_SIMULATION_TIME
#define GET_CONN_SYN_ID(val)
float avgTimeScale_decay
published by GroupConfig
int numN
number of neurons in the global network
int lEndN
published by GroupConfigMD
float * gGABAb
conductance of gGABAb
#define KERNEL_DEBUG(formatc,...)
#define SET_CONN_GRP_ID(val, grpId)
DelayInfo * postDelayInfo
delay information
float baseACh
baseline concentration of Acetylcholine, published by GroupConfig
int ** extFiringTableD2
external firing table, only used on GPU
NE alpha1 receptor with DA antagonist, Avery, Dutt, Krichmar (2013)
int LtoGOffset
published by GroupConfigMD
#define GET_CONN_GRP_ID(val)
float STP_A
published by GroupConfig
Contains all of CARLsim&#39;s core functionality.
Definition: snn.h:138
float STP_U
published by GroupConfig
#define TARGET_5HT
float STP_tau_x_inv
published by GroupConfig
float dudtIzhikevich4(float volt, float recov, float izhA, float izhB, float timeStep=1.0f)
SynInfo * postSynapticIds
10 bit syn id, 22 bit neuron id, ordered based on delay
int lStartN
published by GroupConfigMD
unsigned int * cumulativePost
norepinephrine-modulated STDP, nearest-neighbor
unsigned int * spikeGenBits
CPU multithreading subroutine (that takes single argument) struct argument.
short int * grpIds
#define TARGET_NMDA
standard exponential curve
float avgTimeScale
< homeostatic plasticity variables
acetylcholine
unsigned int spikeCountSec
the total number of spikes in 1 second, used in CPU_MODE currently
DA D1 receptor (connection), Avery, Dutt, Krichmar (2013)
float dvdtLIF(float volt, float lif_vReset, float lif_gain, float lif_bias, int lif_tau_m, float totalCurrent, float timeStep=1.0f)
#define TARGET_ACH
float * gNMDA
conductance of gNMDA
bool isOnGPU()
Checks whether the firing rates are allocated on CPU or GPU.
float STP_tau_u_inv
published by GroupConfig
unsigned int nPoissonSpikes
the total number of spikes of poisson neurons, used in CPU_MODE currently
#define STP_BUF_POS(nid, t, maxDelay)
#define TARGET_AMPA
float * wtChange
stores the weight change of a synaptic connection
float releaseNE
release per spike for Noradrenaline
float release5HT
release per spike for Serotonin
float * nextVoltage
membrane potential buffer (next/future time step) for each regular neuron
float wstpu[NM_NE+3]
Array size = last index + 1 + additional elementsnorm + base.